1 /* 2 * Copyright (c) 2009, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. Oracle designates this 8 * particular file as subject to the "Classpath" exception as provided 9 * by Oracle in the LICENSE file that accompanied this code. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have any 23 * questions. 
24 */ 25 26 package jdk.nio.zipfs; 27 28 import java.io.BufferedOutputStream; 29 import java.io.ByteArrayInputStream; 30 import java.io.ByteArrayOutputStream; 31 import java.io.EOFException; 32 import java.io.FilterOutputStream; 33 import java.io.IOException; 34 import java.io.InputStream; 35 import java.io.OutputStream; 36 import java.nio.ByteBuffer; 37 import java.nio.MappedByteBuffer; 38 import java.nio.channels.FileChannel; 39 import java.nio.channels.FileLock; 40 import java.nio.channels.ReadableByteChannel; 41 import java.nio.channels.SeekableByteChannel; 42 import java.nio.channels.WritableByteChannel; 43 import java.nio.file.*; 44 import java.nio.file.attribute.FileAttribute; 45 import java.nio.file.attribute.FileTime; 46 import java.nio.file.attribute.GroupPrincipal; 47 import java.nio.file.attribute.PosixFilePermission; 48 import java.nio.file.attribute.UserPrincipal; 49 import java.nio.file.attribute.UserPrincipalLookupService; 50 import java.nio.file.spi.FileSystemProvider; 51 import java.security.AccessController; 52 import java.security.PrivilegedAction; 53 import java.security.PrivilegedActionException; 54 import java.security.PrivilegedExceptionAction; 55 import java.util.*; 56 import java.util.concurrent.locks.ReadWriteLock; 57 import java.util.concurrent.locks.ReentrantReadWriteLock; 58 import java.util.regex.Pattern; 59 import java.util.zip.CRC32; 60 import java.util.zip.Deflater; 61 import java.util.zip.DeflaterOutputStream; 62 import java.util.zip.Inflater; 63 import java.util.zip.InflaterInputStream; 64 import java.util.zip.ZipException; 65 66 import static java.lang.Boolean.TRUE; 67 import static java.nio.file.StandardCopyOption.COPY_ATTRIBUTES; 68 import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; 69 import static java.nio.file.StandardOpenOption.APPEND; 70 import static java.nio.file.StandardOpenOption.CREATE; 71 import static java.nio.file.StandardOpenOption.CREATE_NEW; 72 import static java.nio.file.StandardOpenOption.READ; 
73 import static java.nio.file.StandardOpenOption.TRUNCATE_EXISTING; 74 import static java.nio.file.StandardOpenOption.WRITE; 75 import static jdk.nio.zipfs.ZipConstants.*; 76 import static jdk.nio.zipfs.ZipUtils.*; 77 78 /** 79 * A FileSystem built on a zip file 80 * 81 * @author Xueming Shen 82 */ 83 class ZipFileSystem extends FileSystem { 84 private final ZipFileSystemProvider provider; 85 private final Path zfpath; 86 final ZipCoder zc; 87 private final ZipPath rootdir; 88 private boolean readOnly = false; // readonly file system 89 90 // configurable by env map 91 private final boolean noExtt; // see readExtra() 92 private final boolean useTempFile; // use a temp file for newOS, default 93 // is to use BAOS for better performance 94 private static final boolean isWindows = AccessController.doPrivileged( 95 (PrivilegedAction<Boolean>)() -> System.getProperty("os.name") 96 .startsWith("Windows")); 97 private final boolean forceEnd64; 98 private final int defaultMethod; // METHOD_STORED if "noCompression=true" 99 // METHOD_DEFLATED otherwise 100 101 ZipFileSystem(ZipFileSystemProvider provider, 102 Path zfpath, 103 Map<String, ?> env) throws IOException 104 { 105 // default encoding for name/comment 106 String nameEncoding = env.containsKey("encoding") ? 107 (String)env.get("encoding") : "UTF-8"; 108 this.noExtt = "false".equals(env.get("zipinfo-time")); 109 this.useTempFile = isTrue(env, "useTempFile"); 110 this.forceEnd64 = isTrue(env, "forceZIP64End"); 111 this.defaultMethod = isTrue(env, "noCompression") ? 
METHOD_STORED: METHOD_DEFLATED; 112 if (Files.notExists(zfpath)) { 113 // create a new zip if not exists 114 if (isTrue(env, "create")) { 115 try (OutputStream os = Files.newOutputStream(zfpath, CREATE_NEW, WRITE)) { 116 new END().write(os, 0, forceEnd64); 117 } 118 } else { 119 throw new FileSystemNotFoundException(zfpath.toString()); 120 } 121 } 122 // sm and existence check 123 zfpath.getFileSystem().provider().checkAccess(zfpath, AccessMode.READ); 124 boolean writeable = AccessController.doPrivileged( 125 (PrivilegedAction<Boolean>) () -> Files.isWritable(zfpath)); 126 this.readOnly = !writeable; 127 this.zc = ZipCoder.get(nameEncoding); 128 this.rootdir = new ZipPath(this, new byte[]{'/'}); 129 this.ch = Files.newByteChannel(zfpath, READ); 130 try { 131 this.cen = initCEN(); 132 } catch (IOException x) { 133 try { 134 this.ch.close(); 135 } catch (IOException xx) { 136 x.addSuppressed(xx); 137 } 138 throw x; 139 } 140 this.provider = provider; 141 this.zfpath = zfpath; 142 } 143 144 // returns true if there is a name=true/"true" setting in env 145 private static boolean isTrue(Map<String, ?> env, String name) { 146 return "true".equals(env.get(name)) || TRUE.equals(env.get(name)); 147 } 148 149 @Override 150 public FileSystemProvider provider() { 151 return provider; 152 } 153 154 @Override 155 public String getSeparator() { 156 return "/"; 157 } 158 159 @Override 160 public boolean isOpen() { 161 return isOpen; 162 } 163 164 @Override 165 public boolean isReadOnly() { 166 return readOnly; 167 } 168 169 private void checkWritable() throws IOException { 170 if (readOnly) 171 throw new ReadOnlyFileSystemException(); 172 } 173 174 void setReadOnly() { 175 this.readOnly = true; 176 } 177 178 @Override 179 public Iterable<Path> getRootDirectories() { 180 return List.of(rootdir); 181 } 182 183 ZipPath getRootDir() { 184 return rootdir; 185 } 186 187 @Override 188 public ZipPath getPath(String first, String... 
more) { 189 if (more.length == 0) { 190 return new ZipPath(this, first); 191 } 192 StringBuilder sb = new StringBuilder(); 193 sb.append(first); 194 for (String path : more) { 195 if (path.length() > 0) { 196 if (sb.length() > 0) { 197 sb.append('/'); 198 } 199 sb.append(path); 200 } 201 } 202 return new ZipPath(this, sb.toString()); 203 } 204 205 @Override 206 public UserPrincipalLookupService getUserPrincipalLookupService() { 207 throw new UnsupportedOperationException(); 208 } 209 210 @Override 211 public WatchService newWatchService() { 212 throw new UnsupportedOperationException(); 213 } 214 215 FileStore getFileStore(ZipPath path) { 216 return new ZipFileStore(path); 217 } 218 219 @Override 220 public Iterable<FileStore> getFileStores() { 221 return List.of(new ZipFileStore(rootdir)); 222 } 223 224 private static final Set<String> supportedFileAttributeViews = 225 Set.of("basic", "zip"); 226 227 @Override 228 public Set<String> supportedFileAttributeViews() { 229 return supportedFileAttributeViews; 230 } 231 232 @Override 233 public String toString() { 234 return zfpath.toString(); 235 } 236 237 Path getZipFile() { 238 return zfpath; 239 } 240 241 private static final String GLOB_SYNTAX = "glob"; 242 private static final String REGEX_SYNTAX = "regex"; 243 244 @Override 245 public PathMatcher getPathMatcher(String syntaxAndInput) { 246 int pos = syntaxAndInput.indexOf(':'); 247 if (pos <= 0 || pos == syntaxAndInput.length()) { 248 throw new IllegalArgumentException(); 249 } 250 String syntax = syntaxAndInput.substring(0, pos); 251 String input = syntaxAndInput.substring(pos + 1); 252 String expr; 253 if (syntax.equalsIgnoreCase(GLOB_SYNTAX)) { 254 expr = toRegexPattern(input); 255 } else { 256 if (syntax.equalsIgnoreCase(REGEX_SYNTAX)) { 257 expr = input; 258 } else { 259 throw new UnsupportedOperationException("Syntax '" + syntax + 260 "' not recognized"); 261 } 262 } 263 // return matcher 264 final Pattern pattern = Pattern.compile(expr); 265 return new 
PathMatcher() { 266 @Override 267 public boolean matches(Path path) { 268 return pattern.matcher(path.toString()).matches(); 269 } 270 }; 271 } 272 273 @Override 274 public void close() throws IOException { 275 beginWrite(); 276 try { 277 if (!isOpen) 278 return; 279 isOpen = false; // set closed 280 } finally { 281 endWrite(); 282 } 283 if (!streams.isEmpty()) { // unlock and close all remaining streams 284 Set<InputStream> copy = new HashSet<>(streams); 285 for (InputStream is : copy) 286 is.close(); 287 } 288 beginWrite(); // lock and sync 289 try { 290 AccessController.doPrivileged((PrivilegedExceptionAction<Void>)() -> { 291 sync(); return null; 292 }); 293 ch.close(); // close the ch just in case no update 294 // and sync didn't close the ch 295 } catch (PrivilegedActionException e) { 296 throw (IOException)e.getException(); 297 } finally { 298 endWrite(); 299 } 300 301 synchronized (inflaters) { 302 for (Inflater inf : inflaters) 303 inf.end(); 304 } 305 synchronized (deflaters) { 306 for (Deflater def : deflaters) 307 def.end(); 308 } 309 310 IOException ioe = null; 311 synchronized (tmppaths) { 312 for (Path p : tmppaths) { 313 try { 314 AccessController.doPrivileged( 315 (PrivilegedExceptionAction<Boolean>)() -> Files.deleteIfExists(p)); 316 } catch (PrivilegedActionException e) { 317 IOException x = (IOException)e.getException(); 318 if (ioe == null) 319 ioe = x; 320 else 321 ioe.addSuppressed(x); 322 } 323 } 324 } 325 provider.removeFileSystem(zfpath, this); 326 if (ioe != null) 327 throw ioe; 328 } 329 330 ZipFileAttributes getFileAttributes(byte[] path) 331 throws IOException 332 { 333 Entry e; 334 beginRead(); 335 try { 336 ensureOpen(); 337 e = getEntry(path); 338 if (e == null) { 339 IndexNode inode = getInode(path); 340 if (inode == null) 341 return null; 342 // pseudo directory, uses METHOD_STORED 343 e = new Entry(inode.name, inode.isdir, METHOD_STORED); 344 e.mtime = e.atime = e.ctime = zfsDefaultTimeStamp; 345 } 346 } finally { 347 endRead(); 
348 } 349 return e; 350 } 351 352 void checkAccess(byte[] path) throws IOException { 353 beginRead(); 354 try { 355 ensureOpen(); 356 // is it necessary to readCEN as a sanity check? 357 if (getInode(path) == null) { 358 throw new NoSuchFileException(toString()); 359 } 360 361 } finally { 362 endRead(); 363 } 364 } 365 366 void setTimes(byte[] path, FileTime mtime, FileTime atime, FileTime ctime) 367 throws IOException 368 { 369 checkWritable(); 370 beginWrite(); 371 try { 372 ensureOpen(); 373 Entry e = getEntry(path); // ensureOpen checked 374 if (e == null) 375 throw new NoSuchFileException(getString(path)); 376 if (e.type == Entry.CEN) 377 e.type = Entry.COPY; // copy e 378 if (mtime != null) 379 e.mtime = mtime.toMillis(); 380 if (atime != null) 381 e.atime = atime.toMillis(); 382 if (ctime != null) 383 e.ctime = ctime.toMillis(); 384 update(e); 385 } finally { 386 endWrite(); 387 } 388 } 389 390 void setPermissions(byte[] path, Set<PosixFilePermission> perms) 391 throws IOException 392 { 393 checkWritable(); 394 beginWrite(); 395 try { 396 ensureOpen(); 397 Entry e = getEntry(path); // ensureOpen checked 398 if (e == null) { 399 throw new NoSuchFileException(getString(path)); 400 } 401 if (e.type == Entry.CEN) { 402 e.type = Entry.COPY; // copy e 403 } 404 e.posixPerms = perms == null ? -1 : ZipUtils.permsToFlags(perms); 405 update(e); 406 } finally { 407 endWrite(); 408 } 409 } 410 411 boolean exists(byte[] path) 412 throws IOException 413 { 414 beginRead(); 415 try { 416 ensureOpen(); 417 return getInode(path) != null; 418 } finally { 419 endRead(); 420 } 421 } 422 423 boolean isDirectory(byte[] path) 424 throws IOException 425 { 426 beginRead(); 427 try { 428 IndexNode n = getInode(path); 429 return n != null && n.isDir(); 430 } finally { 431 endRead(); 432 } 433 } 434 435 // returns the list of child paths of "path" 436 Iterator<Path> iteratorOf(ZipPath dir, 437 DirectoryStream.Filter<? 
super Path> filter) 438 throws IOException 439 { 440 beginWrite(); // iteration of inodes needs exclusive lock 441 try { 442 ensureOpen(); 443 byte[] path = dir.getResolvedPath(); 444 IndexNode inode = getInode(path); 445 if (inode == null) 446 throw new NotDirectoryException(getString(path)); 447 List<Path> list = new ArrayList<>(); 448 IndexNode child = inode.child; 449 while (child != null) { 450 // (1) assume all path from zip file itself is "normalized" 451 // (2) IndexNode.name is absolute. see IndexNode(byte[],int,int) 452 // (3) if parent "dir" is relative when ZipDirectoryStream 453 // is created, the returned child path needs to be relative 454 // as well. 455 byte[] cname = child.name; 456 if (!dir.isAbsolute()) { 457 cname = Arrays.copyOfRange(cname, 1, cname.length); 458 } 459 ZipPath zpath = new ZipPath(this, cname, true); 460 if (filter == null || filter.accept(zpath)) 461 list.add(zpath); 462 child = child.sibling; 463 } 464 return list.iterator(); 465 } finally { 466 endWrite(); 467 } 468 } 469 470 void createDirectory(byte[] dir, FileAttribute<?>... attrs) 471 throws IOException 472 { 473 checkWritable(); 474 // dir = toDirectoryPath(dir); 475 beginWrite(); 476 try { 477 ensureOpen(); 478 if (dir.length == 0 || exists(dir)) // root dir, or exiting dir 479 throw new FileAlreadyExistsException(getString(dir)); 480 checkParents(dir); 481 Entry e = new Entry(dir, Entry.NEW, true, METHOD_STORED, attrs); 482 update(e); 483 } finally { 484 endWrite(); 485 } 486 } 487 488 void copyFile(boolean deletesrc, byte[]src, byte[] dst, CopyOption... 
options) 489 throws IOException 490 { 491 checkWritable(); 492 if (Arrays.equals(src, dst)) 493 return; // do nothing, src and dst are the same 494 495 beginWrite(); 496 try { 497 ensureOpen(); 498 Entry eSrc = getEntry(src); // ensureOpen checked 499 500 if (eSrc == null) 501 throw new NoSuchFileException(getString(src)); 502 if (eSrc.isDir()) { // spec says to create dst dir 503 createDirectory(dst); 504 return; 505 } 506 boolean hasReplace = false; 507 boolean hasCopyAttrs = false; 508 for (CopyOption opt : options) { 509 if (opt == REPLACE_EXISTING) 510 hasReplace = true; 511 else if (opt == COPY_ATTRIBUTES) 512 hasCopyAttrs = true; 513 } 514 Entry eDst = getEntry(dst); 515 if (eDst != null) { 516 if (!hasReplace) 517 throw new FileAlreadyExistsException(getString(dst)); 518 } else { 519 checkParents(dst); 520 } 521 Entry u = new Entry(eSrc, Entry.COPY); // copy eSrc entry 522 u.name(dst); // change name 523 if (eSrc.type == Entry.NEW || eSrc.type == Entry.FILECH) 524 { 525 u.type = eSrc.type; // make it the same type 526 if (deletesrc) { // if it's a "rename", take the data 527 u.bytes = eSrc.bytes; 528 u.file = eSrc.file; 529 } else { // if it's not "rename", copy the data 530 if (eSrc.bytes != null) 531 u.bytes = Arrays.copyOf(eSrc.bytes, eSrc.bytes.length); 532 else if (eSrc.file != null) { 533 u.file = getTempPathForEntry(null); 534 Files.copy(eSrc.file, u.file, REPLACE_EXISTING); 535 } 536 } 537 } 538 if (!hasCopyAttrs) 539 u.mtime = u.atime= u.ctime = System.currentTimeMillis(); 540 update(u); 541 if (deletesrc) 542 updateDelete(eSrc); 543 } finally { 544 endWrite(); 545 } 546 } 547 548 // Returns an output stream for writing the contents into the specified 549 // entry. 550 OutputStream newOutputStream(byte[] path, OpenOption... 
options) 551 throws IOException 552 { 553 checkWritable(); 554 boolean hasCreateNew = false; 555 boolean hasCreate = false; 556 boolean hasAppend = false; 557 boolean hasTruncate = false; 558 for (OpenOption opt : options) { 559 if (opt == READ) 560 throw new IllegalArgumentException("READ not allowed"); 561 if (opt == CREATE_NEW) 562 hasCreateNew = true; 563 if (opt == CREATE) 564 hasCreate = true; 565 if (opt == APPEND) 566 hasAppend = true; 567 if (opt == TRUNCATE_EXISTING) 568 hasTruncate = true; 569 } 570 if (hasAppend && hasTruncate) 571 throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed"); 572 beginRead(); // only need a readlock, the "update()" will 573 try { // try to obtain a writelock when the os is 574 ensureOpen(); // being closed. 575 Entry e = getEntry(path); 576 if (e != null) { 577 if (e.isDir() || hasCreateNew) 578 throw new FileAlreadyExistsException(getString(path)); 579 if (hasAppend) { 580 InputStream is = getInputStream(e); 581 OutputStream os = getOutputStream(new Entry(e, Entry.NEW)); 582 is.transferTo(os); 583 is.close(); 584 return os; 585 } 586 return getOutputStream(new Entry(e, Entry.NEW)); 587 } else { 588 if (!hasCreate && !hasCreateNew) 589 throw new NoSuchFileException(getString(path)); 590 checkParents(path); 591 return getOutputStream(new Entry(path, Entry.NEW, false, defaultMethod)); 592 } 593 } finally { 594 endRead(); 595 } 596 } 597 598 // Returns an input stream for reading the contents of the specified 599 // file entry. 600 InputStream newInputStream(byte[] path) throws IOException { 601 beginRead(); 602 try { 603 ensureOpen(); 604 Entry e = getEntry(path); 605 if (e == null) 606 throw new NoSuchFileException(getString(path)); 607 if (e.isDir()) 608 throw new FileSystemException(getString(path), "is a directory", null); 609 return getInputStream(e); 610 } finally { 611 endRead(); 612 } 613 } 614 615 private void checkOptions(Set<? 
extends OpenOption> options) { 616 // check for options of null type and option is an intance of StandardOpenOption 617 for (OpenOption option : options) { 618 if (option == null) 619 throw new NullPointerException(); 620 if (!(option instanceof StandardOpenOption)) 621 throw new IllegalArgumentException(); 622 } 623 if (options.contains(APPEND) && options.contains(TRUNCATE_EXISTING)) 624 throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed"); 625 } 626 627 628 // Returns an output SeekableByteChannel for either 629 // (1) writing the contents of a new entry, if the entry doesn't exit, or 630 // (2) updating/replacing the contents of an existing entry. 631 // Note: The content is not compressed. 632 private class EntryOutputChannel extends ByteArrayChannel { 633 Entry e; 634 635 EntryOutputChannel(Entry e) throws IOException { 636 super(e.size > 0? (int)e.size : 8192, false); 637 this.e = e; 638 if (e.mtime == -1) 639 e.mtime = System.currentTimeMillis(); 640 if (e.method == -1) 641 e.method = defaultMethod; 642 // store size, compressed size, and crc-32 in datadescriptor 643 e.flag = FLAG_DATADESCR; 644 if (zc.isUTF8()) 645 e.flag |= FLAG_USE_UTF8; 646 } 647 648 @Override 649 public void close() throws IOException { 650 e.bytes = toByteArray(); 651 e.size = e.bytes.length; 652 e.crc = -1; 653 super.close(); 654 update(e); 655 } 656 } 657 658 private int getCompressMethod(FileAttribute<?>... attrs) { 659 return defaultMethod; 660 } 661 662 // Returns a Writable/ReadByteChannel for now. Might consdier to use 663 // newFileChannel() instead, which dump the entry data into a regular 664 // file on the default file system and create a FileChannel on top of 665 // it. 666 SeekableByteChannel newByteChannel(byte[] path, 667 Set<? extends OpenOption> options, 668 FileAttribute<?>... 
attrs) 669 throws IOException 670 { 671 checkOptions(options); 672 if (options.contains(StandardOpenOption.WRITE) || 673 options.contains(StandardOpenOption.APPEND)) { 674 checkWritable(); 675 beginRead(); // only need a readlock, the "update()" will obtain 676 // thewritelock when the channel is closed 677 try { 678 ensureOpen(); 679 Entry e = getEntry(path); 680 if (e != null) { 681 if (e.isDir() || options.contains(CREATE_NEW)) 682 throw new FileAlreadyExistsException(getString(path)); 683 SeekableByteChannel sbc = 684 new EntryOutputChannel(new Entry(e, Entry.NEW)); 685 if (options.contains(APPEND)) { 686 try (InputStream is = getInputStream(e)) { // copyover 687 byte[] buf = new byte[8192]; 688 ByteBuffer bb = ByteBuffer.wrap(buf); 689 int n; 690 while ((n = is.read(buf)) != -1) { 691 bb.position(0); 692 bb.limit(n); 693 sbc.write(bb); 694 } 695 } 696 } 697 return sbc; 698 } 699 if (!options.contains(CREATE) && !options.contains(CREATE_NEW)) 700 throw new NoSuchFileException(getString(path)); 701 checkParents(path); 702 return new EntryOutputChannel( 703 new Entry(path, Entry.NEW, false, getCompressMethod(attrs), attrs)); 704 705 } finally { 706 endRead(); 707 } 708 } else { 709 beginRead(); 710 try { 711 ensureOpen(); 712 Entry e = getEntry(path); 713 if (e == null || e.isDir()) 714 throw new NoSuchFileException(getString(path)); 715 try (InputStream is = getInputStream(e)) { 716 // TBD: if (e.size < NNNNN); 717 return new ByteArrayChannel(is.readAllBytes(), true); 718 } 719 } finally { 720 endRead(); 721 } 722 } 723 } 724 725 // Returns a FileChannel of the specified entry. 726 // 727 // This implementation creates a temporary file on the default file system, 728 // copy the entry data into it if the entry exists, and then create a 729 // FileChannel on top of it. 730 FileChannel newFileChannel(byte[] path, 731 Set<? extends OpenOption> options, 732 FileAttribute<?>... 
attrs) 733 throws IOException 734 { 735 checkOptions(options); 736 final boolean forWrite = (options.contains(StandardOpenOption.WRITE) || 737 options.contains(StandardOpenOption.APPEND)); 738 beginRead(); 739 try { 740 ensureOpen(); 741 Entry e = getEntry(path); 742 if (forWrite) { 743 checkWritable(); 744 if (e == null) { 745 if (!options.contains(StandardOpenOption.CREATE) && 746 !options.contains(StandardOpenOption.CREATE_NEW)) { 747 throw new NoSuchFileException(getString(path)); 748 } 749 } else { 750 if (options.contains(StandardOpenOption.CREATE_NEW)) { 751 throw new FileAlreadyExistsException(getString(path)); 752 } 753 if (e.isDir()) 754 throw new FileAlreadyExistsException("directory <" 755 + getString(path) + "> exists"); 756 } 757 options = new HashSet<>(options); 758 options.remove(StandardOpenOption.CREATE_NEW); // for tmpfile 759 } else if (e == null || e.isDir()) { 760 throw new NoSuchFileException(getString(path)); 761 } 762 763 final boolean isFCH = (e != null && e.type == Entry.FILECH); 764 final Path tmpfile = isFCH ? e.file : getTempPathForEntry(path); 765 final FileChannel fch = tmpfile.getFileSystem() 766 .provider() 767 .newFileChannel(tmpfile, options, attrs); 768 final Entry u = isFCH ? e : new Entry(path, tmpfile, Entry.FILECH, attrs); 769 if (forWrite) { 770 u.flag = FLAG_DATADESCR; 771 u.method = getCompressMethod(attrs); 772 } 773 // is there a better way to hook into the FileChannel's close method? 
774 return new FileChannel() { 775 public int write(ByteBuffer src) throws IOException { 776 return fch.write(src); 777 } 778 public long write(ByteBuffer[] srcs, int offset, int length) 779 throws IOException 780 { 781 return fch.write(srcs, offset, length); 782 } 783 public long position() throws IOException { 784 return fch.position(); 785 } 786 public FileChannel position(long newPosition) 787 throws IOException 788 { 789 fch.position(newPosition); 790 return this; 791 } 792 public long size() throws IOException { 793 return fch.size(); 794 } 795 public FileChannel truncate(long size) 796 throws IOException 797 { 798 fch.truncate(size); 799 return this; 800 } 801 public void force(boolean metaData) 802 throws IOException 803 { 804 fch.force(metaData); 805 } 806 public long transferTo(long position, long count, 807 WritableByteChannel target) 808 throws IOException 809 { 810 return fch.transferTo(position, count, target); 811 } 812 public long transferFrom(ReadableByteChannel src, 813 long position, long count) 814 throws IOException 815 { 816 return fch.transferFrom(src, position, count); 817 } 818 public int read(ByteBuffer dst) throws IOException { 819 return fch.read(dst); 820 } 821 public int read(ByteBuffer dst, long position) 822 throws IOException 823 { 824 return fch.read(dst, position); 825 } 826 public long read(ByteBuffer[] dsts, int offset, int length) 827 throws IOException 828 { 829 return fch.read(dsts, offset, length); 830 } 831 public int write(ByteBuffer src, long position) 832 throws IOException 833 { 834 return fch.write(src, position); 835 } 836 public MappedByteBuffer map(MapMode mode, 837 long position, long size) 838 throws IOException 839 { 840 throw new UnsupportedOperationException(); 841 } 842 public FileLock lock(long position, long size, boolean shared) 843 throws IOException 844 { 845 return fch.lock(position, size, shared); 846 } 847 public FileLock tryLock(long position, long size, boolean shared) 848 throws IOException 849 { 
850 return fch.tryLock(position, size, shared); 851 } 852 protected void implCloseChannel() throws IOException { 853 fch.close(); 854 if (forWrite) { 855 u.mtime = System.currentTimeMillis(); 856 u.size = Files.size(u.file); 857 858 update(u); 859 } else { 860 if (!isFCH) // if this is a new fch for reading 861 removeTempPathForEntry(tmpfile); 862 } 863 } 864 }; 865 } finally { 866 endRead(); 867 } 868 } 869 870 // the outstanding input streams that need to be closed 871 private Set<InputStream> streams = 872 Collections.synchronizedSet(new HashSet<InputStream>()); 873 874 private Set<Path> tmppaths = Collections.synchronizedSet(new HashSet<Path>()); 875 private Path getTempPathForEntry(byte[] path) throws IOException { 876 Path tmpPath = createTempFileInSameDirectoryAs(zfpath); 877 if (path != null) { 878 Entry e = getEntry(path); 879 if (e != null) { 880 try (InputStream is = newInputStream(path)) { 881 Files.copy(is, tmpPath, REPLACE_EXISTING); 882 } 883 } 884 } 885 return tmpPath; 886 } 887 888 private void removeTempPathForEntry(Path path) throws IOException { 889 Files.delete(path); 890 tmppaths.remove(path); 891 } 892 893 // check if all parents really exit. ZIP spec does not require 894 // the existence of any "parent directory". 
895 private void checkParents(byte[] path) throws IOException { 896 beginRead(); 897 try { 898 while ((path = getParent(path)) != null && 899 path != ROOTPATH) { 900 if (!inodes.containsKey(IndexNode.keyOf(path))) { 901 throw new NoSuchFileException(getString(path)); 902 } 903 } 904 } finally { 905 endRead(); 906 } 907 } 908 909 private static byte[] ROOTPATH = new byte[] { '/' }; 910 private static byte[] getParent(byte[] path) { 911 int off = getParentOff(path); 912 if (off <= 1) 913 return ROOTPATH; 914 return Arrays.copyOf(path, off); 915 } 916 917 private static int getParentOff(byte[] path) { 918 int off = path.length - 1; 919 if (off > 0 && path[off] == '/') // isDirectory 920 off--; 921 while (off > 0 && path[off] != '/') { off--; } 922 return off; 923 } 924 925 private final void beginWrite() { 926 rwlock.writeLock().lock(); 927 } 928 929 private final void endWrite() { 930 rwlock.writeLock().unlock(); 931 } 932 933 private final void beginRead() { 934 rwlock.readLock().lock(); 935 } 936 937 private final void endRead() { 938 rwlock.readLock().unlock(); 939 } 940 941 /////////////////////////////////////////////////////////////////// 942 943 private volatile boolean isOpen = true; 944 private final SeekableByteChannel ch; // channel to the zipfile 945 final byte[] cen; // CEN & ENDHDR 946 private END end; 947 private long locpos; // position of first LOC header (usually 0) 948 949 private final ReadWriteLock rwlock = new ReentrantReadWriteLock(); 950 951 // name -> pos (in cen), IndexNode itself can be used as a "key" 952 private LinkedHashMap<IndexNode, IndexNode> inodes; 953 954 final byte[] getBytes(String name) { 955 return zc.getBytes(name); 956 } 957 958 final String getString(byte[] name) { 959 return zc.toString(name); 960 } 961 962 @SuppressWarnings("deprecation") 963 protected void finalize() throws IOException { 964 close(); 965 } 966 967 // Reads len bytes of data from the specified offset into buf. 
968 // Returns the total number of bytes read. 969 // Each/every byte read from here (except the cen, which is mapped). 970 final long readFullyAt(byte[] buf, int off, long len, long pos) 971 throws IOException 972 { 973 ByteBuffer bb = ByteBuffer.wrap(buf); 974 bb.position(off); 975 bb.limit((int)(off + len)); 976 return readFullyAt(bb, pos); 977 } 978 979 private final long readFullyAt(ByteBuffer bb, long pos) 980 throws IOException 981 { 982 synchronized(ch) { 983 return ch.position(pos).read(bb); 984 } 985 } 986 987 // Searches for end of central directory (END) header. The contents of 988 // the END header will be read and placed in endbuf. Returns the file 989 // position of the END header, otherwise returns -1 if the END header 990 // was not found or an error occurred. 991 private END findEND() throws IOException 992 { 993 byte[] buf = new byte[READBLOCKSZ]; 994 long ziplen = ch.size(); 995 long minHDR = (ziplen - END_MAXLEN) > 0 ? ziplen - END_MAXLEN : 0; 996 long minPos = minHDR - (buf.length - ENDHDR); 997 998 for (long pos = ziplen - buf.length; pos >= minPos; pos -= (buf.length - ENDHDR)) 999 { 1000 int off = 0; 1001 if (pos < 0) { 1002 // Pretend there are some NUL bytes before start of file 1003 off = (int)-pos; 1004 Arrays.fill(buf, 0, off, (byte)0); 1005 } 1006 int len = buf.length - off; 1007 if (readFullyAt(buf, off, len, pos + off) != len) 1008 zerror("zip END header not found"); 1009 1010 // Now scan the block backwards for END header signature 1011 for (int i = buf.length - ENDHDR; i >= 0; i--) { 1012 if (buf[i+0] == (byte)'P' && 1013 buf[i+1] == (byte)'K' && 1014 buf[i+2] == (byte)'\005' && 1015 buf[i+3] == (byte)'\006' && 1016 (pos + i + ENDHDR + ENDCOM(buf, i) == ziplen)) { 1017 // Found END header 1018 buf = Arrays.copyOfRange(buf, i, i + ENDHDR); 1019 END end = new END(); 1020 end.endsub = ENDSUB(buf); 1021 end.centot = ENDTOT(buf); 1022 end.cenlen = ENDSIZ(buf); 1023 end.cenoff = ENDOFF(buf); 1024 end.comlen = ENDCOM(buf); 1025 
end.endpos = pos + i; 1026 // try if there is zip64 end; 1027 byte[] loc64 = new byte[ZIP64_LOCHDR]; 1028 if (end.endpos < ZIP64_LOCHDR || 1029 readFullyAt(loc64, 0, loc64.length, end.endpos - ZIP64_LOCHDR) 1030 != loc64.length || 1031 !locator64SigAt(loc64, 0)) { 1032 return end; 1033 } 1034 long end64pos = ZIP64_LOCOFF(loc64); 1035 byte[] end64buf = new byte[ZIP64_ENDHDR]; 1036 if (readFullyAt(end64buf, 0, end64buf.length, end64pos) 1037 != end64buf.length || 1038 !end64SigAt(end64buf, 0)) { 1039 return end; 1040 } 1041 // end64 found, 1042 long cenlen64 = ZIP64_ENDSIZ(end64buf); 1043 long cenoff64 = ZIP64_ENDOFF(end64buf); 1044 long centot64 = ZIP64_ENDTOT(end64buf); 1045 // double-check 1046 if (cenlen64 != end.cenlen && end.cenlen != ZIP64_MINVAL || 1047 cenoff64 != end.cenoff && end.cenoff != ZIP64_MINVAL || 1048 centot64 != end.centot && end.centot != ZIP64_MINVAL32) { 1049 return end; 1050 } 1051 // to use the end64 values 1052 end.cenlen = cenlen64; 1053 end.cenoff = cenoff64; 1054 end.centot = (int)centot64; // assume total < 2g 1055 end.endpos = end64pos; 1056 return end; 1057 } 1058 } 1059 } 1060 zerror("zip END header not found"); 1061 return null; //make compiler happy 1062 } 1063 1064 // Reads zip file central directory. Returns the file position of first 1065 // CEN header, otherwise returns -1 if an error occurred. If zip->msg != NULL 1066 // then the error was a zip format error and zip->msg has the error text. 1067 // Always pass in -1 for knownTotal; it's used for a recursive call. 
    // Reads the END header and the whole CEN (central directory) table,
    // populating "inodes" with one IndexNode per CEN entry.  Returns the
    // raw CEN bytes (with ENDHDR bytes appended) or null when only an END
    // header is present (empty zip).
    private byte[] initCEN() throws IOException {
        end = findEND();
        if (end.endpos == 0) {
            inodes = new LinkedHashMap<>(10);
            locpos = 0;
            buildNodeTree();
            return null;             // only END header present
        }
        if (end.cenlen > end.endpos)
            zerror("invalid END header (bad central directory size)");
        long cenpos = end.endpos - end.cenlen;     // position of CEN table

        // Get position of first local file (LOC) header, taking into
        // account that there may be a stub prefixed to the zip file.
        locpos = cenpos - end.cenoff;
        if (locpos < 0)
            zerror("invalid END header (bad central directory offset)");

        // read in the CEN and END
        byte[] cen = new byte[(int)(end.cenlen + ENDHDR)];
        if (readFullyAt(cen, 0, cen.length, cenpos) != end.cenlen + ENDHDR) {
            zerror("read CEN tables failed");
        }
        // Iterate through the entries in the central directory
        inodes = new LinkedHashMap<>(end.centot + 1);
        int pos = 0;
        int limit = cen.length - ENDHDR;
        while (pos < limit) {
            if (!cenSigAt(cen, pos))
                zerror("invalid CEN header (bad signature)");
            int method = CENHOW(cen, pos);
            int nlen = CENNAM(cen, pos);
            int elen = CENEXT(cen, pos);
            int clen = CENCOM(cen, pos);
            // reject entries this provider cannot read back
            if ((CENFLG(cen, pos) & 1) != 0) {
                zerror("invalid CEN header (encrypted entry)");
            }
            if (method != METHOD_STORED && method != METHOD_DEFLATED) {
                zerror("invalid CEN header (unsupported compression method: " + method + ")");
            }
            if (pos + CENHDR + nlen > limit) {
                zerror("invalid CEN header (bad header size)");
            }
            IndexNode inode = new IndexNode(cen, pos, nlen);
            inodes.put(inode, inode);

            // skip ext and comment
            pos += (CENHDR + nlen + elen + clen);
        }
        // the CEN table must end exactly where the END header begins
        if (pos + ENDHDR != cen.length) {
            zerror("invalid CEN header (bad header size)");
        }
        buildNodeTree();
        return cen;
    }

    // Throws ClosedFileSystemException once this file system is closed.
    private void ensureOpen() throws IOException {
        if (!isOpen)
            throw new ClosedFileSystemException();
    }

    // Creates a new empty temporary file in the same directory as the
    // specified file. A variant of Files.createTempFile.
    private Path createTempFileInSameDirectoryAs(Path path)
        throws IOException
    {
        Path parent = path.toAbsolutePath().getParent();
        Path dir = (parent == null) ? path.getFileSystem().getPath(".") : parent;
        Path tmpPath = Files.createTempFile(dir, "zipfstmp", null);
        tmppaths.add(tmpPath);    // remembered so cleanup can delete it
        return tmpPath;
    }

    ////////////////////update & sync //////////////////////////////////////

    // set once any entry is added/updated/deleted; checked by sync()
    private boolean hasUpdate = false;

    // shared key. consumer guarantees the "writeLock" before use it.
    private final IndexNode LOOKUPKEY = new IndexNode(null, -1);

    // Removes the node from both the directory tree and the inodes map,
    // and marks the file system as needing a sync.
    private void updateDelete(IndexNode inode) {
        beginWrite();
        try {
            removeFromTree(inode);
            inodes.remove(inode);
            hasUpdate = true;
        } finally {
            endWrite();
        }
    }

    // Adds or replaces the entry in the inodes map; freshly created
    // entries (NEW/FILECH/COPY) are also linked into their parent's
    // child list.  Marks the file system as needing a sync.
    private void update(Entry e) {
        beginWrite();
        try {
            IndexNode old = inodes.put(e, e);
            if (old != null) {
                removeFromTree(old);
            }
            if (e.type == Entry.NEW || e.type == Entry.FILECH || e.type == Entry.COPY) {
                IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(e.name)));
                e.sibling = parent.child;
                parent.child = e;
            }
            hasUpdate = true;
        } finally {
            endWrite();
        }
    }

    // copy over the whole LOC entry (header if necessary, data and ext) from
    // old zip to the new one.
1179 private long copyLOCEntry(Entry e, boolean updateHeader, 1180 OutputStream os, 1181 long written, byte[] buf) 1182 throws IOException 1183 { 1184 long locoff = e.locoff; // where to read 1185 e.locoff = written; // update the e.locoff with new value 1186 1187 // calculate the size need to write out 1188 long size = 0; 1189 // if there is A ext 1190 if ((e.flag & FLAG_DATADESCR) != 0) { 1191 if (e.size >= ZIP64_MINVAL || e.csize >= ZIP64_MINVAL) 1192 size = 24; 1193 else 1194 size = 16; 1195 } 1196 // read loc, use the original loc.elen/nlen 1197 // 1198 // an extra byte after loc is read, which should be the first byte of the 1199 // 'name' field of the loc. if this byte is '/', which means the original 1200 // entry has an absolute path in original zip/jar file, the e.writeLOC() 1201 // is used to output the loc, in which the leading "/" will be removed 1202 if (readFullyAt(buf, 0, LOCHDR + 1 , locoff) != LOCHDR + 1) 1203 throw new ZipException("loc: reading failed"); 1204 1205 if (updateHeader || LOCNAM(buf) > 0 && buf[LOCHDR] == '/') { 1206 locoff += LOCHDR + LOCNAM(buf) + LOCEXT(buf); // skip header 1207 size += e.csize; 1208 written = e.writeLOC(os) + size; 1209 } else { 1210 os.write(buf, 0, LOCHDR); // write out the loc header 1211 locoff += LOCHDR; 1212 // use e.csize, LOCSIZ(buf) is zero if FLAG_DATADESCR is on 1213 // size += LOCNAM(buf) + LOCEXT(buf) + LOCSIZ(buf); 1214 size += LOCNAM(buf) + LOCEXT(buf) + e.csize; 1215 written = LOCHDR + size; 1216 } 1217 int n; 1218 while (size > 0 && 1219 (n = (int)readFullyAt(buf, 0, buf.length, locoff)) != -1) 1220 { 1221 if (size < n) 1222 n = (int)size; 1223 os.write(buf, 0, n); 1224 size -= n; 1225 locoff += n; 1226 } 1227 return written; 1228 } 1229 1230 private long writeEntry(Entry e, OutputStream os, byte[] buf) 1231 throws IOException { 1232 1233 if (e.bytes == null && e.file == null) // dir, 0-length data 1234 return 0; 1235 1236 long written = 0; 1237 try (OutputStream os2 = e.method == METHOD_STORED ? 
1238 new EntryOutputStreamCRC32(e, os) : new EntryOutputStreamDef(e, os)) { 1239 if (e.bytes != null) { // in-memory 1240 os2.write(e.bytes, 0, e.bytes.length); 1241 } else if (e.file != null) { // tmp file 1242 if (e.type == Entry.NEW || e.type == Entry.FILECH) { 1243 try (InputStream is = Files.newInputStream(e.file)) { 1244 is.transferTo(os2); 1245 } 1246 } 1247 Files.delete(e.file); 1248 tmppaths.remove(e.file); 1249 } 1250 } 1251 written += e.csize; 1252 if ((e.flag & FLAG_DATADESCR) != 0) { 1253 written += e.writeEXT(os); 1254 } 1255 return written; 1256 } 1257 1258 // sync the zip file system, if there is any udpate 1259 private void sync() throws IOException { 1260 1261 if (!hasUpdate) 1262 return; 1263 Path tmpFile = createTempFileInSameDirectoryAs(zfpath); 1264 try (OutputStream os = new BufferedOutputStream(Files.newOutputStream(tmpFile, WRITE))) 1265 { 1266 ArrayList<Entry> elist = new ArrayList<>(inodes.size()); 1267 long written = 0; 1268 byte[] buf = new byte[8192]; 1269 Entry e = null; 1270 1271 // write loc 1272 for (IndexNode inode : inodes.values()) { 1273 if (inode instanceof Entry) { // an updated inode 1274 e = (Entry)inode; 1275 try { 1276 if (e.type == Entry.COPY) { 1277 // entry copy: the only thing changed is the "name" 1278 // and "nlen" in LOC header, so we udpate/rewrite the 1279 // LOC in new file and simply copy the rest (data and 1280 // ext) without enflating/deflating from the old zip 1281 // file LOC entry. 
1282 written += copyLOCEntry(e, true, os, written, buf); 1283 } else { // NEW, FILECH or CEN 1284 e.locoff = written; 1285 written += e.writeLOC(os); // write loc header 1286 written += writeEntry(e, os, buf); 1287 } 1288 elist.add(e); 1289 } catch (IOException x) { 1290 x.printStackTrace(); // skip any in-accurate entry 1291 } 1292 } else { // unchanged inode 1293 if (inode.pos == -1) { 1294 continue; // pseudo directory node 1295 } 1296 if (inode.name.length == 1 && inode.name[0] == '/') { 1297 continue; // no root '/' directory even it 1298 // exits in original zip/jar file. 1299 } 1300 e = Entry.readCEN(this, inode); 1301 try { 1302 written += copyLOCEntry(e, false, os, written, buf); 1303 elist.add(e); 1304 } catch (IOException x) { 1305 x.printStackTrace(); // skip any wrong entry 1306 } 1307 } 1308 } 1309 1310 // now write back the cen and end table 1311 end.cenoff = written; 1312 for (Entry entry : elist) { 1313 written += entry.writeCEN(os); 1314 } 1315 end.centot = elist.size(); 1316 end.cenlen = written - end.cenoff; 1317 end.write(os, written, forceEnd64); 1318 } 1319 1320 ch.close(); 1321 Files.delete(zfpath); 1322 Files.move(tmpFile, zfpath, REPLACE_EXISTING); 1323 hasUpdate = false; // clear 1324 } 1325 1326 IndexNode getInode(byte[] path) { 1327 if (path == null) 1328 throw new NullPointerException("path"); 1329 return inodes.get(IndexNode.keyOf(path)); 1330 } 1331 1332 Entry getEntry(byte[] path) throws IOException { 1333 IndexNode inode = getInode(path); 1334 if (inode instanceof Entry) 1335 return (Entry)inode; 1336 if (inode == null || inode.pos == -1) 1337 return null; 1338 return Entry.readCEN(this, inode); 1339 } 1340 1341 public void deleteFile(byte[] path, boolean failIfNotExists) 1342 throws IOException 1343 { 1344 checkWritable(); 1345 1346 IndexNode inode = getInode(path); 1347 if (inode == null) { 1348 if (path != null && path.length == 0) 1349 throw new ZipException("root directory </> can't not be delete"); 1350 if (failIfNotExists) 
1351 throw new NoSuchFileException(getString(path)); 1352 } else { 1353 if (inode.isDir() && inode.child != null) 1354 throw new DirectoryNotEmptyException(getString(path)); 1355 updateDelete(inode); 1356 } 1357 } 1358 1359 // Returns an out stream for either 1360 // (1) writing the contents of a new entry, if the entry exits, or 1361 // (2) updating/replacing the contents of the specified existing entry. 1362 private OutputStream getOutputStream(Entry e) throws IOException { 1363 1364 if (e.mtime == -1) 1365 e.mtime = System.currentTimeMillis(); 1366 if (e.method == -1) 1367 e.method = defaultMethod; 1368 // store size, compressed size, and crc-32 in datadescr 1369 e.flag = FLAG_DATADESCR; 1370 if (zc.isUTF8()) 1371 e.flag |= FLAG_USE_UTF8; 1372 OutputStream os; 1373 if (useTempFile) { 1374 e.file = getTempPathForEntry(null); 1375 os = Files.newOutputStream(e.file, WRITE); 1376 } else { 1377 os = new ByteArrayOutputStream((e.size > 0)? (int)e.size : 8192); 1378 } 1379 return new EntryOutputStream(e, os); 1380 } 1381 1382 private class EntryOutputStream extends FilterOutputStream { 1383 private Entry e; 1384 private long written; 1385 private boolean isClosed; 1386 1387 EntryOutputStream(Entry e, OutputStream os) throws IOException { 1388 super(os); 1389 this.e = Objects.requireNonNull(e, "Zip entry is null"); 1390 // this.written = 0; 1391 } 1392 1393 @Override 1394 public synchronized void write(int b) throws IOException { 1395 out.write(b); 1396 written += 1; 1397 } 1398 1399 @Override 1400 public synchronized void write(byte b[], int off, int len) 1401 throws IOException { 1402 out.write(b, off, len); 1403 written += len; 1404 } 1405 1406 @Override 1407 public synchronized void close() throws IOException { 1408 if (isClosed) { 1409 return; 1410 } 1411 isClosed = true; 1412 e.size = written; 1413 if (out instanceof ByteArrayOutputStream) 1414 e.bytes = ((ByteArrayOutputStream)out).toByteArray(); 1415 super.close(); 1416 update(e); 1417 } 1418 } 1419 1420 // 
Wrapper output stream class to write out a "stored" entry. 1421 // (1) this class does not close the underlying out stream when 1422 // being closed. 1423 // (2) no need to be "synchronized", only used by sync() 1424 private class EntryOutputStreamCRC32 extends FilterOutputStream { 1425 private Entry e; 1426 private CRC32 crc; 1427 private long written; 1428 private boolean isClosed; 1429 1430 EntryOutputStreamCRC32(Entry e, OutputStream os) throws IOException { 1431 super(os); 1432 this.e = Objects.requireNonNull(e, "Zip entry is null"); 1433 this.crc = new CRC32(); 1434 } 1435 1436 @Override 1437 public void write(int b) throws IOException { 1438 out.write(b); 1439 crc.update(b); 1440 written += 1; 1441 } 1442 1443 @Override 1444 public void write(byte b[], int off, int len) 1445 throws IOException { 1446 out.write(b, off, len); 1447 crc.update(b, off, len); 1448 written += len; 1449 } 1450 1451 @Override 1452 public void close() throws IOException { 1453 if (isClosed) 1454 return; 1455 isClosed = true; 1456 e.size = e.csize = written; 1457 e.crc = crc.getValue(); 1458 } 1459 } 1460 1461 // Wrapper output stream class to write out a "deflated" entry. 1462 // (1) this class does not close the underlying out stream when 1463 // being closed. 
    // (2) no need to be "synchronized", only used by sync()
    private class EntryOutputStreamDef extends DeflaterOutputStream {
        private CRC32 crc;          // running crc-32 of the *uncompressed* data
        private Entry e;            // entry whose size/csize/crc are recorded
        private boolean isClosed;   // guards against double close

        EntryOutputStreamDef(Entry e, OutputStream os) throws IOException {
            super(os, getDeflater());    // deflater comes from the shared pool
            this.e = Objects.requireNonNull(e, "Zip entry is null");
            this.crc = new CRC32();
        }

        @Override
        public void write(byte b[], int off, int len)
            throws IOException {
            super.write(b, off, len);
            crc.update(b, off, len);
        }

        // Finishes deflation and records uncompressed size, compressed
        // size and crc on the entry; does NOT close the underlying stream.
        @Override
        public void close() throws IOException {
            if (isClosed)
                return;
            isClosed = true;
            finish();
            e.size = def.getBytesRead();
            e.csize = def.getBytesWritten();
            e.crc = crc.getValue();
            releaseDeflater(def);    // return the deflater to the pool
        }
    }

    // Returns an input stream over the entry's data.  NEW/FILECH entries
    // hold uncompressed data (bytes or a tmp file); CEN/COPY entries are
    // read from the zip channel and, if deflated, wrapped in an
    // InflaterInputStream.
    private InputStream getInputStream(Entry e)
        throws IOException
    {
        InputStream eis = null;

        if (e.type == Entry.NEW) {
            // now bytes & file is uncompressed.
            if (e.bytes != null)
                return new ByteArrayInputStream(e.bytes);
            else if (e.file != null)
                return Files.newInputStream(e.file);
            else
                throw new ZipException("update entry data is missing");
        } else if (e.type == Entry.FILECH) {
            // FILECH result is un-compressed.
            eis = Files.newInputStream(e.file);
            // TBD: wrap to hook close()
            // streams.add(eis);
            return eis;
        } else {  // untouched CEN or COPY
            eis = new EntryInputStream(e, ch);
        }
        if (e.method == METHOD_DEFLATED) {
            // MORE: Compute good size for inflater stream:
            long bufSize = e.size + 2; // Inflater likes a bit of slack
            if (bufSize > 65536)
                bufSize = 8192;
            final long size = e.size;
            eis = new InflaterInputStream(eis, getInflater(), (int)bufSize) {
                private boolean isClosed = false;
                public void close() throws IOException {
                    if (!isClosed) {
                        releaseInflater(inf);    // back to the shared pool
                        this.in.close();
                        isClosed = true;
                        streams.remove(this);
                    }
                }
                // Override fill() method to provide an extra "dummy" byte
                // at the end of the input stream. This is required when
                // using the "nowrap" Inflater option. (it appears the new
                // zlib in 7 does not need it, but keep it for now)
                protected void fill() throws IOException {
                    if (eof) {
                        throw new EOFException(
                            "Unexpected end of ZLIB input stream");
                    }
                    len = this.in.read(buf, 0, buf.length);
                    if (len == -1) {
                        buf[0] = 0;
                        len = 1;
                        eof = true;
                    }
                    inf.setInput(buf, 0, len);
                }
                private boolean eof;

                public int available() throws IOException {
                    if (isClosed)
                        return 0;
                    // uncompressed bytes not yet delivered to the caller
                    long avail = size - inf.getBytesWritten();
                    return avail > (long) Integer.MAX_VALUE ?
                        Integer.MAX_VALUE : (int) avail;
                }
            };
        } else if (e.method == METHOD_STORED) {
            // TBD: wrap/ it does not seem necessary
        } else {
            throw new ZipException("invalid compression method");
        }
        streams.add(eis);    // tracked so closing the fs can close it
        return eis;
    }

    // Inner class implementing the input stream used to read
    // a (possibly compressed) zip file entry.
1572 private class EntryInputStream extends InputStream { 1573 private final SeekableByteChannel zfch; // local ref to zipfs's "ch". zipfs.ch might 1574 // point to a new channel after sync() 1575 private long pos; // current position within entry data 1576 protected long rem; // number of remaining bytes within entry 1577 1578 EntryInputStream(Entry e, SeekableByteChannel zfch) 1579 throws IOException 1580 { 1581 this.zfch = zfch; 1582 rem = e.csize; 1583 pos = e.locoff; 1584 if (pos == -1) { 1585 Entry e2 = getEntry(e.name); 1586 if (e2 == null) { 1587 throw new ZipException("invalid loc for entry <" + e.name + ">"); 1588 } 1589 pos = e2.locoff; 1590 } 1591 pos = -pos; // lazy initialize the real data offset 1592 } 1593 1594 public int read(byte b[], int off, int len) throws IOException { 1595 ensureOpen(); 1596 initDataPos(); 1597 if (rem == 0) { 1598 return -1; 1599 } 1600 if (len <= 0) { 1601 return 0; 1602 } 1603 if (len > rem) { 1604 len = (int) rem; 1605 } 1606 // readFullyAt() 1607 long n = 0; 1608 ByteBuffer bb = ByteBuffer.wrap(b); 1609 bb.position(off); 1610 bb.limit(off + len); 1611 synchronized(zfch) { 1612 n = zfch.position(pos).read(bb); 1613 } 1614 if (n > 0) { 1615 pos += n; 1616 rem -= n; 1617 } 1618 if (rem == 0) { 1619 close(); 1620 } 1621 return (int)n; 1622 } 1623 1624 public int read() throws IOException { 1625 byte[] b = new byte[1]; 1626 if (read(b, 0, 1) == 1) { 1627 return b[0] & 0xff; 1628 } else { 1629 return -1; 1630 } 1631 } 1632 1633 public long skip(long n) throws IOException { 1634 ensureOpen(); 1635 if (n > rem) 1636 n = rem; 1637 pos += n; 1638 rem -= n; 1639 if (rem == 0) { 1640 close(); 1641 } 1642 return n; 1643 } 1644 1645 public int available() { 1646 return rem > Integer.MAX_VALUE ? 
Integer.MAX_VALUE : (int) rem; 1647 } 1648 1649 public void close() { 1650 rem = 0; 1651 streams.remove(this); 1652 } 1653 1654 private void initDataPos() throws IOException { 1655 if (pos <= 0) { 1656 pos = -pos + locpos; 1657 byte[] buf = new byte[LOCHDR]; 1658 if (readFullyAt(buf, 0, buf.length, pos) != LOCHDR) { 1659 throw new ZipException("invalid loc " + pos + " for entry reading"); 1660 } 1661 pos += LOCHDR + LOCNAM(buf) + LOCEXT(buf); 1662 } 1663 } 1664 } 1665 1666 static void zerror(String msg) throws ZipException { 1667 throw new ZipException(msg); 1668 } 1669 1670 // Maxmum number of de/inflater we cache 1671 private final int MAX_FLATER = 20; 1672 // List of available Inflater objects for decompression 1673 private final List<Inflater> inflaters = new ArrayList<>(); 1674 1675 // Gets an inflater from the list of available inflaters or allocates 1676 // a new one. 1677 private Inflater getInflater() { 1678 synchronized (inflaters) { 1679 int size = inflaters.size(); 1680 if (size > 0) { 1681 Inflater inf = inflaters.remove(size - 1); 1682 return inf; 1683 } else { 1684 return new Inflater(true); 1685 } 1686 } 1687 } 1688 1689 // Releases the specified inflater to the list of available inflaters. 1690 private void releaseInflater(Inflater inf) { 1691 synchronized (inflaters) { 1692 if (inflaters.size() < MAX_FLATER) { 1693 inf.reset(); 1694 inflaters.add(inf); 1695 } else { 1696 inf.end(); 1697 } 1698 } 1699 } 1700 1701 // List of available Deflater objects for compression 1702 private final List<Deflater> deflaters = new ArrayList<>(); 1703 1704 // Gets a deflater from the list of available deflaters or allocates 1705 // a new one. 
1706 private Deflater getDeflater() { 1707 synchronized (deflaters) { 1708 int size = deflaters.size(); 1709 if (size > 0) { 1710 Deflater def = deflaters.remove(size - 1); 1711 return def; 1712 } else { 1713 return new Deflater(Deflater.DEFAULT_COMPRESSION, true); 1714 } 1715 } 1716 } 1717 1718 // Releases the specified inflater to the list of available inflaters. 1719 private void releaseDeflater(Deflater def) { 1720 synchronized (deflaters) { 1721 if (inflaters.size() < MAX_FLATER) { 1722 def.reset(); 1723 deflaters.add(def); 1724 } else { 1725 def.end(); 1726 } 1727 } 1728 } 1729 1730 // End of central directory record 1731 static class END { 1732 // these 2 fields are not used by anyone and write() uses "0" 1733 // int disknum; 1734 // int sdisknum; 1735 int endsub; // endsub 1736 int centot; // 4 bytes 1737 long cenlen; // 4 bytes 1738 long cenoff; // 4 bytes 1739 int comlen; // comment length 1740 byte[] comment; 1741 1742 /* members of Zip64 end of central directory locator */ 1743 // int diskNum; 1744 long endpos; 1745 // int disktot; 1746 1747 void write(OutputStream os, long offset, boolean forceEnd64) throws IOException { 1748 boolean hasZip64 = forceEnd64; // false; 1749 long xlen = cenlen; 1750 long xoff = cenoff; 1751 if (xlen >= ZIP64_MINVAL) { 1752 xlen = ZIP64_MINVAL; 1753 hasZip64 = true; 1754 } 1755 if (xoff >= ZIP64_MINVAL) { 1756 xoff = ZIP64_MINVAL; 1757 hasZip64 = true; 1758 } 1759 int count = centot; 1760 if (count >= ZIP64_MINVAL32) { 1761 count = ZIP64_MINVAL32; 1762 hasZip64 = true; 1763 } 1764 if (hasZip64) { 1765 long off64 = offset; 1766 //zip64 end of central directory record 1767 writeInt(os, ZIP64_ENDSIG); // zip64 END record signature 1768 writeLong(os, ZIP64_ENDHDR - 12); // size of zip64 end 1769 writeShort(os, 45); // version made by 1770 writeShort(os, 45); // version needed to extract 1771 writeInt(os, 0); // number of this disk 1772 writeInt(os, 0); // central directory start disk 1773 writeLong(os, centot); // number of 
directory entries on disk 1774 writeLong(os, centot); // number of directory entries 1775 writeLong(os, cenlen); // length of central directory 1776 writeLong(os, cenoff); // offset of central directory 1777 1778 //zip64 end of central directory locator 1779 writeInt(os, ZIP64_LOCSIG); // zip64 END locator signature 1780 writeInt(os, 0); // zip64 END start disk 1781 writeLong(os, off64); // offset of zip64 END 1782 writeInt(os, 1); // total number of disks (?) 1783 } 1784 writeInt(os, ENDSIG); // END record signature 1785 writeShort(os, 0); // number of this disk 1786 writeShort(os, 0); // central directory start disk 1787 writeShort(os, count); // number of directory entries on disk 1788 writeShort(os, count); // total number of directory entries 1789 writeInt(os, xlen); // length of central directory 1790 writeInt(os, xoff); // offset of central directory 1791 if (comment != null) { // zip file comment 1792 writeShort(os, comment.length); 1793 writeBytes(os, comment); 1794 } else { 1795 writeShort(os, 0); 1796 } 1797 } 1798 } 1799 1800 // Internal node that links a "name" to its pos in cen table. 1801 // The node itself can be used as a "key" to lookup itself in 1802 // the HashMap inodes. 
    static class IndexNode {
        byte[] name;       // entry name; the CEN constructor guarantees a leading '/'
        int hashcode;      // node is hashable/hashed by its name
        int pos = -1;      // position in cen table, -1 means the
                           // entry does not exist in zip file
        boolean isdir;     // true if the name denotes a directory

        IndexNode(byte[] name, boolean isdir) {
            name(name);
            this.isdir = isdir;
            this.pos = -1;
        }

        IndexNode(byte[] name, int pos) {
            name(name);
            this.pos = pos;
        }

        // constructor for initCEN(): (1) remove trailing '/' (2) pad leading '/'
        IndexNode(byte[] cen, int pos, int nlen) {
            int noff = pos + CENHDR;
            if (cen[noff + nlen - 1] == '/') {
                isdir = true;
                nlen--;
            }
            if (nlen > 0 && cen[noff] == '/') {
                name = Arrays.copyOfRange(cen, noff, noff + nlen);
            } else {
                // pad a leading '/' so every stored name is absolute
                name = new byte[nlen + 1];
                System.arraycopy(cen, noff, name, 1, nlen);
                name[0] = '/';
            }
            name(name);
            this.pos = pos;
        }

        // per-thread reusable lookup key, avoids one allocation per lookup;
        // the key escapes only into Map.get(), never stored
        private static final ThreadLocal<IndexNode> cachedKey = new ThreadLocal<>();

        final static IndexNode keyOf(byte[] name) { // get a lookup key;
            IndexNode key = cachedKey.get();
            if (key == null) {
                key = new IndexNode(name, -1);
                cachedKey.set(key);
            }
            return key.as(name);
        }

        // Sets the name and (re)computes the cached hash code.
        final void name(byte[] name) {
            this.name = name;
            this.hashcode = Arrays.hashCode(name);
        }

        final IndexNode as(byte[] name) {           // reuse the node, mostly
            name(name);                             // as a lookup "key"
            return this;
        }

        boolean isDir() {
            return isdir;
        }

        public boolean equals(Object other) {
            if (!(other instanceof IndexNode)) {
                return false;
            }
            // NOTE(review): ParentLookup (declared elsewhere in this file)
            // apparently overrides equality; delegating here presumably keeps
            // the relation symmetric whichever side it appears on — confirm
            // against ParentLookup.equals
            if (other instanceof ParentLookup) {
                return ((ParentLookup)other).equals(this);
            }
            return Arrays.equals(name, ((IndexNode)other).name);
        }

        public int hashCode() {
            return hashcode;
        }

        IndexNode() {}
        IndexNode sibling;  // next sibling in the parent's child list
        IndexNode child;    // 1st child
    }
1882 1883 static class Entry extends IndexNode implements ZipFileAttributes { 1884 1885 static final int CEN = 1; // entry read from cen 1886 static final int NEW = 2; // updated contents in bytes or file 1887 static final int FILECH = 3; // fch update in "file" 1888 static final int COPY = 4; // copy of a CEN entry 1889 1890 byte[] bytes; // updated content bytes 1891 Path file; // use tmp file to store bytes; 1892 int type = CEN; // default is the entry read from cen 1893 1894 // entry attributes 1895 int version; 1896 int flag; 1897 int posixPerms = -1; // posix permissions 1898 int method = -1; // compression method 1899 long mtime = -1; // last modification time (in DOS time) 1900 long atime = -1; // last access time 1901 long ctime = -1; // create time 1902 long crc = -1; // crc-32 of entry data 1903 long csize = -1; // compressed size of entry data 1904 long size = -1; // uncompressed size of entry data 1905 byte[] extra; 1906 1907 // cen 1908 1909 // these fields are not used by anyone and writeCEN uses "0" 1910 // int versionMade; 1911 // int disk; 1912 // int attrs; 1913 // long attrsEx; 1914 long locoff; 1915 byte[] comment; 1916 1917 Entry() {} 1918 1919 Entry(byte[] name, boolean isdir, int method) { 1920 name(name); 1921 this.isdir = isdir; 1922 this.mtime = this.ctime = this.atime = System.currentTimeMillis(); 1923 this.crc = 0; 1924 this.size = 0; 1925 this.csize = 0; 1926 this.method = method; 1927 } 1928 1929 @SuppressWarnings("unchecked") 1930 Entry(byte[] name, int type, boolean isdir, int method, FileAttribute<?>... 
attrs) { 1931 this(name, isdir, method); 1932 this.type = type; 1933 for (FileAttribute<?> attr : attrs) { 1934 String attrName = attr.name(); 1935 if (attrName.equals("posix:permissions") || attrName.equals("unix:permissions")) { 1936 posixPerms = ZipUtils.permsToFlags((Set<PosixFilePermission>)attr.value()); 1937 } 1938 } 1939 } 1940 1941 Entry(Entry e, int type) { 1942 name(e.name); 1943 this.isdir = e.isdir; 1944 this.version = e.version; 1945 this.ctime = e.ctime; 1946 this.atime = e.atime; 1947 this.mtime = e.mtime; 1948 this.crc = e.crc; 1949 this.size = e.size; 1950 this.csize = e.csize; 1951 this.method = e.method; 1952 this.extra = e.extra; 1953 /* 1954 this.versionMade = e.versionMade; 1955 this.disk = e.disk; 1956 this.attrs = e.attrs; 1957 this.attrsEx = e.attrsEx; 1958 */ 1959 this.locoff = e.locoff; 1960 this.comment = e.comment; 1961 this.posixPerms = e.posixPerms; 1962 this.type = type; 1963 } 1964 1965 @SuppressWarnings("unchecked") 1966 Entry(byte[] name, Path file, int type, FileAttribute<?>... attrs) { 1967 this(name, type, false, METHOD_STORED); 1968 this.file = file; 1969 for (FileAttribute<?> attr : attrs) { 1970 String attrName = attr.name(); 1971 if (attrName.equals("posix:permissions") || attrName.equals("unix:permissions")) { 1972 posixPerms = ZipUtils.permsToFlags((Set<PosixFilePermission>)attr.value()); 1973 } 1974 } 1975 } 1976 1977 int version(boolean zip64) throws ZipException { 1978 if (zip64) { 1979 return 45; 1980 } 1981 if (method == METHOD_DEFLATED) 1982 return 20; 1983 else if (method == METHOD_STORED) 1984 return 10; 1985 throw new ZipException("unsupported compression method"); 1986 } 1987 1988 /** 1989 * Adds information about compatibility of file attribute information 1990 * to a version value. 1991 */ 1992 int versionMadeBy(int version) { 1993 return (posixPerms < 0) ? 
version : 1994 VERSION_BASE_UNIX | (version & 0xff); 1995 } 1996 1997 ///////////////////// CEN ////////////////////// 1998 static Entry readCEN(ZipFileSystem zipfs, IndexNode inode) 1999 throws IOException 2000 { 2001 return new Entry().cen(zipfs, inode); 2002 } 2003 2004 private Entry cen(ZipFileSystem zipfs, IndexNode inode) 2005 throws IOException 2006 { 2007 byte[] cen = zipfs.cen; 2008 int pos = inode.pos; 2009 if (!cenSigAt(cen, pos)) 2010 zerror("invalid CEN header (bad signature)"); 2011 version = CENVER(cen, pos); 2012 flag = CENFLG(cen, pos); 2013 method = CENHOW(cen, pos); 2014 mtime = dosToJavaTime(CENTIM(cen, pos)); 2015 crc = CENCRC(cen, pos); 2016 csize = CENSIZ(cen, pos); 2017 size = CENLEN(cen, pos); 2018 int nlen = CENNAM(cen, pos); 2019 int elen = CENEXT(cen, pos); 2020 int clen = CENCOM(cen, pos); 2021 /* 2022 versionMade = CENVEM(cen, pos); 2023 disk = CENDSK(cen, pos); 2024 attrs = CENATT(cen, pos); 2025 attrsEx = CENATX(cen, pos); 2026 */ 2027 if (CENVEM_FA(cen, pos) == FILE_ATTRIBUTES_UNIX) { 2028 posixPerms = CENATX_PERMS(cen, pos) & 0xFFF; // 12 bits for setuid, setgid, sticky + perms 2029 } 2030 locoff = CENOFF(cen, pos); 2031 pos += CENHDR; 2032 this.name = inode.name; 2033 this.isdir = inode.isdir; 2034 this.hashcode = inode.hashcode; 2035 2036 pos += nlen; 2037 if (elen > 0) { 2038 extra = Arrays.copyOfRange(cen, pos, pos + elen); 2039 pos += elen; 2040 readExtra(zipfs); 2041 } 2042 if (clen > 0) { 2043 comment = Arrays.copyOfRange(cen, pos, pos + clen); 2044 } 2045 return this; 2046 } 2047 2048 int writeCEN(OutputStream os) throws IOException { 2049 long csize0 = csize; 2050 long size0 = size; 2051 long locoff0 = locoff; 2052 int elen64 = 0; // extra for ZIP64 2053 int elenNTFS = 0; // extra for NTFS (a/c/mtime) 2054 int elenEXTT = 0; // extra for Extended Timestamp 2055 boolean foundExtraTime = false; // if time stamp NTFS, EXTT present 2056 2057 byte[] zname = isdir ? 
toDirectoryPath(name) : name; 2058 2059 // confirm size/length 2060 int nlen = (zname != null) ? zname.length - 1 : 0; // name has [0] as "slash" 2061 int elen = (extra != null) ? extra.length : 0; 2062 int eoff = 0; 2063 int clen = (comment != null) ? comment.length : 0; 2064 if (csize >= ZIP64_MINVAL) { 2065 csize0 = ZIP64_MINVAL; 2066 elen64 += 8; // csize(8) 2067 } 2068 if (size >= ZIP64_MINVAL) { 2069 size0 = ZIP64_MINVAL; // size(8) 2070 elen64 += 8; 2071 } 2072 if (locoff >= ZIP64_MINVAL) { 2073 locoff0 = ZIP64_MINVAL; 2074 elen64 += 8; // offset(8) 2075 } 2076 if (elen64 != 0) { 2077 elen64 += 4; // header and data sz 4 bytes 2078 } 2079 boolean zip64 = (elen64 != 0); 2080 int version0 = version(zip64); 2081 while (eoff + 4 < elen) { 2082 int tag = SH(extra, eoff); 2083 int sz = SH(extra, eoff + 2); 2084 if (tag == EXTID_EXTT || tag == EXTID_NTFS) { 2085 foundExtraTime = true; 2086 } 2087 eoff += (4 + sz); 2088 } 2089 if (!foundExtraTime) { 2090 if (isWindows) { // use NTFS 2091 elenNTFS = 36; // total 36 bytes 2092 } else { // Extended Timestamp otherwise 2093 elenEXTT = 9; // only mtime in cen 2094 } 2095 } 2096 writeInt(os, CENSIG); // CEN header signature 2097 writeShort(os, versionMadeBy(version0)); // version made by 2098 writeShort(os, version0); // version needed to extract 2099 writeShort(os, flag); // general purpose bit flag 2100 writeShort(os, method); // compression method 2101 // last modification time 2102 writeInt(os, (int)javaToDosTime(mtime)); 2103 writeInt(os, crc); // crc-32 2104 writeInt(os, csize0); // compressed size 2105 writeInt(os, size0); // uncompressed size 2106 writeShort(os, nlen); 2107 writeShort(os, elen + elen64 + elenNTFS + elenEXTT); 2108 2109 if (comment != null) { 2110 writeShort(os, Math.min(clen, 0xffff)); 2111 } else { 2112 writeShort(os, 0); 2113 } 2114 writeShort(os, 0); // starting disk number 2115 writeShort(os, 0); // internal file attributes (unused) 2116 writeInt(os, posixPerms > 0 ? 
posixPerms << 16 : 0); // external file 2117 // attributes, used for storing posix 2118 // permissions 2119 writeInt(os, locoff0); // relative offset of local header 2120 writeBytes(os, zname, 1, nlen); 2121 if (zip64) { 2122 writeShort(os, EXTID_ZIP64);// Zip64 extra 2123 writeShort(os, elen64 - 4); // size of "this" extra block 2124 if (size0 == ZIP64_MINVAL) 2125 writeLong(os, size); 2126 if (csize0 == ZIP64_MINVAL) 2127 writeLong(os, csize); 2128 if (locoff0 == ZIP64_MINVAL) 2129 writeLong(os, locoff); 2130 } 2131 if (elenNTFS != 0) { 2132 writeShort(os, EXTID_NTFS); 2133 writeShort(os, elenNTFS - 4); 2134 writeInt(os, 0); // reserved 2135 writeShort(os, 0x0001); // NTFS attr tag 2136 writeShort(os, 24); 2137 writeLong(os, javaToWinTime(mtime)); 2138 writeLong(os, javaToWinTime(atime)); 2139 writeLong(os, javaToWinTime(ctime)); 2140 } 2141 if (elenEXTT != 0) { 2142 writeShort(os, EXTID_EXTT); 2143 writeShort(os, elenEXTT - 4); 2144 if (ctime == -1) 2145 os.write(0x3); // mtime and atime 2146 else 2147 os.write(0x7); // mtime, atime and ctime 2148 writeInt(os, javaToUnixTime(mtime)); 2149 } 2150 if (extra != null) // whatever not recognized 2151 writeBytes(os, extra); 2152 if (comment != null) //TBD: 0, Math.min(commentBytes.length, 0xffff)); 2153 writeBytes(os, comment); 2154 return CENHDR + nlen + elen + clen + elen64 + elenNTFS + elenEXTT; 2155 } 2156 2157 ///////////////////// LOC ////////////////////// 2158 2159 int writeLOC(OutputStream os) throws IOException { 2160 byte[] zname = isdir ? toDirectoryPath(name) : name; 2161 int nlen = (zname != null) ? zname.length - 1 : 0; // [0] is slash 2162 int elen = (extra != null) ? 
extra.length : 0; 2163 boolean foundExtraTime = false; // if extra timestamp present 2164 int eoff = 0; 2165 int elen64 = 0; 2166 boolean zip64 = false; 2167 int elenEXTT = 0; 2168 int elenNTFS = 0; 2169 writeInt(os, LOCSIG); // LOC header signature 2170 if ((flag & FLAG_DATADESCR) != 0) { 2171 writeShort(os, version(zip64)); // version needed to extract 2172 writeShort(os, flag); // general purpose bit flag 2173 writeShort(os, method); // compression method 2174 // last modification time 2175 writeInt(os, (int)javaToDosTime(mtime)); 2176 // store size, uncompressed size, and crc-32 in data descriptor 2177 // immediately following compressed entry data 2178 writeInt(os, 0); 2179 writeInt(os, 0); 2180 writeInt(os, 0); 2181 } else { 2182 if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) { 2183 elen64 = 20; //headid(2) + size(2) + size(8) + csize(8) 2184 zip64 = true; 2185 } 2186 writeShort(os, version(zip64)); // version needed to extract 2187 writeShort(os, flag); // general purpose bit flag 2188 writeShort(os, method); // compression method 2189 // last modification time 2190 writeInt(os, (int)javaToDosTime(mtime)); 2191 writeInt(os, crc); // crc-32 2192 if (zip64) { 2193 writeInt(os, ZIP64_MINVAL); 2194 writeInt(os, ZIP64_MINVAL); 2195 } else { 2196 writeInt(os, csize); // compressed size 2197 writeInt(os, size); // uncompressed size 2198 } 2199 } 2200 while (eoff + 4 < elen) { 2201 int tag = SH(extra, eoff); 2202 int sz = SH(extra, eoff + 2); 2203 if (tag == EXTID_EXTT || tag == EXTID_NTFS) { 2204 foundExtraTime = true; 2205 } 2206 eoff += (4 + sz); 2207 } 2208 if (!foundExtraTime) { 2209 if (isWindows) { 2210 elenNTFS = 36; // NTFS, total 36 bytes 2211 } else { // on unix use "ext time" 2212 elenEXTT = 9; 2213 if (atime != -1) 2214 elenEXTT += 4; 2215 if (ctime != -1) 2216 elenEXTT += 4; 2217 } 2218 } 2219 writeShort(os, nlen); 2220 writeShort(os, elen + elen64 + elenNTFS + elenEXTT); 2221 writeBytes(os, zname, 1, nlen); 2222 if (zip64) { 2223 writeShort(os, 
EXTID_ZIP64); 2224 writeShort(os, 16); 2225 writeLong(os, size); 2226 writeLong(os, csize); 2227 } 2228 if (elenNTFS != 0) { 2229 writeShort(os, EXTID_NTFS); 2230 writeShort(os, elenNTFS - 4); 2231 writeInt(os, 0); // reserved 2232 writeShort(os, 0x0001); // NTFS attr tag 2233 writeShort(os, 24); 2234 writeLong(os, javaToWinTime(mtime)); 2235 writeLong(os, javaToWinTime(atime)); 2236 writeLong(os, javaToWinTime(ctime)); 2237 } 2238 if (elenEXTT != 0) { 2239 writeShort(os, EXTID_EXTT); 2240 writeShort(os, elenEXTT - 4);// size for the folowing data block 2241 int fbyte = 0x1; 2242 if (atime != -1) // mtime and atime 2243 fbyte |= 0x2; 2244 if (ctime != -1) // mtime, atime and ctime 2245 fbyte |= 0x4; 2246 os.write(fbyte); // flags byte 2247 writeInt(os, javaToUnixTime(mtime)); 2248 if (atime != -1) 2249 writeInt(os, javaToUnixTime(atime)); 2250 if (ctime != -1) 2251 writeInt(os, javaToUnixTime(ctime)); 2252 } 2253 if (extra != null) { 2254 writeBytes(os, extra); 2255 } 2256 return LOCHDR + nlen + elen + elen64 + elenNTFS + elenEXTT; 2257 } 2258 2259 // Data Descriptor 2260 int writeEXT(OutputStream os) throws IOException { 2261 writeInt(os, EXTSIG); // EXT header signature 2262 writeInt(os, crc); // crc-32 2263 if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) { 2264 writeLong(os, csize); 2265 writeLong(os, size); 2266 return 24; 2267 } else { 2268 writeInt(os, csize); // compressed size 2269 writeInt(os, size); // uncompressed size 2270 return 16; 2271 } 2272 } 2273 2274 // read NTFS, UNIX and ZIP64 data from cen.extra 2275 void readExtra(ZipFileSystem zipfs) throws IOException { 2276 if (extra == null) 2277 return; 2278 int elen = extra.length; 2279 int off = 0; 2280 int newOff = 0; 2281 while (off + 4 < elen) { 2282 // extra spec: HeaderID+DataSize+Data 2283 int pos = off; 2284 int tag = SH(extra, pos); 2285 int sz = SH(extra, pos + 2); 2286 pos += 4; 2287 if (pos + sz > elen) // invalid data 2288 break; 2289 switch (tag) { 2290 case EXTID_ZIP64 : 2291 if (size 
== ZIP64_MINVAL) { 2292 if (pos + 8 > elen) // invalid zip64 extra 2293 break; // fields, just skip 2294 size = LL(extra, pos); 2295 pos += 8; 2296 } 2297 if (csize == ZIP64_MINVAL) { 2298 if (pos + 8 > elen) 2299 break; 2300 csize = LL(extra, pos); 2301 pos += 8; 2302 } 2303 if (locoff == ZIP64_MINVAL) { 2304 if (pos + 8 > elen) 2305 break; 2306 locoff = LL(extra, pos); 2307 pos += 8; 2308 } 2309 break; 2310 case EXTID_NTFS: 2311 if (sz < 32) 2312 break; 2313 pos += 4; // reserved 4 bytes 2314 if (SH(extra, pos) != 0x0001) 2315 break; 2316 if (SH(extra, pos + 2) != 24) 2317 break; 2318 // override the loc field, datatime here is 2319 // more "accurate" 2320 mtime = winToJavaTime(LL(extra, pos + 4)); 2321 atime = winToJavaTime(LL(extra, pos + 12)); 2322 ctime = winToJavaTime(LL(extra, pos + 20)); 2323 break; 2324 case EXTID_EXTT: 2325 // spec says the Extened timestamp in cen only has mtime 2326 // need to read the loc to get the extra a/ctime, if flag 2327 // "zipinfo-time" is not specified to false; 2328 // there is performance cost (move up to loc and read) to 2329 // access the loc table foreach entry; 2330 if (zipfs.noExtt) { 2331 if (sz == 5) 2332 mtime = unixToJavaTime(LG(extra, pos + 1)); 2333 break; 2334 } 2335 byte[] buf = new byte[LOCHDR]; 2336 if (zipfs.readFullyAt(buf, 0, buf.length , locoff) 2337 != buf.length) 2338 throw new ZipException("loc: reading failed"); 2339 if (!locSigAt(buf, 0)) 2340 throw new ZipException("loc: wrong sig ->" 2341 + Long.toString(getSig(buf, 0), 16)); 2342 int locElen = LOCEXT(buf); 2343 if (locElen < 9) // EXTT is at lease 9 bytes 2344 break; 2345 int locNlen = LOCNAM(buf); 2346 buf = new byte[locElen]; 2347 if (zipfs.readFullyAt(buf, 0, buf.length , locoff + LOCHDR + locNlen) 2348 != buf.length) 2349 throw new ZipException("loc extra: reading failed"); 2350 int locPos = 0; 2351 while (locPos + 4 < buf.length) { 2352 int locTag = SH(buf, locPos); 2353 int locSZ = SH(buf, locPos + 2); 2354 locPos += 4; 2355 if (locTag != 
EXTID_EXTT) { 2356 locPos += locSZ; 2357 continue; 2358 } 2359 int end = locPos + locSZ - 4; 2360 int flag = CH(buf, locPos++); 2361 if ((flag & 0x1) != 0 && locPos <= end) { 2362 mtime = unixToJavaTime(LG(buf, locPos)); 2363 locPos += 4; 2364 } 2365 if ((flag & 0x2) != 0 && locPos <= end) { 2366 atime = unixToJavaTime(LG(buf, locPos)); 2367 locPos += 4; 2368 } 2369 if ((flag & 0x4) != 0 && locPos <= end) { 2370 ctime = unixToJavaTime(LG(buf, locPos)); 2371 locPos += 4; 2372 } 2373 break; 2374 } 2375 break; 2376 default: // unknown tag 2377 System.arraycopy(extra, off, extra, newOff, sz + 4); 2378 newOff += (sz + 4); 2379 } 2380 off += (sz + 4); 2381 } 2382 if (newOff != 0 && newOff != extra.length) 2383 extra = Arrays.copyOf(extra, newOff); 2384 else 2385 extra = null; 2386 } 2387 2388 ///////// basic file attributes /////////// 2389 @Override 2390 public FileTime creationTime() { 2391 return FileTime.fromMillis(ctime == -1 ? mtime : ctime); 2392 } 2393 2394 @Override 2395 public boolean isDirectory() { 2396 return isDir(); 2397 } 2398 2399 @Override 2400 public boolean isOther() { 2401 return false; 2402 } 2403 2404 @Override 2405 public boolean isRegularFile() { 2406 return !isDir(); 2407 } 2408 2409 @Override 2410 public FileTime lastAccessTime() { 2411 return FileTime.fromMillis(atime == -1 ? 
mtime : atime); 2412 } 2413 2414 @Override 2415 public FileTime lastModifiedTime() { 2416 return FileTime.fromMillis(mtime); 2417 } 2418 2419 @Override 2420 public long size() { 2421 return size; 2422 } 2423 2424 @Override 2425 public boolean isSymbolicLink() { 2426 return false; 2427 } 2428 2429 @Override 2430 public Object fileKey() { 2431 return null; 2432 } 2433 2434 ///////// posix file attributes /////////// 2435 2436 @Override 2437 public UserPrincipal owner() { 2438 throw new UnsupportedOperationException( 2439 "ZipFileSystem does not support owner."); 2440 } 2441 2442 @Override 2443 public GroupPrincipal group() { 2444 throw new UnsupportedOperationException( 2445 "ZipFileSystem does not support group."); 2446 } 2447 2448 @Override 2449 public Set<PosixFilePermission> permissions() { 2450 if (posixPerms == -1) { 2451 // in case there are no Posix permissions associated with the 2452 // entry, we should not return an empty set of permissions 2453 // because that would be an explicit set of permissions meaning 2454 // no permissions for anyone 2455 throw new UnsupportedOperationException( 2456 "No posix permissions associated with zip entry."); 2457 } 2458 return ZipUtils.permsFromFlags(posixPerms); 2459 } 2460 2461 ///////// zip file attributes /////////// 2462 2463 @Override 2464 public long compressedSize() { 2465 return csize; 2466 } 2467 2468 @Override 2469 public long crc() { 2470 return crc; 2471 } 2472 2473 @Override 2474 public int method() { 2475 return method; 2476 } 2477 2478 @Override 2479 public byte[] extra() { 2480 if (extra != null) 2481 return Arrays.copyOf(extra, extra.length); 2482 return null; 2483 } 2484 2485 @Override 2486 public byte[] comment() { 2487 if (comment != null) 2488 return Arrays.copyOf(comment, comment.length); 2489 return null; 2490 } 2491 2492 @Override 2493 public String toString() { 2494 StringBuilder sb = new StringBuilder(1024); 2495 Formatter fm = new Formatter(sb); 2496 fm.format(" name : %s%n", new String(name)); 
2497 fm.format(" creationTime : %tc%n", creationTime().toMillis()); 2498 fm.format(" lastAccessTime : %tc%n", lastAccessTime().toMillis()); 2499 fm.format(" lastModifiedTime: %tc%n", lastModifiedTime().toMillis()); 2500 fm.format(" isRegularFile : %b%n", isRegularFile()); 2501 fm.format(" isDirectory : %b%n", isDirectory()); 2502 fm.format(" isSymbolicLink : %b%n", isSymbolicLink()); 2503 fm.format(" isOther : %b%n", isOther()); 2504 fm.format(" fileKey : %s%n", fileKey()); 2505 fm.format(" size : %d%n", size()); 2506 fm.format(" compressedSize : %d%n", compressedSize()); 2507 fm.format(" crc : %x%n", crc()); 2508 fm.format(" method : %d%n", method()); 2509 if (posixPerms != -1) { 2510 fm.format(" permissions : %s%n", permissions()); 2511 } 2512 fm.close(); 2513 return sb.toString(); 2514 } 2515 } 2516 2517 // ZIP directory has two issues: 2518 // (1) ZIP spec does not require the ZIP file to include 2519 // directory entry 2520 // (2) all entries are not stored/organized in a "tree" 2521 // structure. 2522 // A possible solution is to build the node tree ourself as 2523 // implemented below. 
2524 2525 // default time stamp for pseudo entries 2526 private long zfsDefaultTimeStamp = System.currentTimeMillis(); 2527 2528 private void removeFromTree(IndexNode inode) { 2529 IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(inode.name))); 2530 IndexNode child = parent.child; 2531 if (child.equals(inode)) { 2532 parent.child = child.sibling; 2533 } else { 2534 IndexNode last = child; 2535 while ((child = child.sibling) != null) { 2536 if (child.equals(inode)) { 2537 last.sibling = child.sibling; 2538 break; 2539 } else { 2540 last = child; 2541 } 2542 } 2543 } 2544 } 2545 2546 // purely for parent lookup, so we don't have to copy the parent 2547 // name every time 2548 static class ParentLookup extends IndexNode { 2549 int len; 2550 ParentLookup() {} 2551 2552 final ParentLookup as(byte[] name, int len) { // as a lookup "key" 2553 name(name, len); 2554 return this; 2555 } 2556 2557 void name(byte[] name, int len) { 2558 this.name = name; 2559 this.len = len; 2560 // calculate the hashcode the same way as Arrays.hashCode() does 2561 int result = 1; 2562 for (int i = 0; i < len; i++) 2563 result = 31 * result + name[i]; 2564 this.hashcode = result; 2565 } 2566 2567 @Override 2568 public boolean equals(Object other) { 2569 if (!(other instanceof IndexNode)) { 2570 return false; 2571 } 2572 byte[] oname = ((IndexNode)other).name; 2573 return Arrays.equals(name, 0, len, 2574 oname, 0, oname.length); 2575 } 2576 2577 } 2578 2579 private void buildNodeTree() throws IOException { 2580 beginWrite(); 2581 try { 2582 IndexNode root = inodes.get(LOOKUPKEY.as(ROOTPATH)); 2583 if (root == null) { 2584 root = new IndexNode(ROOTPATH, true); 2585 } else { 2586 inodes.remove(root); 2587 } 2588 IndexNode[] nodes = inodes.keySet().toArray(new IndexNode[0]); 2589 inodes.put(root, root); 2590 ParentLookup lookup = new ParentLookup(); 2591 for (IndexNode node : nodes) { 2592 IndexNode parent; 2593 while (true) { 2594 int off = getParentOff(node.name); 2595 if (off <= 1) { // 
parent is root 2596 node.sibling = root.child; 2597 root.child = node; 2598 break; 2599 } 2600 lookup = lookup.as(node.name, off); 2601 if (inodes.containsKey(lookup)) { 2602 parent = inodes.get(lookup); 2603 node.sibling = parent.child; 2604 parent.child = node; 2605 break; 2606 } 2607 // add new pseudo directory entry 2608 parent = new IndexNode(Arrays.copyOf(node.name, off), true); 2609 inodes.put(parent, parent); 2610 node.sibling = parent.child; 2611 parent.child = node; 2612 node = parent; 2613 } 2614 } 2615 } finally { 2616 endWrite(); 2617 } 2618 } 2619 }