1 /* 2 * Copyright (c) 2009, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. Oracle designates this 8 * particular file as subject to the "Classpath" exception as provided 9 * by Oracle in the LICENSE file that accompanied this code. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have any 23 * questions. 
24 */ 25 26 package jdk.nio.zipfs; 27 28 import static java.lang.Boolean.TRUE; 29 import static jdk.nio.zipfs.ZipConstants.*; 30 import static jdk.nio.zipfs.ZipUtils.*; 31 import static java.nio.file.StandardOpenOption.*; 32 import static java.nio.file.StandardCopyOption.*; 33 34 import java.io.BufferedOutputStream; 35 import java.io.ByteArrayInputStream; 36 import java.io.ByteArrayOutputStream; 37 import java.io.EOFException; 38 import java.io.FilterOutputStream; 39 import java.io.IOException; 40 import java.io.InputStream; 41 import java.io.OutputStream; 42 import java.nio.ByteBuffer; 43 import java.nio.MappedByteBuffer; 44 import java.nio.channels.FileChannel; 45 import java.nio.channels.FileLock; 46 import java.nio.channels.ReadableByteChannel; 47 import java.nio.channels.SeekableByteChannel; 48 import java.nio.channels.WritableByteChannel; 49 import java.nio.file.*; 50 import java.nio.file.attribute.FileAttribute; 51 import java.nio.file.attribute.FileTime; 52 import java.nio.file.attribute.GroupPrincipal; 53 import java.nio.file.attribute.PosixFilePermission; 54 import java.nio.file.attribute.PosixFilePermissions; 55 import java.nio.file.attribute.UserPrincipal; 56 import java.nio.file.attribute.UserPrincipalLookupService; 57 import java.nio.file.spi.FileSystemProvider; 58 import java.security.AccessController; 59 import java.security.PrivilegedAction; 60 import java.security.PrivilegedActionException; 61 import java.security.PrivilegedExceptionAction; 62 import java.util.ArrayList; 63 import java.util.Arrays; 64 import java.util.Collections; 65 import java.util.Formatter; 66 import java.util.HashSet; 67 import java.util.Iterator; 68 import java.util.LinkedHashMap; 69 import java.util.List; 70 import java.util.Map; 71 import java.util.Objects; 72 import java.util.Set; 73 import java.util.concurrent.locks.ReadWriteLock; 74 import java.util.concurrent.locks.ReentrantReadWriteLock; 75 import java.util.regex.Pattern; 76 import java.util.zip.CRC32; 77 import 
java.util.zip.Deflater; 78 import java.util.zip.DeflaterOutputStream; 79 import java.util.zip.Inflater; 80 import java.util.zip.InflaterInputStream; 81 import java.util.zip.ZipException; 82 83 /** 84 * A FileSystem built on a zip file 85 * 86 * @author Xueming Shen 87 */ 88 class ZipFileSystem extends FileSystem { 89 private static final int FILE_ATTRIBUTES_UNIX = 3; 90 private static final int VERSION_BASE_UNIX = FILE_ATTRIBUTES_UNIX << 8; 91 private final ZipFileSystemProvider provider; 92 private final Path zfpath; 93 final ZipCoder zc; 94 private final ZipPath rootdir; 95 private boolean readOnly = false; // readonly file system 96 97 // configurable by env map 98 private final boolean noExtt; // see readExtra() 99 private final boolean useTempFile; // use a temp file for newOS, default 100 // is to use BAOS for better performance 101 private static final boolean isWindows = AccessController.doPrivileged( 102 (PrivilegedAction<Boolean>) () -> System.getProperty("os.name") 103 .startsWith("Windows")); 104 private final boolean forceEnd64; 105 private final int defaultMethod; // METHOD_STORED if "noCompression=true" 106 // METHOD_DEFLATED otherwise 107 108 ZipFileSystem(ZipFileSystemProvider provider, 109 Path zfpath, 110 Map<String, ?> env) throws IOException 111 { 112 // default encoding for name/comment 113 String nameEncoding = env.containsKey("encoding") ? 114 (String)env.get("encoding") : "UTF-8"; 115 this.noExtt = "false".equals(env.get("zipinfo-time")); 116 this.useTempFile = isTrue(env, "useTempFile"); 117 this.forceEnd64 = isTrue(env, "forceZIP64End"); 118 this.defaultMethod = isTrue(env, "noCompression") ? 
METHOD_STORED: METHOD_DEFLATED; 119 if (Files.notExists(zfpath)) { 120 // create a new zip if not exists 121 if (isTrue(env, "create")) { 122 try (OutputStream os = Files.newOutputStream(zfpath, CREATE_NEW, WRITE)) { 123 new END().write(os, 0, forceEnd64); 124 } 125 } else { 126 throw new FileSystemNotFoundException(zfpath.toString()); 127 } 128 } 129 // sm and existence check 130 zfpath.getFileSystem().provider().checkAccess(zfpath, AccessMode.READ); 131 boolean writeable = AccessController.doPrivileged( 132 (PrivilegedAction<Boolean>) () -> Files.isWritable(zfpath)); 133 this.readOnly = !writeable; 134 this.zc = ZipCoder.get(nameEncoding); 135 this.rootdir = new ZipPath(this, new byte[]{'/'}); 136 this.ch = Files.newByteChannel(zfpath, READ); 137 try { 138 this.cen = initCEN(); 139 } catch (IOException x) { 140 try { 141 this.ch.close(); 142 } catch (IOException xx) { 143 x.addSuppressed(xx); 144 } 145 throw x; 146 } 147 this.provider = provider; 148 this.zfpath = zfpath; 149 } 150 151 // returns true if there is a name=true/"true" setting in env 152 private static boolean isTrue(Map<String, ?> env, String name) { 153 return "true".equals(env.get(name)) || TRUE.equals(env.get(name)); 154 } 155 156 @Override 157 public FileSystemProvider provider() { 158 return provider; 159 } 160 161 @Override 162 public String getSeparator() { 163 return "/"; 164 } 165 166 @Override 167 public boolean isOpen() { 168 return isOpen; 169 } 170 171 @Override 172 public boolean isReadOnly() { 173 return readOnly; 174 } 175 176 private void checkWritable() throws IOException { 177 if (readOnly) 178 throw new ReadOnlyFileSystemException(); 179 } 180 181 void setReadOnly() { 182 this.readOnly = true; 183 } 184 185 @Override 186 public Iterable<Path> getRootDirectories() { 187 return List.of(rootdir); 188 } 189 190 ZipPath getRootDir() { 191 return rootdir; 192 } 193 194 @Override 195 public ZipPath getPath(String first, String... 
more) { 196 if (more.length == 0) { 197 return new ZipPath(this, first); 198 } 199 StringBuilder sb = new StringBuilder(); 200 sb.append(first); 201 for (String path : more) { 202 if (path.length() > 0) { 203 if (sb.length() > 0) { 204 sb.append('/'); 205 } 206 sb.append(path); 207 } 208 } 209 return new ZipPath(this, sb.toString()); 210 } 211 212 @Override 213 public UserPrincipalLookupService getUserPrincipalLookupService() { 214 throw new UnsupportedOperationException(); 215 } 216 217 @Override 218 public WatchService newWatchService() { 219 throw new UnsupportedOperationException(); 220 } 221 222 FileStore getFileStore(ZipPath path) { 223 return new ZipFileStore(path); 224 } 225 226 @Override 227 public Iterable<FileStore> getFileStores() { 228 return List.of(new ZipFileStore(rootdir)); 229 } 230 231 private static final Set<String> supportedFileAttributeViews = 232 Set.of("basic", "zip"); 233 234 @Override 235 public Set<String> supportedFileAttributeViews() { 236 return supportedFileAttributeViews; 237 } 238 239 @Override 240 public String toString() { 241 return zfpath.toString(); 242 } 243 244 Path getZipFile() { 245 return zfpath; 246 } 247 248 private static final String GLOB_SYNTAX = "glob"; 249 private static final String REGEX_SYNTAX = "regex"; 250 251 @Override 252 public PathMatcher getPathMatcher(String syntaxAndInput) { 253 int pos = syntaxAndInput.indexOf(':'); 254 if (pos <= 0 || pos == syntaxAndInput.length()) { 255 throw new IllegalArgumentException(); 256 } 257 String syntax = syntaxAndInput.substring(0, pos); 258 String input = syntaxAndInput.substring(pos + 1); 259 String expr; 260 if (syntax.equalsIgnoreCase(GLOB_SYNTAX)) { 261 expr = toRegexPattern(input); 262 } else { 263 if (syntax.equalsIgnoreCase(REGEX_SYNTAX)) { 264 expr = input; 265 } else { 266 throw new UnsupportedOperationException("Syntax '" + syntax + 267 "' not recognized"); 268 } 269 } 270 // return matcher 271 final Pattern pattern = Pattern.compile(expr); 272 return new 
PathMatcher() { 273 @Override 274 public boolean matches(Path path) { 275 return pattern.matcher(path.toString()).matches(); 276 } 277 }; 278 } 279 280 @Override 281 public void close() throws IOException { 282 beginWrite(); 283 try { 284 if (!isOpen) 285 return; 286 isOpen = false; // set closed 287 } finally { 288 endWrite(); 289 } 290 if (!streams.isEmpty()) { // unlock and close all remaining streams 291 Set<InputStream> copy = new HashSet<>(streams); 292 for (InputStream is: copy) 293 is.close(); 294 } 295 beginWrite(); // lock and sync 296 try { 297 AccessController.doPrivileged((PrivilegedExceptionAction<Void>) () -> { 298 sync(); return null; 299 }); 300 ch.close(); // close the ch just in case no update 301 // and sync didn't close the ch 302 } catch (PrivilegedActionException e) { 303 throw (IOException)e.getException(); 304 } finally { 305 endWrite(); 306 } 307 308 synchronized (inflaters) { 309 for (Inflater inf : inflaters) 310 inf.end(); 311 } 312 synchronized (deflaters) { 313 for (Deflater def : deflaters) 314 def.end(); 315 } 316 317 IOException ioe = null; 318 synchronized (tmppaths) { 319 for (Path p: tmppaths) { 320 try { 321 AccessController.doPrivileged( 322 (PrivilegedExceptionAction<Boolean>)() -> Files.deleteIfExists(p)); 323 } catch (PrivilegedActionException e) { 324 IOException x = (IOException)e.getException(); 325 if (ioe == null) 326 ioe = x; 327 else 328 ioe.addSuppressed(x); 329 } 330 } 331 } 332 provider.removeFileSystem(zfpath, this); 333 if (ioe != null) 334 throw ioe; 335 } 336 337 ZipFileAttributes getFileAttributes(byte[] path) 338 throws IOException 339 { 340 Entry e; 341 beginRead(); 342 try { 343 ensureOpen(); 344 e = getEntry(path); 345 if (e == null) { 346 IndexNode inode = getInode(path); 347 if (inode == null) 348 return null; 349 // pseudo directory, uses METHOD_STORED 350 e = new Entry(inode.name, inode.isdir, METHOD_STORED); 351 e.mtime = e.atime = e.ctime = zfsDefaultTimeStamp; 352 } 353 } finally { 354 endRead(); 
355 } 356 return e; 357 } 358 359 void checkAccess(byte[] path) throws IOException { 360 beginRead(); 361 try { 362 ensureOpen(); 363 // is it necessary to readCEN as a sanity check? 364 if (getInode(path) == null) { 365 throw new NoSuchFileException(toString()); 366 } 367 368 } finally { 369 endRead(); 370 } 371 } 372 373 void setTimes(byte[] path, FileTime mtime, FileTime atime, FileTime ctime) 374 throws IOException 375 { 376 checkWritable(); 377 beginWrite(); 378 try { 379 ensureOpen(); 380 Entry e = getEntry(path); // ensureOpen checked 381 if (e == null) 382 throw new NoSuchFileException(getString(path)); 383 if (e.type == Entry.CEN) 384 e.type = Entry.COPY; // copy e 385 if (mtime != null) 386 e.mtime = mtime.toMillis(); 387 if (atime != null) 388 e.atime = atime.toMillis(); 389 if (ctime != null) 390 e.ctime = ctime.toMillis(); 391 update(e); 392 } finally { 393 endWrite(); 394 } 395 } 396 397 boolean exists(byte[] path) 398 throws IOException 399 { 400 beginRead(); 401 try { 402 ensureOpen(); 403 return getInode(path) != null; 404 } finally { 405 endRead(); 406 } 407 } 408 409 boolean isDirectory(byte[] path) 410 throws IOException 411 { 412 beginRead(); 413 try { 414 IndexNode n = getInode(path); 415 return n != null && n.isDir(); 416 } finally { 417 endRead(); 418 } 419 } 420 421 // returns the list of child paths of "path" 422 Iterator<Path> iteratorOf(byte[] path, 423 DirectoryStream.Filter<? 
super Path> filter) 424 throws IOException 425 { 426 beginWrite(); // iteration of inodes needs exclusive lock 427 try { 428 ensureOpen(); 429 IndexNode inode = getInode(path); 430 if (inode == null) 431 throw new NotDirectoryException(getString(path)); 432 List<Path> list = new ArrayList<>(); 433 IndexNode child = inode.child; 434 while (child != null) { 435 // assume all path from zip file itself is "normalized" 436 ZipPath zp = new ZipPath(this, child.name, true); 437 if (filter == null || filter.accept(zp)) 438 list.add(zp); 439 child = child.sibling; 440 } 441 return list.iterator(); 442 } finally { 443 endWrite(); 444 } 445 } 446 447 void createDirectory(byte[] dir, FileAttribute<?>... attrs) 448 throws IOException 449 { 450 checkWritable(); 451 // dir = toDirectoryPath(dir); 452 beginWrite(); 453 try { 454 ensureOpen(); 455 if (dir.length == 0 || exists(dir)) // root dir, or exiting dir 456 throw new FileAlreadyExistsException(getString(dir)); 457 checkParents(dir); 458 Entry e = new Entry(dir, Entry.NEW, true, METHOD_STORED, attrs); 459 update(e); 460 } finally { 461 endWrite(); 462 } 463 } 464 465 void copyFile(boolean deletesrc, byte[]src, byte[] dst, CopyOption... 
options) 466 throws IOException 467 { 468 checkWritable(); 469 if (Arrays.equals(src, dst)) 470 return; // do nothing, src and dst are the same 471 472 beginWrite(); 473 try { 474 ensureOpen(); 475 Entry eSrc = getEntry(src); // ensureOpen checked 476 477 if (eSrc == null) 478 throw new NoSuchFileException(getString(src)); 479 if (eSrc.isDir()) { // spec says to create dst dir 480 createDirectory(dst); 481 return; 482 } 483 boolean hasReplace = false; 484 boolean hasCopyAttrs = false; 485 for (CopyOption opt : options) { 486 if (opt == REPLACE_EXISTING) 487 hasReplace = true; 488 else if (opt == COPY_ATTRIBUTES) 489 hasCopyAttrs = true; 490 } 491 Entry eDst = getEntry(dst); 492 if (eDst != null) { 493 if (!hasReplace) 494 throw new FileAlreadyExistsException(getString(dst)); 495 } else { 496 checkParents(dst); 497 } 498 Entry u = new Entry(eSrc, Entry.COPY); // copy eSrc entry 499 u.name(dst); // change name 500 if (eSrc.type == Entry.NEW || eSrc.type == Entry.FILECH) 501 { 502 u.type = eSrc.type; // make it the same type 503 if (deletesrc) { // if it's a "rename", take the data 504 u.bytes = eSrc.bytes; 505 u.file = eSrc.file; 506 } else { // if it's not "rename", copy the data 507 if (eSrc.bytes != null) 508 u.bytes = Arrays.copyOf(eSrc.bytes, eSrc.bytes.length); 509 else if (eSrc.file != null) { 510 u.file = getTempPathForEntry(null); 511 Files.copy(eSrc.file, u.file, REPLACE_EXISTING); 512 } 513 } 514 } 515 if (!hasCopyAttrs) 516 u.mtime = u.atime= u.ctime = System.currentTimeMillis(); 517 update(u); 518 if (deletesrc) 519 updateDelete(eSrc); 520 } finally { 521 endWrite(); 522 } 523 } 524 525 // Returns an output stream for writing the contents into the specified 526 // entry. 527 OutputStream newOutputStream(byte[] path, OpenOption... 
options) 528 throws IOException 529 { 530 checkWritable(); 531 boolean hasCreateNew = false; 532 boolean hasCreate = false; 533 boolean hasAppend = false; 534 boolean hasTruncate = false; 535 for (OpenOption opt: options) { 536 if (opt == READ) 537 throw new IllegalArgumentException("READ not allowed"); 538 if (opt == CREATE_NEW) 539 hasCreateNew = true; 540 if (opt == CREATE) 541 hasCreate = true; 542 if (opt == APPEND) 543 hasAppend = true; 544 if (opt == TRUNCATE_EXISTING) 545 hasTruncate = true; 546 } 547 if (hasAppend && hasTruncate) 548 throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed"); 549 beginRead(); // only need a readlock, the "update()" will 550 try { // try to obtain a writelock when the os is 551 ensureOpen(); // being closed. 552 Entry e = getEntry(path); 553 if (e != null) { 554 if (e.isDir() || hasCreateNew) 555 throw new FileAlreadyExistsException(getString(path)); 556 if (hasAppend) { 557 InputStream is = getInputStream(e); 558 OutputStream os = getOutputStream(new Entry(e, Entry.NEW)); 559 is.transferTo(os); 560 is.close(); 561 return os; 562 } 563 return getOutputStream(new Entry(e, Entry.NEW)); 564 } else { 565 if (!hasCreate && !hasCreateNew) 566 throw new NoSuchFileException(getString(path)); 567 checkParents(path); 568 return getOutputStream(new Entry(path, Entry.NEW, false, defaultMethod)); 569 } 570 } finally { 571 endRead(); 572 } 573 } 574 575 // Returns an input stream for reading the contents of the specified 576 // file entry. 577 InputStream newInputStream(byte[] path) throws IOException { 578 beginRead(); 579 try { 580 ensureOpen(); 581 Entry e = getEntry(path); 582 if (e == null) 583 throw new NoSuchFileException(getString(path)); 584 if (e.isDir()) 585 throw new FileSystemException(getString(path), "is a directory", null); 586 return getInputStream(e); 587 } finally { 588 endRead(); 589 } 590 } 591 592 private void checkOptions(Set<? 
extends OpenOption> options) { 593 // check for options of null type and option is an intance of StandardOpenOption 594 for (OpenOption option : options) { 595 if (option == null) 596 throw new NullPointerException(); 597 if (!(option instanceof StandardOpenOption)) 598 throw new IllegalArgumentException(); 599 } 600 if (options.contains(APPEND) && options.contains(TRUNCATE_EXISTING)) 601 throw new IllegalArgumentException("APPEND + TRUNCATE_EXISTING not allowed"); 602 } 603 604 605 // Returns an output SeekableByteChannel for either 606 // (1) writing the contents of a new entry, if the entry doesn't exit, or 607 // (2) updating/replacing the contents of an existing entry. 608 // Note: The content is not compressed. 609 private class EntryOutputChannel extends ByteArrayChannel { 610 Entry e; 611 612 EntryOutputChannel(Entry e) throws IOException { 613 super(e.size > 0? (int)e.size : 8192, false); 614 this.e = e; 615 if (e.mtime == -1) 616 e.mtime = System.currentTimeMillis(); 617 if (e.method == -1) 618 e.method = defaultMethod; 619 // store size, compressed size, and crc-32 in datadescriptor 620 e.flag = FLAG_DATADESCR; 621 if (zc.isUTF8()) 622 e.flag |= FLAG_USE_UTF8; 623 } 624 625 @Override 626 public void close() throws IOException { 627 e.bytes = toByteArray(); 628 e.size = e.bytes.length; 629 e.crc = -1; 630 super.close(); 631 update(e); 632 } 633 } 634 635 private int getCompressMethod(FileAttribute<?>... attrs) { 636 return defaultMethod; 637 } 638 639 // Returns a Writable/ReadByteChannel for now. Might consdier to use 640 // newFileChannel() instead, which dump the entry data into a regular 641 // file on the default file system and create a FileChannel on top of 642 // it. 643 SeekableByteChannel newByteChannel(byte[] path, 644 Set<? extends OpenOption> options, 645 FileAttribute<?>... 
attrs) 646 throws IOException 647 { 648 checkOptions(options); 649 if (options.contains(StandardOpenOption.WRITE) || 650 options.contains(StandardOpenOption.APPEND)) { 651 checkWritable(); 652 beginRead(); // only need a readlock, the "update()" will obtain 653 // thewritelock when the channel is closed 654 try { 655 ensureOpen(); 656 Entry e = getEntry(path); 657 if (e != null) { 658 if (e.isDir() || options.contains(CREATE_NEW)) 659 throw new FileAlreadyExistsException(getString(path)); 660 SeekableByteChannel sbc = 661 new EntryOutputChannel(new Entry(e, Entry.NEW)); 662 if (options.contains(APPEND)) { 663 try (InputStream is = getInputStream(e)) { // copyover 664 byte[] buf = new byte[8192]; 665 ByteBuffer bb = ByteBuffer.wrap(buf); 666 int n; 667 while ((n = is.read(buf)) != -1) { 668 bb.position(0); 669 bb.limit(n); 670 sbc.write(bb); 671 } 672 } 673 } 674 return sbc; 675 } 676 if (!options.contains(CREATE) && !options.contains(CREATE_NEW)) 677 throw new NoSuchFileException(getString(path)); 678 checkParents(path); 679 return new EntryOutputChannel( 680 new Entry(path, Entry.NEW, false, getCompressMethod(attrs), attrs)); 681 682 } finally { 683 endRead(); 684 } 685 } else { 686 beginRead(); 687 try { 688 ensureOpen(); 689 Entry e = getEntry(path); 690 if (e == null || e.isDir()) 691 throw new NoSuchFileException(getString(path)); 692 try (InputStream is = getInputStream(e)) { 693 // TBD: if (e.size < NNNNN); 694 return new ByteArrayChannel(is.readAllBytes(), true); 695 } 696 } finally { 697 endRead(); 698 } 699 } 700 } 701 702 // Returns a FileChannel of the specified entry. 703 // 704 // This implementation creates a temporary file on the default file system, 705 // copy the entry data into it if the entry exists, and then create a 706 // FileChannel on top of it. 707 FileChannel newFileChannel(byte[] path, 708 Set<? extends OpenOption> options, 709 FileAttribute<?>... 
attrs) 710 throws IOException 711 { 712 checkOptions(options); 713 final boolean forWrite = (options.contains(StandardOpenOption.WRITE) || 714 options.contains(StandardOpenOption.APPEND)); 715 beginRead(); 716 try { 717 ensureOpen(); 718 Entry e = getEntry(path); 719 if (forWrite) { 720 checkWritable(); 721 if (e == null) { 722 if (!options.contains(StandardOpenOption.CREATE) && 723 !options.contains(StandardOpenOption.CREATE_NEW)) { 724 throw new NoSuchFileException(getString(path)); 725 } 726 } else { 727 if (options.contains(StandardOpenOption.CREATE_NEW)) { 728 throw new FileAlreadyExistsException(getString(path)); 729 } 730 if (e.isDir()) 731 throw new FileAlreadyExistsException("directory <" 732 + getString(path) + "> exists"); 733 } 734 options = new HashSet<>(options); 735 options.remove(StandardOpenOption.CREATE_NEW); // for tmpfile 736 } else if (e == null || e.isDir()) { 737 throw new NoSuchFileException(getString(path)); 738 } 739 740 final boolean isFCH = (e != null && e.type == Entry.FILECH); 741 final Path tmpfile = isFCH ? e.file : getTempPathForEntry(path); 742 final FileChannel fch = tmpfile.getFileSystem() 743 .provider() 744 .newFileChannel(tmpfile, options, attrs); 745 final Entry u = isFCH ? e : new Entry(path, tmpfile, Entry.FILECH, attrs); 746 if (forWrite) { 747 u.flag = FLAG_DATADESCR; 748 u.method = getCompressMethod(attrs); 749 } 750 // is there a better way to hook into the FileChannel's close method? 
751 return new FileChannel() { 752 public int write(ByteBuffer src) throws IOException { 753 return fch.write(src); 754 } 755 public long write(ByteBuffer[] srcs, int offset, int length) 756 throws IOException 757 { 758 return fch.write(srcs, offset, length); 759 } 760 public long position() throws IOException { 761 return fch.position(); 762 } 763 public FileChannel position(long newPosition) 764 throws IOException 765 { 766 fch.position(newPosition); 767 return this; 768 } 769 public long size() throws IOException { 770 return fch.size(); 771 } 772 public FileChannel truncate(long size) 773 throws IOException 774 { 775 fch.truncate(size); 776 return this; 777 } 778 public void force(boolean metaData) 779 throws IOException 780 { 781 fch.force(metaData); 782 } 783 public long transferTo(long position, long count, 784 WritableByteChannel target) 785 throws IOException 786 { 787 return fch.transferTo(position, count, target); 788 } 789 public long transferFrom(ReadableByteChannel src, 790 long position, long count) 791 throws IOException 792 { 793 return fch.transferFrom(src, position, count); 794 } 795 public int read(ByteBuffer dst) throws IOException { 796 return fch.read(dst); 797 } 798 public int read(ByteBuffer dst, long position) 799 throws IOException 800 { 801 return fch.read(dst, position); 802 } 803 public long read(ByteBuffer[] dsts, int offset, int length) 804 throws IOException 805 { 806 return fch.read(dsts, offset, length); 807 } 808 public int write(ByteBuffer src, long position) 809 throws IOException 810 { 811 return fch.write(src, position); 812 } 813 public MappedByteBuffer map(MapMode mode, 814 long position, long size) 815 throws IOException 816 { 817 throw new UnsupportedOperationException(); 818 } 819 public FileLock lock(long position, long size, boolean shared) 820 throws IOException 821 { 822 return fch.lock(position, size, shared); 823 } 824 public FileLock tryLock(long position, long size, boolean shared) 825 throws IOException 826 { 
827 return fch.tryLock(position, size, shared); 828 } 829 protected void implCloseChannel() throws IOException { 830 fch.close(); 831 if (forWrite) { 832 u.mtime = System.currentTimeMillis(); 833 u.size = Files.size(u.file); 834 835 update(u); 836 } else { 837 if (!isFCH) // if this is a new fch for reading 838 removeTempPathForEntry(tmpfile); 839 } 840 } 841 }; 842 } finally { 843 endRead(); 844 } 845 } 846 847 // the outstanding input streams that need to be closed 848 private Set<InputStream> streams = 849 Collections.synchronizedSet(new HashSet<InputStream>()); 850 851 private Set<Path> tmppaths = Collections.synchronizedSet(new HashSet<Path>()); 852 private Path getTempPathForEntry(byte[] path) throws IOException { 853 Path tmpPath = createTempFileInSameDirectoryAs(zfpath); 854 if (path != null) { 855 Entry e = getEntry(path); 856 if (e != null) { 857 try (InputStream is = newInputStream(path)) { 858 Files.copy(is, tmpPath, REPLACE_EXISTING); 859 } 860 } 861 } 862 return tmpPath; 863 } 864 865 private void removeTempPathForEntry(Path path) throws IOException { 866 Files.delete(path); 867 tmppaths.remove(path); 868 } 869 870 // check if all parents really exit. ZIP spec does not require 871 // the existence of any "parent directory". 
872 private void checkParents(byte[] path) throws IOException { 873 beginRead(); 874 try { 875 while ((path = getParent(path)) != null && 876 path != ROOTPATH) { 877 if (!inodes.containsKey(IndexNode.keyOf(path))) { 878 throw new NoSuchFileException(getString(path)); 879 } 880 } 881 } finally { 882 endRead(); 883 } 884 } 885 886 private static byte[] ROOTPATH = new byte[] { '/' }; 887 private static byte[] getParent(byte[] path) { 888 int off = getParentOff(path); 889 if (off <= 1) 890 return ROOTPATH; 891 return Arrays.copyOf(path, off); 892 } 893 894 private static int getParentOff(byte[] path) { 895 int off = path.length - 1; 896 if (off > 0 && path[off] == '/') // isDirectory 897 off--; 898 while (off > 0 && path[off] != '/') { off--; } 899 return off; 900 } 901 902 private final void beginWrite() { 903 rwlock.writeLock().lock(); 904 } 905 906 private final void endWrite() { 907 rwlock.writeLock().unlock(); 908 } 909 910 private final void beginRead() { 911 rwlock.readLock().lock(); 912 } 913 914 private final void endRead() { 915 rwlock.readLock().unlock(); 916 } 917 918 /////////////////////////////////////////////////////////////////// 919 920 private volatile boolean isOpen = true; 921 private final SeekableByteChannel ch; // channel to the zipfile 922 final byte[] cen; // CEN & ENDHDR 923 private END end; 924 private long locpos; // position of first LOC header (usually 0) 925 926 private final ReadWriteLock rwlock = new ReentrantReadWriteLock(); 927 928 // name -> pos (in cen), IndexNode itself can be used as a "key" 929 private LinkedHashMap<IndexNode, IndexNode> inodes; 930 931 final byte[] getBytes(String name) { 932 return zc.getBytes(name); 933 } 934 935 final String getString(byte[] name) { 936 return zc.toString(name); 937 } 938 939 @SuppressWarnings("deprecation") 940 protected void finalize() throws IOException { 941 close(); 942 } 943 944 // Reads len bytes of data from the specified offset into buf. 
945 // Returns the total number of bytes read. 946 // Each/every byte read from here (except the cen, which is mapped). 947 final long readFullyAt(byte[] buf, int off, long len, long pos) 948 throws IOException 949 { 950 ByteBuffer bb = ByteBuffer.wrap(buf); 951 bb.position(off); 952 bb.limit((int)(off + len)); 953 return readFullyAt(bb, pos); 954 } 955 956 private final long readFullyAt(ByteBuffer bb, long pos) 957 throws IOException 958 { 959 synchronized(ch) { 960 return ch.position(pos).read(bb); 961 } 962 } 963 964 // Searches for end of central directory (END) header. The contents of 965 // the END header will be read and placed in endbuf. Returns the file 966 // position of the END header, otherwise returns -1 if the END header 967 // was not found or an error occurred. 968 private END findEND() throws IOException 969 { 970 byte[] buf = new byte[READBLOCKSZ]; 971 long ziplen = ch.size(); 972 long minHDR = (ziplen - END_MAXLEN) > 0 ? ziplen - END_MAXLEN : 0; 973 long minPos = minHDR - (buf.length - ENDHDR); 974 975 for (long pos = ziplen - buf.length; pos >= minPos; pos -= (buf.length - ENDHDR)) 976 { 977 int off = 0; 978 if (pos < 0) { 979 // Pretend there are some NUL bytes before start of file 980 off = (int)-pos; 981 Arrays.fill(buf, 0, off, (byte)0); 982 } 983 int len = buf.length - off; 984 if (readFullyAt(buf, off, len, pos + off) != len) 985 zerror("zip END header not found"); 986 987 // Now scan the block backwards for END header signature 988 for (int i = buf.length - ENDHDR; i >= 0; i--) { 989 if (buf[i+0] == (byte)'P' && 990 buf[i+1] == (byte)'K' && 991 buf[i+2] == (byte)'\005' && 992 buf[i+3] == (byte)'\006' && 993 (pos + i + ENDHDR + ENDCOM(buf, i) == ziplen)) { 994 // Found END header 995 buf = Arrays.copyOfRange(buf, i, i + ENDHDR); 996 END end = new END(); 997 end.endsub = ENDSUB(buf); 998 end.centot = ENDTOT(buf); 999 end.cenlen = ENDSIZ(buf); 1000 end.cenoff = ENDOFF(buf); 1001 end.comlen = ENDCOM(buf); 1002 end.endpos = pos + i; 1003 // 
try if there is zip64 end; 1004 byte[] loc64 = new byte[ZIP64_LOCHDR]; 1005 if (end.endpos < ZIP64_LOCHDR || 1006 readFullyAt(loc64, 0, loc64.length, end.endpos - ZIP64_LOCHDR) 1007 != loc64.length || 1008 !locator64SigAt(loc64, 0)) { 1009 return end; 1010 } 1011 long end64pos = ZIP64_LOCOFF(loc64); 1012 byte[] end64buf = new byte[ZIP64_ENDHDR]; 1013 if (readFullyAt(end64buf, 0, end64buf.length, end64pos) 1014 != end64buf.length || 1015 !end64SigAt(end64buf, 0)) { 1016 return end; 1017 } 1018 // end64 found, 1019 long cenlen64 = ZIP64_ENDSIZ(end64buf); 1020 long cenoff64 = ZIP64_ENDOFF(end64buf); 1021 long centot64 = ZIP64_ENDTOT(end64buf); 1022 // double-check 1023 if (cenlen64 != end.cenlen && end.cenlen != ZIP64_MINVAL || 1024 cenoff64 != end.cenoff && end.cenoff != ZIP64_MINVAL || 1025 centot64 != end.centot && end.centot != ZIP64_MINVAL32) { 1026 return end; 1027 } 1028 // to use the end64 values 1029 end.cenlen = cenlen64; 1030 end.cenoff = cenoff64; 1031 end.centot = (int)centot64; // assume total < 2g 1032 end.endpos = end64pos; 1033 return end; 1034 } 1035 } 1036 } 1037 zerror("zip END header not found"); 1038 return null; //make compiler happy 1039 } 1040 1041 // Reads zip file central directory. Returns the file position of first 1042 // CEN header, otherwise returns -1 if an error occurred. If zip->msg != NULL 1043 // then the error was a zip format error and zip->msg has the error text. 1044 // Always pass in -1 for knownTotal; it's used for a recursive call. 
    // Reads the END header and the central directory (CEN) of the zip file,
    // validates every CEN record, populates the "inodes" map (one IndexNode
    // per entry, keyed by itself), and builds the directory tree.
    // Returns the raw CEN bytes, or null when only an END header is present
    // (i.e. an empty zip).
    private byte[] initCEN() throws IOException {
        end = findEND();
        if (end.endpos == 0) {
            inodes = new LinkedHashMap<>(10);
            locpos = 0;
            buildNodeTree();
            return null;             // only END header present
        }
        if (end.cenlen > end.endpos)
            zerror("invalid END header (bad central directory size)");
        long cenpos = end.endpos - end.cenlen;     // position of CEN table

        // Get position of first local file (LOC) header, taking into
        // account that there may be a stub prefixed to the zip file.
        locpos = cenpos - end.cenoff;
        if (locpos < 0)
            zerror("invalid END header (bad central directory offset)");

        // read in the CEN and END
        byte[] cen = new byte[(int)(end.cenlen + ENDHDR)];
        if (readFullyAt(cen, 0, cen.length, cenpos) != end.cenlen + ENDHDR) {
            zerror("read CEN tables failed");
        }
        // Iterate through the entries in the central directory
        inodes = new LinkedHashMap<>(end.centot + 1);
        int pos = 0;
        int limit = cen.length - ENDHDR;
        while (pos < limit) {
            if (!cenSigAt(cen, pos))
                zerror("invalid CEN header (bad signature)");
            int method = CENHOW(cen, pos);
            int nlen   = CENNAM(cen, pos);
            int elen   = CENEXT(cen, pos);
            int clen   = CENCOM(cen, pos);
            // bit 0 of the general purpose flag marks an encrypted entry,
            // which this file system does not support
            if ((CENFLG(cen, pos) & 1) != 0) {
                zerror("invalid CEN header (encrypted entry)");
            }
            if (method != METHOD_STORED && method != METHOD_DEFLATED) {
                zerror("invalid CEN header (unsupported compression method: " + method + ")");
            }
            if (pos + CENHDR + nlen > limit) {
                zerror("invalid CEN header (bad header size)");
            }
            IndexNode inode = new IndexNode(cen, pos, nlen);
            inodes.put(inode, inode);

            // skip ext and comment
            pos += (CENHDR + nlen + elen + clen);
        }
        // after the walk, pos must land exactly on the END record
        if (pos + ENDHDR != cen.length) {
            zerror("invalid CEN header (bad header size)");
        }
        buildNodeTree();
        return cen;
    }

    // Throws ClosedFileSystemException if this file system has been closed.
    private void ensureOpen() throws IOException {
        if (!isOpen)
            throw new ClosedFileSystemException();
    }

    // Creates a new empty temporary file in the same directory as the
    // specified file. A variant of Files.createTempFile.
    private Path createTempFileInSameDirectoryAs(Path path)
        throws IOException
    {
        Path parent = path.toAbsolutePath().getParent();
        Path dir = (parent == null) ? path.getFileSystem().getPath(".") : parent;
        Path tmpPath = Files.createTempFile(dir, "zipfstmp", null);
        tmppaths.add(tmpPath);   // tracked so it can be cleaned up later
        return tmpPath;
    }

    ////////////////////update & sync //////////////////////////////////////

    // true once any entry has been added/updated/deleted; sync() is a no-op otherwise
    private boolean hasUpdate = false;

    // shared key. consumer guarantees the "writeLock" before use it.
    private final IndexNode LOOKUPKEY = new IndexNode(null, -1);

    // Removes the given node from both the directory tree and the inode map,
    // and marks the file system dirty. Caller-visible state is guarded by the
    // write lock taken here.
    private void updateDelete(IndexNode inode) {
        beginWrite();
        try {
            removeFromTree(inode);
            inodes.remove(inode);
            hasUpdate = true;
        } finally {
            endWrite();
        }
    }

    // Installs (or replaces) an updated entry in the inode map; for a newly
    // created node (NEW/FILECH/COPY) also links it into its parent's child list.
    private void update(Entry e) {
        beginWrite();
        try {
            IndexNode old = inodes.put(e, e);
            if (old != null) {
                removeFromTree(old);
            }
            if (e.type == Entry.NEW || e.type == Entry.FILECH || e.type == Entry.COPY) {
                IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(e.name)));
                e.sibling = parent.child;
                parent.child = e;
            }
            hasUpdate = true;
        } finally {
            endWrite();
        }
    }

    // copy over the whole LOC entry (header if necessary, data and ext) from
    // old zip to the new one.
    // Copies one LOC entry (header, data, and optional data descriptor) from
    // the old zip into "os". "written" is the byte count already emitted to
    // the new file; the method updates e.locoff to the entry's new offset and
    // returns the total written for this entry. When updateHeader is true (or
    // the stored name begins with '/'), the LOC header is regenerated via
    // e.writeLOC(); otherwise the original header bytes are copied verbatim.
    private long copyLOCEntry(Entry e, boolean updateHeader,
                              OutputStream os,
                              long written, byte[] buf)
        throws IOException
    {
        long locoff = e.locoff;  // where to read
        e.locoff = written;      // update the e.locoff with new value

        // calculate the size need to write out
        long size = 0;
        // if there is A ext (data descriptor): 24 bytes with Zip64 sizes, else 16
        if ((e.flag & FLAG_DATADESCR) != 0) {
            if (e.size >= ZIP64_MINVAL || e.csize >= ZIP64_MINVAL)
                size = 24;
            else
                size = 16;
        }
        // read loc, use the original loc.elen/nlen
        //
        // an extra byte after loc is read, which should be the first byte of the
        // 'name' field of the loc. if this byte is '/', which means the original
        // entry has an absolute path in original zip/jar file, the e.writeLOC()
        // is used to output the loc, in which the leading "/" will be removed
        if (readFullyAt(buf, 0, LOCHDR + 1 , locoff) != LOCHDR + 1)
            throw new ZipException("loc: reading failed");

        if (updateHeader || LOCNAM(buf) > 0 && buf[LOCHDR] == '/') {
            locoff += LOCHDR + LOCNAM(buf) + LOCEXT(buf);  // skip header
            size += e.csize;
            written = e.writeLOC(os) + size;
        } else {
            os.write(buf, 0, LOCHDR);    // write out the loc header
            locoff += LOCHDR;
            // use e.csize,  LOCSIZ(buf) is zero if FLAG_DATADESCR is on
            // size += LOCNAM(buf) + LOCEXT(buf) + LOCSIZ(buf);
            size += LOCNAM(buf) + LOCEXT(buf) + e.csize;
            written = LOCHDR + size;
        }
        // bulk-copy the remaining "size" bytes using buf as scratch
        int n;
        while (size > 0 &&
            (n = (int)readFullyAt(buf, 0, buf.length, locoff)) != -1)
        {
            if (size < n)
                n = (int)size;
            os.write(buf, 0, n);
            size -= n;
            locoff += n;
        }
        return written;
    }

    // Writes the (possibly compressed) data of entry "e" to "os", wrapping it
    // in a CRC32 or deflater stream depending on e.method, and appends the
    // data descriptor (EXT) when FLAG_DATADESCR is set. Returns bytes written
    // after the LOC header. NOTE(review): "buf" is not used in this body —
    // presumably retained for signature compatibility with callers.
    private long writeEntry(Entry e, OutputStream os, byte[] buf)
        throws IOException {

        if (e.bytes == null && e.file == null)    // dir, 0-length data
            return 0;

        long written = 0;
        try (OutputStream os2 = e.method == METHOD_STORED ?
                new EntryOutputStreamCRC32(e, os) : new EntryOutputStreamDef(e, os)) {
            if (e.bytes != null) {                 // in-memory
                os2.write(e.bytes, 0, e.bytes.length);
            } else if (e.file != null) {           // tmp file
                if (e.type == Entry.NEW || e.type == Entry.FILECH) {
                    try (InputStream is = Files.newInputStream(e.file)) {
                        is.transferTo(os2);
                    }
                }
                Files.delete(e.file);
                tmppaths.remove(e.file);
            }
        }
        // closing os2 above sets e.csize/e.size/e.crc
        written += e.csize;
        if ((e.flag & FLAG_DATADESCR) != 0) {
            written += e.writeEXT(os);
        }
        return written;
    }

    // sync the zip file system, if there is any update: rewrites the whole
    // zip into a temp file (LOC entries, then CEN, then END) and atomically
    // replaces the original file with it.
    private void sync() throws IOException {

        if (!hasUpdate)
            return;
        Path tmpFile = createTempFileInSameDirectoryAs(zfpath);
        try (OutputStream os = new BufferedOutputStream(Files.newOutputStream(tmpFile, WRITE)))
        {
            ArrayList<Entry> elist = new ArrayList<>(inodes.size());
            long written = 0;
            byte[] buf = new byte[8192];
            Entry e = null;

            // write loc
            for (IndexNode inode : inodes.values()) {
                if (inode instanceof Entry) {    // an updated inode
                    e = (Entry)inode;
                    try {
                        if (e.type == Entry.COPY) {
                            // entry copy: the only thing changed is the "name"
                            // and "nlen" in LOC header, so we update/rewrite the
                            // LOC in new file and simply copy the rest (data and
                            // ext) without inflating/deflating from the old zip
                            // file LOC entry.
                            written += copyLOCEntry(e, true, os, written, buf);
                        } else {                 // NEW, FILECH or CEN
                            e.locoff = written;
                            written += e.writeLOC(os);  // write loc header
                            written += writeEntry(e, os, buf);
                        }
                        elist.add(e);
                    } catch (IOException x) {
                        x.printStackTrace();     // skip any inaccurate entry
                    }
                } else {                         // unchanged inode
                    if (inode.pos == -1) {
                        continue;                // pseudo directory node
                    }
                    if (inode.name.length == 1 && inode.name[0] == '/') {
                        continue;                // no root '/' directory even if it
                                                 // exists in original zip/jar file.
                    }
                    e = Entry.readCEN(this, inode);
                    try {
                        written += copyLOCEntry(e, false, os, written, buf);
                        elist.add(e);
                    } catch (IOException x) {
                        x.printStackTrace();     // skip any wrong entry
                    }
                }
            }

            // now write back the cen and end table
            end.cenoff = written;
            for (Entry entry : elist) {
                written += entry.writeCEN(os);
            }
            end.centot = elist.size();
            end.cenlen = written - end.cenoff;
            end.write(os, written, forceEnd64);
        }

        // swap the rewritten file into place
        ch.close();
        Files.delete(zfpath);
        Files.move(tmpFile, zfpath, REPLACE_EXISTING);
        hasUpdate = false;    // clear
    }

    // Looks up the IndexNode for "path" in the inode map (null if absent).
    IndexNode getInode(byte[] path) {
        if (path == null)
            throw new NullPointerException("path");
        return inodes.get(IndexNode.keyOf(path));
    }

    // Returns the full Entry for "path": either the updated in-memory Entry,
    // or one freshly parsed from the CEN; null for absent paths and pseudo
    // directory nodes (pos == -1).
    Entry getEntry(byte[] path) throws IOException {
        IndexNode inode = getInode(path);
        if (inode instanceof Entry)
            return (Entry)inode;
        if (inode == null || inode.pos == -1)
            return null;
        return Entry.readCEN(this, inode);
    }

    // Deletes the entry at "path". Throws NoSuchFileException when absent and
    // failIfNotExists is set, DirectoryNotEmptyException for non-empty dirs,
    // and ZipException for the root directory.
    // NOTE(review): the root-dir message "can't not be delete" reads as a
    // double negative — likely intended "cannot be deleted"; confirm before
    // changing, as tests may match the text.
    public void deleteFile(byte[] path, boolean failIfNotExists)
        throws IOException
    {
        checkWritable();

        IndexNode inode = getInode(path);
        if (inode == null) {
            if (path != null && path.length == 0)
                throw new ZipException("root directory </> can't not be delete");
            if (failIfNotExists)
                throw new NoSuchFileException(getString(path));
        } else {
            if (inode.isDir() && inode.child != null)
                throw new DirectoryNotEmptyException(getString(path));
            updateDelete(inode);
        }
    }

    // Returns an out stream for either
    // (1) writing the contents of a new entry, if the entry exists, or
    // (2) updating/replacing the contents of the specified existing entry.
    private OutputStream getOutputStream(Entry e) throws IOException {

        if (e.mtime == -1)
            e.mtime = System.currentTimeMillis();
        if (e.method == -1)
            e.method = defaultMethod;
        // store size, compressed size, and crc-32 in datadescr
        e.flag = FLAG_DATADESCR;
        if (zc.isUTF8())
            e.flag |= FLAG_USE_UTF8;
        OutputStream os;
        if (useTempFile) {
            e.file = getTempPathForEntry(null);
            os = Files.newOutputStream(e.file, WRITE);
        } else {
            os = new ByteArrayOutputStream((e.size > 0)? (int)e.size : 8192);
        }
        return new EntryOutputStream(e, os);
    }

    // Stream handed out by getOutputStream(): counts bytes written and, on
    // close, records the entry size, captures in-memory bytes, and installs
    // the entry via update().
    private class EntryOutputStream extends FilterOutputStream {
        private Entry e;
        private long written;       // bytes written so far -> becomes e.size
        private boolean isClosed;   // guards against double close/update

        EntryOutputStream(Entry e, OutputStream os) throws IOException {
            super(os);
            this.e =  Objects.requireNonNull(e, "Zip entry is null");
            // this.written = 0;
        }

        @Override
        public synchronized void write(int b) throws IOException {
            out.write(b);
            written += 1;
        }

        @Override
        public synchronized void write(byte b[], int off, int len)
            throws IOException {
            out.write(b, off, len);
            written += len;
        }

        @Override
        public synchronized void close() throws IOException {
            if (isClosed) {
                return;
            }
            isClosed = true;
            e.size = written;
            if (out instanceof ByteArrayOutputStream)
                e.bytes = ((ByteArrayOutputStream)out).toByteArray();
            super.close();
            update(e);    // publish the finished entry into the inode map
        }
    }
Wrapper output stream class to write out a "stored" entry. 1398 // (1) this class does not close the underlying out stream when 1399 // being closed. 1400 // (2) no need to be "synchronized", only used by sync() 1401 private class EntryOutputStreamCRC32 extends FilterOutputStream { 1402 private Entry e; 1403 private CRC32 crc; 1404 private long written; 1405 private boolean isClosed; 1406 1407 EntryOutputStreamCRC32(Entry e, OutputStream os) throws IOException { 1408 super(os); 1409 this.e = Objects.requireNonNull(e, "Zip entry is null"); 1410 this.crc = new CRC32(); 1411 } 1412 1413 @Override 1414 public void write(int b) throws IOException { 1415 out.write(b); 1416 crc.update(b); 1417 written += 1; 1418 } 1419 1420 @Override 1421 public void write(byte b[], int off, int len) 1422 throws IOException { 1423 out.write(b, off, len); 1424 crc.update(b, off, len); 1425 written += len; 1426 } 1427 1428 @Override 1429 public void close() throws IOException { 1430 if (isClosed) 1431 return; 1432 isClosed = true; 1433 e.size = e.csize = written; 1434 e.crc = crc.getValue(); 1435 } 1436 } 1437 1438 // Wrapper output stream class to write out a "deflated" entry. 1439 // (1) this class does not close the underlying out stream when 1440 // being closed. 
    // (2) no need to be "synchronized", only used by sync()
    // Deflates bytes as they are written; on close it finishes the deflater
    // and records the entry's size/csize/crc from the deflater counters.
    private class EntryOutputStreamDef extends DeflaterOutputStream {
        private CRC32 crc;          // CRC-32 of the UNcompressed data
        private Entry e;
        private boolean isClosed;

        EntryOutputStreamDef(Entry e, OutputStream os) throws IOException {
            super(os, getDeflater());   // deflater comes from the shared pool
            this.e = Objects.requireNonNull(e, "Zip entry is null");
            this.crc = new CRC32();
        }

        @Override
        public void write(byte b[], int off, int len) throws IOException {
            super.write(b, off, len);
            crc.update(b, off, len);
        }

        @Override
        public void close() throws IOException {
            if (isClosed)
                return;
            isClosed = true;
            finish();    // flush the deflater without closing "out"
            e.size  = def.getBytesRead();      // uncompressed size
            e.csize = def.getBytesWritten();   // compressed size
            e.crc = crc.getValue();
        }
    }

    // Returns an input stream for reading entry "e": raw bytes/tmp-file for
    // NEW and FILECH entries (already uncompressed), otherwise an
    // EntryInputStream over the zip channel, wrapped in an inflater stream
    // when the entry is deflated. Registered in "streams" for cleanup.
    private InputStream getInputStream(Entry e)
        throws IOException
    {
        InputStream eis = null;

        if (e.type == Entry.NEW) {
            // now bytes & file is uncompressed.
            if (e.bytes != null)
                return new ByteArrayInputStream(e.bytes);
            else if (e.file != null)
                return Files.newInputStream(e.file);
            else
                throw new ZipException("update entry data is missing");
        } else if (e.type == Entry.FILECH) {
            // FILECH result is un-compressed.
            eis = Files.newInputStream(e.file);
            // TBD: wrap to hook close()
            // streams.add(eis);
            return eis;
        } else {  // untouched CEN or COPY
            eis = new EntryInputStream(e, ch);
        }
        if (e.method == METHOD_DEFLATED) {
            // MORE: Compute good size for inflater stream:
            long bufSize = e.size + 2; // Inflater likes a bit of slack
            if (bufSize > 65536)
                bufSize = 8192;
            final long size = e.size;
            eis = new InflaterInputStream(eis, getInflater(), (int)bufSize) {
                private boolean isClosed = false;
                public void close() throws IOException {
                    if (!isClosed) {
                        releaseInflater(inf);   // return inflater to the pool
                        this.in.close();
                        isClosed = true;
                        streams.remove(this);
                    }
                }
                // Override fill() method to provide an extra "dummy" byte
                // at the end of the input stream. This is required when
                // using the "nowrap" Inflater option. (it appears the new
                // zlib in 7 does not need it, but keep it for now)
                protected void fill() throws IOException {
                    if (eof) {
                        throw new EOFException(
                            "Unexpected end of ZLIB input stream");
                    }
                    len = this.in.read(buf, 0, buf.length);
                    if (len == -1) {
                        buf[0] = 0;
                        len = 1;
                        eof = true;
                    }
                    inf.setInput(buf, 0, len);
                }
                private boolean eof;

                public int available() throws IOException {
                    if (isClosed)
                        return 0;
                    // remaining uncompressed bytes, clamped to int range
                    long avail = size - inf.getBytesWritten();
                    return avail > (long) Integer.MAX_VALUE ?
                        Integer.MAX_VALUE : (int) avail;
                }
            };
        } else if (e.method == METHOD_STORED) {
            // TBD: wrap/ it does not seem necessary
        } else {
            throw new ZipException("invalid compression method");
        }
        streams.add(eis);
        return eis;
    }

    // Inner class implementing the input stream used to read
    // a (possibly compressed) zip file entry.
    // Reads an entry's raw (still compressed, for deflated entries) data
    // directly from the zip channel. The LOC header is parsed lazily on the
    // first read: until then "pos" stores the NEGATED loc offset.
    private class EntryInputStream extends InputStream {
        private final SeekableByteChannel zfch; // local ref to zipfs's "ch". zipfs.ch might
                                                // point to a new channel after sync()
        private long pos;                       // current position within entry data
        protected long rem;                     // number of remaining bytes within entry

        EntryInputStream(Entry e, SeekableByteChannel zfch)
            throws IOException
        {
            this.zfch = zfch;
            rem = e.csize;
            pos = e.locoff;
            if (pos == -1) {
                // locoff unknown: re-read the entry from the CEN
                Entry e2 = getEntry(e.name);
                if (e2 == null) {
                    throw new ZipException("invalid loc for entry <" + e.name + ">");
                }
                pos = e2.locoff;
            }
            pos = -pos;  // lazy initialize the real data offset
        }

        public int read(byte b[], int off, int len) throws IOException {
            ensureOpen();
            initDataPos();
            if (rem == 0) {
                return -1;
            }
            if (len <= 0) {
                return 0;
            }
            if (len > rem) {
                len = (int) rem;
            }
            // readFullyAt()
            long n = 0;
            ByteBuffer bb = ByteBuffer.wrap(b);
            bb.position(off);
            bb.limit(off + len);
            // synchronized on the channel: position()+read() must be atomic
            synchronized(zfch) {
                n = zfch.position(pos).read(bb);
            }
            if (n > 0) {
                pos += n;
                rem -= n;
            }
            if (rem == 0) {
                close();
            }
            return (int)n;
        }

        public int read() throws IOException {
            byte[] b = new byte[1];
            if (read(b, 0, 1) == 1) {
                return b[0] & 0xff;
            } else {
                return -1;
            }
        }

        // NOTE(review): skip() adjusts "pos" without calling initDataPos();
        // if invoked before the first read(), pos may still hold the negated
        // lazy offset — verify callers always read before skipping.
        public long skip(long n) throws IOException {
            ensureOpen();
            if (n > rem)
                n = rem;
            pos += n;
            rem -= n;
            if (rem == 0) {
                close();
            }
            return n;
        }

        public int available() {
            return rem > Integer.MAX_VALUE ?
                Integer.MAX_VALUE : (int) rem;
        }

        public void close() {
            rem = 0;
            streams.remove(this);
        }

        // Resolves the lazy (negated) position into the real offset of the
        // entry's data by reading and skipping over the LOC header.
        private void initDataPos() throws IOException {
            if (pos <= 0) {
                pos = -pos + locpos;   // locpos accounts for any stub prefix
                byte[] buf = new byte[LOCHDR];
                if (readFullyAt(buf, 0, buf.length, pos) != LOCHDR) {
                    throw new ZipException("invalid loc " + pos + " for entry reading");
                }
                pos += LOCHDR + LOCNAM(buf) + LOCEXT(buf);
            }
        }
    }

    // Convenience helper: always throws a ZipException with the given message.
    static void zerror(String msg) throws ZipException {
        throw new ZipException(msg);
    }

    // Maximum number of de/inflater we cache
    private final int MAX_FLATER = 20;
    // List of available Inflater objects for decompression
    private final List<Inflater> inflaters = new ArrayList<>();

    // Gets an inflater from the list of available inflaters or allocates
    // a new one ("nowrap" mode — raw deflate data without zlib header).
    private Inflater getInflater() {
        synchronized (inflaters) {
            int size = inflaters.size();
            if (size > 0) {
                Inflater inf = inflaters.remove(size - 1);
                return inf;
            } else {
                return new Inflater(true);
            }
        }
    }

    // Releases the specified inflater to the list of available inflaters;
    // inflaters beyond the cache limit are ended instead.
    private void releaseInflater(Inflater inf) {
        synchronized (inflaters) {
            if (inflaters.size() < MAX_FLATER) {
                inf.reset();
                inflaters.add(inf);
            } else {
                inf.end();
            }
        }
    }

    // List of available Deflater objects for compression
    private final List<Deflater> deflaters = new ArrayList<>();

    // Gets a deflater from the list of available deflaters or allocates
    // a new one.
    // Gets a deflater from the list of available deflaters or allocates a new
    // one ("nowrap" mode — raw deflate output without zlib header).
    private Deflater getDeflater() {
        synchronized (deflaters) {
            int size = deflaters.size();
            if (size > 0) {
                Deflater def = deflaters.remove(size - 1);
                return def;
            } else {
                return new Deflater(Deflater.DEFAULT_COMPRESSION, true);
            }
        }
    }

    // End of central directory record
    static class END {
        // these 2 fields are not used by anyone and write() uses "0"
        // int  disknum;
        // int  sdisknum;
        int  endsub;     // endsub
        int  centot;     // 4 bytes
        long cenlen;     // 4 bytes
        long cenoff;     // 4 bytes
        int  comlen;     // comment length
        byte[] comment;

        /* members of Zip64 end of central directory locator */
        // int diskNum;
        long endpos;
        // int disktot;

        // Writes the END record (preceded by a Zip64 END record + locator
        // when any field overflows its 32/16-bit slot or forceEnd64 is set).
        // "offset" is the file position where this record starts.
        void write(OutputStream os, long offset, boolean forceEnd64) throws IOException {
            boolean hasZip64 = forceEnd64; // false;
            long xlen = cenlen;
            long xoff = cenoff;
            if (xlen >= ZIP64_MINVAL) {
                xlen = ZIP64_MINVAL;        // real value goes in the Zip64 record
                hasZip64 = true;
            }
            if (xoff >= ZIP64_MINVAL) {
                xoff = ZIP64_MINVAL;
                hasZip64 = true;
            }
            int count = centot;
            if (count >= ZIP64_MINVAL32) {
                count = ZIP64_MINVAL32;
                hasZip64 = true;
            }
            if (hasZip64) {
                long off64 = offset;
                //zip64 end of central directory record
                writeInt(os, ZIP64_ENDSIG);       // zip64 END record signature
                writeLong(os, ZIP64_ENDHDR - 12); // size of zip64 end
                writeShort(os, 45);               // version made by
                writeShort(os, 45);               // version needed to extract
                writeInt(os, 0);                  // number of this disk
                writeInt(os, 0);                  // central directory start disk
                writeLong(os, centot);            // number of directory entries on disk
                writeLong(os, centot);            // number of directory entries
                writeLong(os, cenlen);            // length of central directory
                writeLong(os, cenoff);            // offset of central directory

                //zip64 end of central directory locator
                writeInt(os, ZIP64_LOCSIG);       // zip64 END locator signature
                writeInt(os, 0);                  // zip64 END start disk
                writeLong(os, off64);             // offset of zip64 END
                writeInt(os, 1);                  // total number of disks (?)
            }
            writeInt(os, ENDSIG);                 // END record signature
            writeShort(os, 0);                    // number of this disk
            writeShort(os, 0);                    // central directory start disk
            writeShort(os, count);                // number of directory entries on disk
            writeShort(os, count);                // total number of directory entries
            writeInt(os, xlen);                   // length of central directory
            writeInt(os, xoff);                   // offset of central directory
            if (comment != null) {                // zip file comment
                writeShort(os, comment.length);
                writeBytes(os, comment);
            } else {
                writeShort(os, 0);
            }
        }
    }

    // Internal node that links a "name" to its pos in cen table.
    // The node itself can be used as a "key" to lookup itself in
    // the HashMap inodes.
    static class IndexNode {
        byte[] name;
        int    hashcode;  // node is hashable/hashed by its name
        int    pos = -1;  // position in cen table, -1 means the
                          // entry does not exist in zip file
        boolean isdir;

        IndexNode(byte[] name, boolean isdir) {
            name(name);
            this.isdir = isdir;
            this.pos = -1;
        }

        IndexNode(byte[] name, int pos) {
            name(name);
            this.pos = pos;
        }

        // constructor for cenInit() (1) remove trailing '/' (2) pad leading '/'
        IndexNode(byte[] cen, int pos, int nlen) {
            int noff = pos + CENHDR;
            if (cen[noff + nlen - 1] == '/') {
                isdir = true;    // trailing '/' marks a directory entry
                nlen--;
            }
            if (nlen > 0 && cen[noff] == '/') {
                name = Arrays.copyOfRange(cen, noff, noff + nlen);
            } else {
                // prepend '/' so all internal names are absolute
                name = new byte[nlen + 1];
                System.arraycopy(cen, noff, name, 1, nlen);
                name[0] = '/';
            }
            name(name);
            this.pos = pos;
        }

        // per-thread reusable lookup key to avoid allocation on every lookup
        private static final ThreadLocal<IndexNode> cachedKey = new ThreadLocal<>();

        final static IndexNode keyOf(byte[] name) { // get a lookup key;
            IndexNode key = cachedKey.get();
            if (key == null) {
                key = new IndexNode(name, -1);
                cachedKey.set(key);
            }
            return key.as(name);
        }

        // Sets the name and (re)computes the cached hash.
        final void name(byte[] name) {
            this.name = name;
            this.hashcode = Arrays.hashCode(name);
        }

        final IndexNode as(byte[] name) {           // reuse the node, mostly
            name(name);                             // as a lookup "key"
            return this;
        }

        boolean isDir() {
            return isdir;
        }

        public boolean equals(Object other) {
            if (!(other instanceof IndexNode)) {
                return false;
            }
            // ParentLookup compares only a prefix; delegate to its equals
            if (other instanceof ParentLookup) {
                return ((ParentLookup)other).equals(this);
            }
            return Arrays.equals(name, ((IndexNode)other).name);
        }

        public int hashCode() {
            return hashcode;
        }

        IndexNode() {}
        IndexNode sibling;    // next sibling in the parent's child list
        IndexNode child;      // 1st child
    }

    // A full zip entry: IndexNode plus all CEN/LOC attributes. Also serves
    // as the ZipFileAttributes view returned to attribute readers.
    static class Entry extends IndexNode implements ZipFileAttributes {

        static final int CEN    = 1;  // entry read from cen
        static final int NEW    = 2;  // updated contents in bytes or file
        static final int FILECH = 3;  // fch update in "file"
        static final int COPY   = 4;  // copy of a CEN entry

        byte[] bytes;      // updated content bytes
        Path   file;       // use tmp file to store bytes;
        int    type = CEN; // default is the entry read from cen

        // entry attributes
        int    version;
        int    flag;
        int    posixPerms = -1; // posix permissions
        int    method = -1;     // compression method
        long   mtime  = -1;     // last modification time (in DOS time)
        long   atime  = -1;     // last access time
        long   ctime  = -1;     // create time
        long   crc    = -1;     // crc-32 of entry data
        long   csize  = -1;     // compressed size of entry data
        long   size   = -1;     // uncompressed size of entry data
        byte[] extra;

        // cen

        // these fields are not used
        // int versionMade;
        // int disk;
        // int attrs;
        // long attrsEx;
long locoff; 1879 byte[] comment; 1880 1881 Entry() {} 1882 1883 Entry(byte[] name, boolean isdir, int method) { 1884 name(name); 1885 this.isdir = isdir; 1886 this.mtime = this.ctime = this.atime = System.currentTimeMillis(); 1887 this.crc = 0; 1888 this.size = 0; 1889 this.csize = 0; 1890 this.method = method; 1891 } 1892 1893 @SuppressWarnings("unchecked") 1894 Entry(byte[] name, int type, boolean isdir, int method, FileAttribute<?>... attrs) { 1895 this(name, isdir, method); 1896 this.type = type; 1897 for (FileAttribute<?> attr: attrs) { 1898 String attrName = attr.name(); 1899 if (attrName.equals("posix:permissions") || attrName.equals("unix:permissions")) { 1900 posixPerms = PosixFilePermissions.toFlags((Set<PosixFilePermission>)attr.value()); 1901 } 1902 } 1903 } 1904 1905 Entry(Entry e, int type) { 1906 name(e.name); 1907 this.isdir = e.isdir; 1908 this.version = e.version; 1909 this.ctime = e.ctime; 1910 this.atime = e.atime; 1911 this.mtime = e.mtime; 1912 this.crc = e.crc; 1913 this.size = e.size; 1914 this.csize = e.csize; 1915 this.method = e.method; 1916 this.extra = e.extra; 1917 /* 1918 this.versionMade = e.versionMade; 1919 this.disk = e.disk; 1920 this.attrs = e.attrs; 1921 this.attrsEx = e.attrsEx; 1922 */ 1923 this.locoff = e.locoff; 1924 this.comment = e.comment; 1925 this.posixPerms = e.posixPerms; 1926 this.type = type; 1927 } 1928 1929 @SuppressWarnings("unchecked") 1930 Entry(byte[] name, Path file, int type, FileAttribute<?>... 
attrs) { 1931 this(name, type, false, METHOD_STORED); 1932 this.file = file; 1933 for (FileAttribute<?> attr: attrs) { 1934 String attrName = attr.name(); 1935 if (attrName.equals("posix:permissions") || attrName.equals("unix:permissions")) { 1936 posixPerms = PosixFilePermissions.toFlags((Set<PosixFilePermission>)attr.value()); 1937 } 1938 } 1939 } 1940 1941 int version(boolean zip64) throws ZipException { 1942 if (zip64) { 1943 return 45; 1944 } 1945 if (method == METHOD_DEFLATED) 1946 return 20; 1947 else if (method == METHOD_STORED) 1948 return 10; 1949 throw new ZipException("unsupported compression method"); 1950 } 1951 1952 /** 1953 * Adds information about compatibility of file attribute information 1954 * to a version value. 1955 */ 1956 int versionMadeBy(int version) { 1957 return (posixPerms < 0) ? version : 1958 VERSION_BASE_UNIX | (version & 0xff); 1959 } 1960 1961 ///////////////////// CEN ////////////////////// 1962 static Entry readCEN(ZipFileSystem zipfs, IndexNode inode) 1963 throws IOException 1964 { 1965 return new Entry().cen(zipfs, inode); 1966 } 1967 1968 private Entry cen(ZipFileSystem zipfs, IndexNode inode) 1969 throws IOException 1970 { 1971 byte[] cen = zipfs.cen; 1972 int pos = inode.pos; 1973 if (!cenSigAt(cen, pos)) 1974 zerror("invalid CEN header (bad signature)"); 1975 version = CENVER(cen, pos); 1976 flag = CENFLG(cen, pos); 1977 method = CENHOW(cen, pos); 1978 mtime = dosToJavaTime(CENTIM(cen, pos)); 1979 crc = CENCRC(cen, pos); 1980 csize = CENSIZ(cen, pos); 1981 size = CENLEN(cen, pos); 1982 int nlen = CENNAM(cen, pos); 1983 int elen = CENEXT(cen, pos); 1984 int clen = CENCOM(cen, pos); 1985 /* 1986 versionMade = CENVEM(cen, pos); 1987 disk = CENDSK(cen, pos); 1988 attrs = CENATT(cen, pos); 1989 attrsEx = CENATX(cen, pos); 1990 */ 1991 if (CENVEM_FA(cen, pos) == FILE_ATTRIBUTES_UNIX) { 1992 posixPerms = CENATX_PERMS(cen, pos) & 0xFFF; // 12 bits for setuid, setgid, sticky + perms 1993 } 1994 locoff = CENOFF(cen, pos); 1995 pos 
+= CENHDR; 1996 this.name = inode.name; 1997 this.isdir = inode.isdir; 1998 this.hashcode = inode.hashcode; 1999 2000 pos += nlen; 2001 if (elen > 0) { 2002 extra = Arrays.copyOfRange(cen, pos, pos + elen); 2003 pos += elen; 2004 readExtra(zipfs); 2005 } 2006 if (clen > 0) { 2007 comment = Arrays.copyOfRange(cen, pos, pos + clen); 2008 } 2009 return this; 2010 } 2011 2012 int writeCEN(OutputStream os) throws IOException { 2013 long csize0 = csize; 2014 long size0 = size; 2015 long locoff0 = locoff; 2016 int elen64 = 0; // extra for ZIP64 2017 int elenNTFS = 0; // extra for NTFS (a/c/mtime) 2018 int elenEXTT = 0; // extra for Extended Timestamp 2019 boolean foundExtraTime = false; // if time stamp NTFS, EXTT present 2020 2021 byte[] zname = isdir ? toDirectoryPath(name) : name; 2022 2023 // confirm size/length 2024 int nlen = (zname != null) ? zname.length - 1 : 0; // name has [0] as "slash" 2025 int elen = (extra != null) ? extra.length : 0; 2026 int eoff = 0; 2027 int clen = (comment != null) ? 
comment.length : 0; 2028 if (csize >= ZIP64_MINVAL) { 2029 csize0 = ZIP64_MINVAL; 2030 elen64 += 8; // csize(8) 2031 } 2032 if (size >= ZIP64_MINVAL) { 2033 size0 = ZIP64_MINVAL; // size(8) 2034 elen64 += 8; 2035 } 2036 if (locoff >= ZIP64_MINVAL) { 2037 locoff0 = ZIP64_MINVAL; 2038 elen64 += 8; // offset(8) 2039 } 2040 if (elen64 != 0) { 2041 elen64 += 4; // header and data sz 4 bytes 2042 } 2043 boolean zip64 = (elen64 != 0); 2044 int version0 = version(zip64); 2045 while (eoff + 4 < elen) { 2046 int tag = SH(extra, eoff); 2047 int sz = SH(extra, eoff + 2); 2048 if (tag == EXTID_EXTT || tag == EXTID_NTFS) { 2049 foundExtraTime = true; 2050 } 2051 eoff += (4 + sz); 2052 } 2053 if (!foundExtraTime) { 2054 if (isWindows) { // use NTFS 2055 elenNTFS = 36; // total 36 bytes 2056 } else { // Extended Timestamp otherwise 2057 elenEXTT = 9; // only mtime in cen 2058 } 2059 } 2060 writeInt(os, CENSIG); // CEN header signature 2061 writeShort(os, versionMadeBy(version0)); // version made by 2062 writeShort(os, version0); // version needed to extract 2063 writeShort(os, flag); // general purpose bit flag 2064 writeShort(os, method); // compression method 2065 // last modification time 2066 writeInt(os, (int)javaToDosTime(mtime)); 2067 writeInt(os, crc); // crc-32 2068 writeInt(os, csize0); // compressed size 2069 writeInt(os, size0); // uncompressed size 2070 writeShort(os, nlen); 2071 writeShort(os, elen + elen64 + elenNTFS + elenEXTT); 2072 2073 if (comment != null) { 2074 writeShort(os, Math.min(clen, 0xffff)); 2075 } else { 2076 writeShort(os, 0); 2077 } 2078 writeShort(os, 0); // starting disk number 2079 writeShort(os, 0); // internal file attributes (unused) 2080 writeInt(os, posixPerms > 0 ? 
posixPerms << 16 : 0); // external file 2081 // attributes, used for storing posix 2082 // permissions 2083 writeInt(os, locoff0); // relative offset of local header 2084 writeBytes(os, zname, 1, nlen); 2085 if (zip64) { 2086 writeShort(os, EXTID_ZIP64);// Zip64 extra 2087 writeShort(os, elen64 - 4); // size of "this" extra block 2088 if (size0 == ZIP64_MINVAL) 2089 writeLong(os, size); 2090 if (csize0 == ZIP64_MINVAL) 2091 writeLong(os, csize); 2092 if (locoff0 == ZIP64_MINVAL) 2093 writeLong(os, locoff); 2094 } 2095 if (elenNTFS != 0) { 2096 writeShort(os, EXTID_NTFS); 2097 writeShort(os, elenNTFS - 4); 2098 writeInt(os, 0); // reserved 2099 writeShort(os, 0x0001); // NTFS attr tag 2100 writeShort(os, 24); 2101 writeLong(os, javaToWinTime(mtime)); 2102 writeLong(os, javaToWinTime(atime)); 2103 writeLong(os, javaToWinTime(ctime)); 2104 } 2105 if (elenEXTT != 0) { 2106 writeShort(os, EXTID_EXTT); 2107 writeShort(os, elenEXTT - 4); 2108 if (ctime == -1) 2109 os.write(0x3); // mtime and atime 2110 else 2111 os.write(0x7); // mtime, atime and ctime 2112 writeInt(os, javaToUnixTime(mtime)); 2113 } 2114 if (extra != null) // whatever not recognized 2115 writeBytes(os, extra); 2116 if (comment != null) //TBD: 0, Math.min(commentBytes.length, 0xffff)); 2117 writeBytes(os, comment); 2118 return CENHDR + nlen + elen + clen + elen64 + elenNTFS + elenEXTT; 2119 } 2120 2121 ///////////////////// LOC ////////////////////// 2122 2123 int writeLOC(OutputStream os) throws IOException { 2124 writeInt(os, LOCSIG); // LOC header signature 2125 byte[] zname = isdir ? toDirectoryPath(name) : name; 2126 int nlen = (zname != null) ? zname.length - 1 : 0; // [0] is slash 2127 int elen = (extra != null) ? 
extra.length : 0; 2128 boolean foundExtraTime = false; // if extra timestamp present 2129 int eoff = 0; 2130 int elen64 = 0; 2131 boolean zip64 = false; 2132 int elenEXTT = 0; 2133 int elenNTFS = 0; 2134 if ((flag & FLAG_DATADESCR) != 0) { 2135 writeShort(os, version(zip64)); // version needed to extract 2136 writeShort(os, flag); // general purpose bit flag 2137 writeShort(os, method); // compression method 2138 // last modification time 2139 writeInt(os, (int)javaToDosTime(mtime)); 2140 // store size, uncompressed size, and crc-32 in data descriptor 2141 // immediately following compressed entry data 2142 writeInt(os, 0); 2143 writeInt(os, 0); 2144 writeInt(os, 0); 2145 } else { 2146 if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) { 2147 elen64 = 20; //headid(2) + size(2) + size(8) + csize(8) 2148 zip64 = true; 2149 } 2150 writeShort(os, version(zip64)); // version needed to extract 2151 writeShort(os, flag); // general purpose bit flag 2152 writeShort(os, method); // compression method 2153 // last modification time 2154 writeInt(os, (int)javaToDosTime(mtime)); 2155 writeInt(os, crc); // crc-32 2156 if (zip64) { 2157 writeInt(os, ZIP64_MINVAL); 2158 writeInt(os, ZIP64_MINVAL); 2159 } else { 2160 writeInt(os, csize); // compressed size 2161 writeInt(os, size); // uncompressed size 2162 } 2163 } 2164 while (eoff + 4 < elen) { 2165 int tag = SH(extra, eoff); 2166 int sz = SH(extra, eoff + 2); 2167 if (tag == EXTID_EXTT || tag == EXTID_NTFS) { 2168 foundExtraTime = true; 2169 } 2170 eoff += (4 + sz); 2171 } 2172 if (!foundExtraTime) { 2173 if (isWindows) { 2174 elenNTFS = 36; // NTFS, total 36 bytes 2175 } else { // on unix use "ext time" 2176 elenEXTT = 9; 2177 if (atime != -1) 2178 elenEXTT += 4; 2179 if (ctime != -1) 2180 elenEXTT += 4; 2181 } 2182 } 2183 writeShort(os, nlen); 2184 writeShort(os, elen + elen64 + elenNTFS + elenEXTT); 2185 writeBytes(os, zname, 1, nlen); 2186 if (zip64) { 2187 writeShort(os, EXTID_ZIP64); 2188 writeShort(os, 16); 2189 
writeLong(os, size); 2190 writeLong(os, csize); 2191 } 2192 if (elenNTFS != 0) { 2193 writeShort(os, EXTID_NTFS); 2194 writeShort(os, elenNTFS - 4); 2195 writeInt(os, 0); // reserved 2196 writeShort(os, 0x0001); // NTFS attr tag 2197 writeShort(os, 24); 2198 writeLong(os, javaToWinTime(mtime)); 2199 writeLong(os, javaToWinTime(atime)); 2200 writeLong(os, javaToWinTime(ctime)); 2201 } 2202 if (elenEXTT != 0) { 2203 writeShort(os, EXTID_EXTT); 2204 writeShort(os, elenEXTT - 4);// size for the folowing data block 2205 int fbyte = 0x1; 2206 if (atime != -1) // mtime and atime 2207 fbyte |= 0x2; 2208 if (ctime != -1) // mtime, atime and ctime 2209 fbyte |= 0x4; 2210 os.write(fbyte); // flags byte 2211 writeInt(os, javaToUnixTime(mtime)); 2212 if (atime != -1) 2213 writeInt(os, javaToUnixTime(atime)); 2214 if (ctime != -1) 2215 writeInt(os, javaToUnixTime(ctime)); 2216 } 2217 if (extra != null) { 2218 writeBytes(os, extra); 2219 } 2220 return LOCHDR + nlen + elen + elen64 + elenNTFS + elenEXTT; 2221 } 2222 2223 // Data Descriptior 2224 int writeEXT(OutputStream os) throws IOException { 2225 writeInt(os, EXTSIG); // EXT header signature 2226 writeInt(os, crc); // crc-32 2227 if (csize >= ZIP64_MINVAL || size >= ZIP64_MINVAL) { 2228 writeLong(os, csize); 2229 writeLong(os, size); 2230 return 24; 2231 } else { 2232 writeInt(os, csize); // compressed size 2233 writeInt(os, size); // uncompressed size 2234 return 16; 2235 } 2236 } 2237 2238 // read NTFS, UNIX and ZIP64 data from cen.extra 2239 void readExtra(ZipFileSystem zipfs) throws IOException { 2240 if (extra == null) 2241 return; 2242 int elen = extra.length; 2243 int off = 0; 2244 int newOff = 0; 2245 while (off + 4 < elen) { 2246 // extra spec: HeaderID+DataSize+Data 2247 int pos = off; 2248 int tag = SH(extra, pos); 2249 int sz = SH(extra, pos + 2); 2250 pos += 4; 2251 if (pos + sz > elen) // invalid data 2252 break; 2253 switch (tag) { 2254 case EXTID_ZIP64 : 2255 if (size == ZIP64_MINVAL) { 2256 if (pos + 8 > elen) 
// invalid zip64 extra 2257 break; // fields, just skip 2258 size = LL(extra, pos); 2259 pos += 8; 2260 } 2261 if (csize == ZIP64_MINVAL) { 2262 if (pos + 8 > elen) 2263 break; 2264 csize = LL(extra, pos); 2265 pos += 8; 2266 } 2267 if (locoff == ZIP64_MINVAL) { 2268 if (pos + 8 > elen) 2269 break; 2270 locoff = LL(extra, pos); 2271 pos += 8; 2272 } 2273 break; 2274 case EXTID_NTFS: 2275 if (sz < 32) 2276 break; 2277 pos += 4; // reserved 4 bytes 2278 if (SH(extra, pos) != 0x0001) 2279 break; 2280 if (SH(extra, pos + 2) != 24) 2281 break; 2282 // override the loc field, datatime here is 2283 // more "accurate" 2284 mtime = winToJavaTime(LL(extra, pos + 4)); 2285 atime = winToJavaTime(LL(extra, pos + 12)); 2286 ctime = winToJavaTime(LL(extra, pos + 20)); 2287 break; 2288 case EXTID_EXTT: 2289 // spec says the Extened timestamp in cen only has mtime 2290 // need to read the loc to get the extra a/ctime, if flag 2291 // "zipinfo-time" is not specified to false; 2292 // there is performance cost (move up to loc and read) to 2293 // access the loc table foreach entry; 2294 if (zipfs.noExtt) { 2295 if (sz == 5) 2296 mtime = unixToJavaTime(LG(extra, pos + 1)); 2297 break; 2298 } 2299 byte[] buf = new byte[LOCHDR]; 2300 if (zipfs.readFullyAt(buf, 0, buf.length , locoff) 2301 != buf.length) 2302 throw new ZipException("loc: reading failed"); 2303 if (!locSigAt(buf, 0)) 2304 throw new ZipException("loc: wrong sig ->" 2305 + Long.toString(getSig(buf, 0), 16)); 2306 int locElen = LOCEXT(buf); 2307 if (locElen < 9) // EXTT is at lease 9 bytes 2308 break; 2309 int locNlen = LOCNAM(buf); 2310 buf = new byte[locElen]; 2311 if (zipfs.readFullyAt(buf, 0, buf.length , locoff + LOCHDR + locNlen) 2312 != buf.length) 2313 throw new ZipException("loc extra: reading failed"); 2314 int locPos = 0; 2315 while (locPos + 4 < buf.length) { 2316 int locTag = SH(buf, locPos); 2317 int locSZ = SH(buf, locPos + 2); 2318 locPos += 4; 2319 if (locTag != EXTID_EXTT) { 2320 locPos += locSZ; 2321 
                            continue;
                        }
                        // found EXTT: the flags byte selects which Unix
                        // times follow; 'end' is the last offset at which a
                        // 4-byte value still fits inside this record
                        int end = locPos + locSZ - 4;
                        int flag = CH(buf, locPos++);
                        if ((flag & 0x1) != 0 && locPos <= end) {
                            mtime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        if ((flag & 0x2) != 0 && locPos <= end) {
                            atime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        if ((flag & 0x4) != 0 && locPos <= end) {
                            ctime = unixToJavaTime(LG(buf, locPos));
                            locPos += 4;
                        }
                        break;
                    }
                    break;
                default:    // unknown tag
                    // keep unrecognized records, compacting them toward the
                    // front of 'extra'
                    System.arraycopy(extra, off, extra, newOff, sz + 4);
                    newOff += (sz + 4);
                }
                off += (sz + 4);
            }
            if (newOff != 0 && newOff != extra.length)
                extra = Arrays.copyOf(extra, newOff);   // only unknown records kept
            else
                extra = null;
        }

        ///////// basic file attributes ///////////
        @Override
        public FileTime creationTime() {
            // fall back to mtime when no creation time was recorded
            return FileTime.fromMillis(ctime == -1 ? mtime : ctime);
        }

        @Override
        public boolean isDirectory() {
            return isDir();
        }

        @Override
        public boolean isOther() {
            // zip entries are only ever files or directories
            return false;
        }

        @Override
        public boolean isRegularFile() {
            return !isDir();
        }

        @Override
        public FileTime lastAccessTime() {
            // fall back to mtime when no access time was recorded
            return FileTime.fromMillis(atime == -1 ?
                                       mtime : atime);
        }

        @Override
        public FileTime lastModifiedTime() {
            return FileTime.fromMillis(mtime);
        }

        @Override
        public long size() {
            // uncompressed size
            return size;
        }

        @Override
        public boolean isSymbolicLink() {
            // zip entries are never exposed as symbolic links
            return false;
        }

        @Override
        public Object fileKey() {
            return null;    // no unique file key available for zip entries
        }

        ///////// zip entry attributes ///////////
        public long compressedSize() {
            return csize;
        }

        public long crc() {
            return crc;
        }

        public int method() {
            return method;
        }

        // Returns a defensive copy of the raw extra data, or null.
        public byte[] extra() {
            if (extra != null)
                return Arrays.copyOf(extra, extra.length);
            return null;
        }

        // Returns a defensive copy of the entry comment bytes, or null.
        public byte[] comment() {
            if (comment != null)
                return Arrays.copyOf(comment, comment.length);
            return null;
        }

        // Multi-line dump of all attributes, for diagnostics.
        public String toString() {
            StringBuilder sb = new StringBuilder(1024);
            Formatter fm = new Formatter(sb);
            fm.format("    name            : %s%n", new String(name));
            fm.format("    creationTime    : %tc%n", creationTime().toMillis());
            fm.format("    lastAccessTime  : %tc%n", lastAccessTime().toMillis());
            fm.format("    lastModifiedTime: %tc%n", lastModifiedTime().toMillis());
            fm.format("    isRegularFile   : %b%n", isRegularFile());
            fm.format("    isDirectory     : %b%n", isDirectory());
            fm.format("    isSymbolicLink  : %b%n", isSymbolicLink());
            fm.format("    isOther         : %b%n", isOther());
            fm.format("    fileKey         : %s%n", fileKey());
            fm.format("    size            : %d%n", size());
            fm.format("    compressedSize  : %d%n", compressedSize());
            fm.format("    crc             : %x%n", crc());
            fm.format("    method          : %d%n", method());
            if (posixPerms != -1) {
                fm.format("    permissions     : %s%n", permissions());
            }
            fm.close();
            return sb.toString();
        }

        @Override
        public UserPrincipal owner() {
            throw new UnsupportedOperationException(
                "ZipFileSystem does not support owner.");
        }

2452 @Override 2453 public GroupPrincipal group() { 2454 throw new UnsupportedOperationException( 2455 "ZipFileSystem does not support group."); 2456 } 2457 2458 @Override 2459 public Set<PosixFilePermission> permissions() { 2460 if (posixPerms == -1) { 2461 // in case there are no Posix permissions associated with the 2462 // entry, we should not return an empty set of permissions 2463 // because that would be an explicit set of permissions meaning 2464 // no permissions for anyone 2465 throw new UnsupportedOperationException( 2466 "No posix permissions associated with zip entry."); 2467 } 2468 return PosixFilePermissions.fromFlags(posixPerms); 2469 } 2470 2471 @Override 2472 public void setPermissions(Set<PosixFilePermission> perms) { 2473 if (perms == null) { 2474 posixPerms = -1; 2475 return; 2476 } 2477 posixPerms = PosixFilePermissions.toFlags(perms); 2478 } 2479 } 2480 2481 // ZIP directory has two issues: 2482 // (1) ZIP spec does not require the ZIP file to include 2483 // directory entry 2484 // (2) all entries are not stored/organized in a "tree" 2485 // structure. 2486 // A possible solution is to build the node tree ourself as 2487 // implemented below. 
2488 2489 // default time stamp for pseudo entries 2490 private long zfsDefaultTimeStamp = System.currentTimeMillis(); 2491 2492 private void removeFromTree(IndexNode inode) { 2493 IndexNode parent = inodes.get(LOOKUPKEY.as(getParent(inode.name))); 2494 IndexNode child = parent.child; 2495 if (child.equals(inode)) { 2496 parent.child = child.sibling; 2497 } else { 2498 IndexNode last = child; 2499 while ((child = child.sibling) != null) { 2500 if (child.equals(inode)) { 2501 last.sibling = child.sibling; 2502 break; 2503 } else { 2504 last = child; 2505 } 2506 } 2507 } 2508 } 2509 2510 // purely for parent lookup, so we don't have to copy the parent 2511 // name every time 2512 static class ParentLookup extends IndexNode { 2513 int len; 2514 ParentLookup() {} 2515 2516 final ParentLookup as(byte[] name, int len) { // as a lookup "key" 2517 name(name, len); 2518 return this; 2519 } 2520 2521 void name(byte[] name, int len) { 2522 this.name = name; 2523 this.len = len; 2524 // calculate the hashcode the same way as Arrays.hashCode() does 2525 int result = 1; 2526 for (int i = 0; i < len; i++) 2527 result = 31 * result + name[i]; 2528 this.hashcode = result; 2529 } 2530 2531 @Override 2532 public boolean equals(Object other) { 2533 if (!(other instanceof IndexNode)) { 2534 return false; 2535 } 2536 byte[] oname = ((IndexNode)other).name; 2537 return Arrays.equals(name, 0, len, 2538 oname, 0, oname.length); 2539 } 2540 2541 } 2542 2543 private void buildNodeTree() throws IOException { 2544 beginWrite(); 2545 try { 2546 IndexNode root = inodes.get(LOOKUPKEY.as(ROOTPATH)); 2547 if (root == null) { 2548 root = new IndexNode(ROOTPATH, true); 2549 } else { 2550 inodes.remove(root); 2551 } 2552 IndexNode[] nodes = inodes.keySet().toArray(new IndexNode[0]); 2553 inodes.put(root, root); 2554 ParentLookup lookup = new ParentLookup(); 2555 for (IndexNode node : nodes) { 2556 IndexNode parent; 2557 while (true) { 2558 int off = getParentOff(node.name); 2559 if (off <= 1) { // 
parent is root 2560 node.sibling = root.child; 2561 root.child = node; 2562 break; 2563 } 2564 lookup = lookup.as(node.name, off); 2565 if (inodes.containsKey(lookup)) { 2566 parent = inodes.get(lookup); 2567 node.sibling = parent.child; 2568 parent.child = node; 2569 break; 2570 } 2571 // add new pseudo directory entry 2572 parent = new IndexNode(Arrays.copyOf(node.name, off), true); 2573 inodes.put(parent, parent); 2574 node.sibling = parent.child; 2575 parent.child = node; 2576 node = parent; 2577 } 2578 } 2579 } finally { 2580 endWrite(); 2581 } 2582 } 2583 }