/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.commons.compress.archivers.zip;

import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.channels.SeekableByteChannel;
import java.nio.file.Files;
import java.nio.file.LinkOption;
import java.nio.file.OpenOption;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.zip.Deflater;
import java.util.zip.ZipException;

import org.apache.commons.compress.archivers.ArchiveEntry;
import org.apache.commons.compress.archivers.ArchiveOutputStream;
import org.apache.commons.compress.utils.ByteUtils;
import org.apache.commons.compress.utils.IOUtils;

/**
 * Reimplementation of {@link java.util.zip.ZipOutputStream
 * java.util.zip.ZipOutputStream} to handle the extended
 * functionality of this package, especially internal/external file
 * attributes and extra fields with different layouts for local file
 * data and central directory entries.
 *
 * <p>This class will try to use {@link
 * java.nio.channels.SeekableByteChannel} when it knows that the
 * output is going to go to a file and no split archive shall be
 * created.</p>
 *
 * <p>If SeekableByteChannel cannot be used, this implementation will use
 * a Data Descriptor to store size and CRC information for {@link
 * #DEFLATED DEFLATED} entries, so you don't need to
 * calculate them yourself. Unfortunately, this is not possible for
 * the {@link #STORED STORED} method, where setting the CRC and
 * uncompressed size information is required before {@link
 * #putArchiveEntry(ArchiveEntry)} can be called.</p>
 *
 * <p>As of Apache Commons Compress 1.3, the class transparently supports Zip64
 * extensions and thus individual entries and archives larger than 4
 * GB or with more than 65536 entries in most cases but explicit
 * control is provided via {@link #setUseZip64}. If the stream cannot
 * use SeekableByteChannel and you try to write a ZipArchiveEntry of
 * unknown size, then Zip64 extensions will be disabled by default.</p>
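 *
 * <p>An illustrative usage sketch (the file and entry names below are
 * made up for the example and are not part of this API):</p>
 *
 * <pre>{@code
 * try (ZipArchiveOutputStream zipOut = new ZipArchiveOutputStream(new File("example.zip"))) {
 *     ZipArchiveEntry entry = new ZipArchiveEntry("hello.txt");
 *     zipOut.putArchiveEntry(entry);
 *     zipOut.write("Hello, world!".getBytes(StandardCharsets.UTF_8));
 *     zipOut.closeArchiveEntry();
 * }
 * }</pre>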
 *
 * @NotThreadSafe
 */
public class ZipArchiveOutputStream extends ArchiveOutputStream {

    /**
     * Structure collecting information for the entry that is
     * currently being written.
     */
    private static final class CurrentEntry {

        /**
         * Current ZIP entry.
         */
        private final ZipArchiveEntry entry;

        /**
         * Offset within the local file header at which the CRC field
         * for the current entry starts.
         */
        private long localDataStart;

        /**
         * Offset at which the data of the current entry starts.
         */
        private long dataStart;

        /**
         * Number of bytes read for the current entry (can't rely on
         * Deflater#getBytesRead) when using DEFLATED.
         */
        private long bytesRead;

        /**
         * Whether the current entry was the first one using ZIP64 features.
         */
        private boolean causedUseOfZip64;

        /**
         * Whether write() has been called at all.
         *
         * <p>In order to create a valid archive {@link
         * #closeArchiveEntry closeArchiveEntry} will write an empty
         * array to get the CRC right if nothing has been written to
         * the stream at all.</p>
         */
        private boolean hasWritten;

        private CurrentEntry(final ZipArchiveEntry entry) {
            this.entry = entry;
        }
    }

    private static final class EntryMetaData {
        private final long offset;
        private final boolean usesDataDescriptor;
        private EntryMetaData(final long offset, final boolean usesDataDescriptor) {
            this.offset = offset;
            this.usesDataDescriptor = usesDataDescriptor;
        }
    }

    /**
     * Represents the possible policies for creating Unicode extra
     * fields.
     */
    public static final class UnicodeExtraFieldPolicy {

        /**
         * Always create Unicode extra fields.
         */
        public static final UnicodeExtraFieldPolicy ALWAYS = new UnicodeExtraFieldPolicy("always");

        /**
         * Never create Unicode extra fields.
         */
        public static final UnicodeExtraFieldPolicy NEVER = new UnicodeExtraFieldPolicy("never");

        /**
         * Create Unicode extra fields for file names that cannot be
         * encoded using the specified encoding.
         */
        public static final UnicodeExtraFieldPolicy NOT_ENCODEABLE = new UnicodeExtraFieldPolicy("not encodeable");

        private final String name;
        private UnicodeExtraFieldPolicy(final String n) {
            name = n;
        }

        @Override
        public String toString() {
            return name;
        }
    }

    static final int BUFFER_SIZE = 512;
    private static final int LFH_SIG_OFFSET = 0;
    private static final int LFH_VERSION_NEEDED_OFFSET = 4;
    private static final int LFH_GPB_OFFSET = 6;
    private static final int LFH_METHOD_OFFSET = 8;
    private static final int LFH_TIME_OFFSET = 10;
    private static final int LFH_CRC_OFFSET = 14;
    private static final int LFH_COMPRESSED_SIZE_OFFSET = 18;
    private static final int LFH_ORIGINAL_SIZE_OFFSET = 22;
    private static final int LFH_FILENAME_LENGTH_OFFSET = 26;
    private static final int LFH_EXTRA_LENGTH_OFFSET = 28;
    private static final int LFH_FILENAME_OFFSET = 30;
    private static final int CFH_SIG_OFFSET = 0;
    private static final int CFH_VERSION_MADE_BY_OFFSET = 4;
    private static final int CFH_VERSION_NEEDED_OFFSET = 6;
    private static final int CFH_GPB_OFFSET = 8;
    private static final int CFH_METHOD_OFFSET = 10;
    private static final int CFH_TIME_OFFSET = 12;
    private static final int CFH_CRC_OFFSET = 16;
    private static final int CFH_COMPRESSED_SIZE_OFFSET = 20;
    private static final int CFH_ORIGINAL_SIZE_OFFSET = 24;
    private static final int CFH_FILENAME_LENGTH_OFFSET = 28;
    private static final int CFH_EXTRA_LENGTH_OFFSET = 30;
    private static final int CFH_COMMENT_LENGTH_OFFSET = 32;
    private static final int CFH_DISK_NUMBER_OFFSET = 34;
    private static final int CFH_INTERNAL_ATTRIBUTES_OFFSET = 36;

    private static final int CFH_EXTERNAL_ATTRIBUTES_OFFSET = 38;

    private static final int CFH_LFH_OFFSET = 42;

    private static final int CFH_FILENAME_OFFSET = 46;

    /**
     * Compression method for deflated entries.
     */
    public static final int DEFLATED = java.util.zip.ZipEntry.DEFLATED;

    /**
     * Default compression level for deflated entries.
     */
    public static final int DEFAULT_COMPRESSION = Deflater.DEFAULT_COMPRESSION;

    /**
     * Compression method for stored entries.
     */
    public static final int STORED = java.util.zip.ZipEntry.STORED;

    /**
     * default encoding for file names and comment.
     */
    static final String DEFAULT_ENCODING = ZipEncodingHelper.UTF8;

    /**
     * General purpose flag, which indicates that file names are
     * written in UTF-8.
     * @deprecated use {@link GeneralPurposeBit#UFT8_NAMES_FLAG} instead
     */
    @Deprecated
    public static final int EFS_FLAG = GeneralPurposeBit.UFT8_NAMES_FLAG;

    /**
     * Helper, a 0 as ZipShort.
     */
    private static final byte[] ZERO = {0, 0};

    /**
     * Helper, a 0 as ZipLong.
     */
    private static final byte[] LZERO = {0, 0, 0, 0};

    private static final byte[] ONE = ZipLong.getBytes(1L);

    /*
     * Various ZIP constants shared between this class, ZipArchiveInputStream and ZipFile
     */
    /**
     * local file header signature
     */
    static final byte[] LFH_SIG = ZipLong.LFH_SIG.getBytes(); //NOSONAR

    /**
     * data descriptor signature
     */
    static final byte[] DD_SIG = ZipLong.DD_SIG.getBytes(); //NOSONAR

    /**
     * central file header signature
     */
    static final byte[] CFH_SIG = ZipLong.CFH_SIG.getBytes(); //NOSONAR

    /**
     * end of central dir signature
     */
    static final byte[] EOCD_SIG = ZipLong.getBytes(0X06054B50L); //NOSONAR

    /**
     * ZIP64 end of central dir signature
     */
    static final byte[] ZIP64_EOCD_SIG = ZipLong.getBytes(0X06064B50L); //NOSONAR

    /**
     * ZIP64 end of central dir locator signature
     */
    static final byte[] ZIP64_EOCD_LOC_SIG = ZipLong.getBytes(0X07064B50L); //NOSONAR

    /** indicates if this archive is finished. protected for use in Jar implementation */
    protected boolean finished;

    /**
     * Current entry.
     */
    private CurrentEntry entry;

    /**
     * The file comment.
     */
    private String comment = "";

    /**
     * Compression level for next entry.
     */
    private int level = DEFAULT_COMPRESSION;

    /**
     * Has the compression level changed when compared to the last
     * entry?
     */
    private boolean hasCompressionLevelChanged;

    /**
     * Default compression method for next entry.
     */
    private int method = java.util.zip.ZipEntry.DEFLATED;

    /**
     * List of ZipArchiveEntries written so far.
     */
    private final List<ZipArchiveEntry> entries = new LinkedList<>();

    private final StreamCompressor streamCompressor;

    /**
     * Start of central directory.
     */
    private long cdOffset;

    /**
     * Length of central directory.
     */
    private long cdLength;

    /**
     * Disk number start of central directory.
     */
    private long cdDiskNumberStart;

    /**
     * Length of end of central directory
     */
    private long eocdLength;

    /**
     * Holds some book-keeping data for each entry.
     */
    private final Map<ZipArchiveEntry, EntryMetaData> metaData = new HashMap<>();

    /**
     * The encoding to use for file names and the file comment.
     *
     * <p>For a list of possible values see <a
     * href="http://java.sun.com/j2se/1.5.0/docs/guide/intl/encoding.doc.html">http://java.sun.com/j2se/1.5.0/docs/guide/intl/encoding.doc.html</a>.
     * Defaults to UTF-8.</p>
     */
    private String encoding = DEFAULT_ENCODING;

    /**
     * The ZIP encoding to use for file names and the file comment.
     *
     * This field is of internal use and will be set in {@link
     * #setEncoding(String)}.
     */
    private ZipEncoding zipEncoding = ZipEncodingHelper.getZipEncoding(DEFAULT_ENCODING);

    /**
     * This Deflater object is used for output.
     */
    protected final Deflater def;

    /**
     * Optional random access output.
     */
    private final SeekableByteChannel channel;

    private final OutputStream outputStream;

    /**
     * whether to use the general purpose bit flag when writing UTF-8
     * file names or not.
     */
    private boolean useUTF8Flag = true;

    /**
     * Whether to encode non-encodable file names as UTF-8.
     */
    private boolean fallbackToUTF8;

    /**
     * whether to create UnicodePathExtraField-s for each entry.
     */
    private UnicodeExtraFieldPolicy createUnicodeExtraFields = UnicodeExtraFieldPolicy.NEVER;

    /**
     * Whether anything inside this archive has used a ZIP64 feature.
     *
     * @since 1.3
     */
    private boolean hasUsedZip64;

    private Zip64Mode zip64Mode = Zip64Mode.AsNeeded;

    private final byte[] copyBuffer = new byte[32768];

    /**
     * Whether we are creating a split zip
     */
    private final boolean isSplitZip;

    /**
     * Holds the number of central directory entries on each disk; this is used
     * when writing the Zip64 End Of Central Directory and End Of Central Directory records.
     */
    private final Map<Integer, Integer> numberOfCDInDiskData = new HashMap<>();

    /**
     * Creates a new ZIP OutputStream writing to a File. Will use
     * random access if possible.
     * @param file the file to ZIP to
     * @throws IOException on error
     */
    public ZipArchiveOutputStream(final File file) throws IOException {
        this(file.toPath());
    }

    /**
     * Creates a split ZIP Archive.
     *
     * <p>The files making up the archive will use Z01, Z02,
     * ... extensions and the last part of it will be the given {@code
     * file}.</p>
     *
     * <p>Even though the stream writes to a file this stream will
     * behave as if no random access was possible. This means the
     * sizes of stored entries need to be known before the actual
     * entry data is written.</p>
     *
     * @param file the file that will become the last part of the split archive
     * @param zipSplitSize maximum size of a single part of the split
     * archive created by this stream. Must be between 64kB and about
     * 4GB.
     *
     * @throws IOException on error
     * @throws IllegalArgumentException if zipSplitSize is not in the required range
     * @since 1.20
     */
    public ZipArchiveOutputStream(final File file, final long zipSplitSize) throws IOException {
        this(file.toPath(), zipSplitSize);
    }

    /**
     * Creates a new ZIP OutputStream filtering the underlying stream.
     * @param out the output stream to zip
     */
    public ZipArchiveOutputStream(final OutputStream out) {
        this.outputStream = out;
        this.channel = null;
        def = new Deflater(level, true);
        streamCompressor = StreamCompressor.create(out, def);
        isSplitZip = false;
    }

    /**
     * Creates a split ZIP Archive.
     * <p>The files making up the archive will use Z01, Z02,
     * ... extensions and the last part of it will be the given {@code
     * file}.</p>
     * <p>Even though the stream writes to a file this stream will
     * behave as if no random access was possible. This means the
     * sizes of stored entries need to be known before the actual
     * entry data is written.</p>
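     * <p>An illustrative sketch (the file name and part size below are
     * made up) of creating a split archive with parts of at most 1 MiB;
     * parts with .z01, .z02, ... extensions are created as needed:</p>
     * <pre>{@code
     * try (ZipArchiveOutputStream zipOut =
     *          new ZipArchiveOutputStream(Paths.get("big.zip"), 1024 * 1024)) {
     *     // add entries as usual
     * }
     * }</pre>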
     * @param path the path to the file that will become the last part of the split archive
     * @param zipSplitSize maximum size of a single part of the split
     * archive created by this stream. Must be between 64kB and about 4GB.
     * @throws IOException on error
     * @throws IllegalArgumentException if zipSplitSize is not in the required range
     * @since 1.22
     */
    public ZipArchiveOutputStream(final Path path, final long zipSplitSize) throws IOException {
        def = new Deflater(level, true);
        this.outputStream = new ZipSplitOutputStream(path, zipSplitSize);
        streamCompressor = StreamCompressor.create(this.outputStream, def);
        channel = null;
        isSplitZip = true;
    }

    /**
     * Creates a new ZIP OutputStream writing to a Path. Will use
     * random access if possible.
     * @param file the file to ZIP to
     * @param options options specifying how the file is opened.
     * @throws IOException on error
     * @since 1.21
     */
    public ZipArchiveOutputStream(final Path file, final OpenOption... options) throws IOException {
        def = new Deflater(level, true);
        OutputStream outputStream = null;
        SeekableByteChannel channel = null;
        StreamCompressor streamCompressor;
        try {
            channel = Files.newByteChannel(file,
                EnumSet.of(StandardOpenOption.CREATE, StandardOpenOption.WRITE,
                           StandardOpenOption.READ,
                           StandardOpenOption.TRUNCATE_EXISTING));
            // will never get opened properly when an exception is thrown so doesn't need to get closed
            streamCompressor = StreamCompressor.create(channel, def); //NOSONAR
        } catch (final IOException e) { // NOSONAR
            IOUtils.closeQuietly(channel);
            channel = null;
            outputStream = Files.newOutputStream(file, options);
            streamCompressor = StreamCompressor.create(outputStream, def);
        }
        this.outputStream = outputStream;
        this.channel = channel;
        this.streamCompressor = streamCompressor;
        this.isSplitZip = false;
    }

    /**
     * Creates a new ZIP OutputStream writing to a SeekableByteChannel.
     *
     * <p>{@link
     * org.apache.commons.compress.utils.SeekableInMemoryByteChannel}
     * allows you to write to an in-memory archive using random
     * access.</p>
     *
     * @param channel the channel to ZIP to
     * @since 1.13
     */
    public ZipArchiveOutputStream(final SeekableByteChannel channel) {
        this.channel = channel;
        def = new Deflater(level, true);
        streamCompressor = StreamCompressor.create(channel, def);
        outputStream = null;
        isSplitZip = false;
    }

    /**
     * Adds an archive entry with a raw input stream.
     *
     * If crc, size and compressed size are supplied on the entry, these values will be used as-is.
     * Zip64 status is re-established based on the settings in this stream, and the supplied value
     * is ignored.
     *
     * The entry is put and closed immediately.
     *
     * @param entry The archive entry to add
     * @param rawStream The raw input stream of a different entry. May be compressed/encrypted.
     * @throws IOException If copying fails
     */
    public void addRawArchiveEntry(final ZipArchiveEntry entry, final InputStream rawStream)
        throws IOException {
        final ZipArchiveEntry ae = new ZipArchiveEntry(entry);
        if (hasZip64Extra(ae)) {
            // Will be re-added as required. This may make the file generated with this method
            // somewhat smaller than standard mode,
            // since standard mode is unable to remove the ZIP 64 header.
            ae.removeExtraField(Zip64ExtendedInformationExtraField.HEADER_ID);
        }
        final boolean is2PhaseSource = ae.getCrc() != ZipArchiveEntry.CRC_UNKNOWN
            && ae.getSize() != ArchiveEntry.SIZE_UNKNOWN
            && ae.getCompressedSize() != ArchiveEntry.SIZE_UNKNOWN;
        putArchiveEntry(ae, is2PhaseSource);
        copyFromZipInputStream(rawStream);
        closeCopiedEntry(is2PhaseSource);
    }

    /**
     * Adds UnicodeExtra fields for name and file comment if mode is
     * ALWAYS or the data cannot be encoded using the configured
     * encoding.
     */
    private void addUnicodeExtraFields(final ZipArchiveEntry ze, final boolean encodable,
                                       final ByteBuffer name)
        throws IOException {
        if (createUnicodeExtraFields == UnicodeExtraFieldPolicy.ALWAYS
            || !encodable) {
            ze.addExtraField(new UnicodePathExtraField(ze.getName(),
                                                       name.array(),
                                                       name.arrayOffset(),
                                                       name.limit()
                                                       - name.position()));
        }

        final String comm = ze.getComment();
        if (comm != null && !comm.isEmpty()) {

            final boolean commentEncodable = zipEncoding.canEncode(comm);

            if (createUnicodeExtraFields == UnicodeExtraFieldPolicy.ALWAYS
                || !commentEncodable) {
                final ByteBuffer commentB = getEntryEncoding(ze).encode(comm);
                ze.addExtraField(new UnicodeCommentExtraField(comm,
                                                              commentB.array(),
                                                              commentB.arrayOffset(),
                                                              commentB.limit()
                                                              - commentB.position())
                                 );
            }
        }
    }

    /**
     * Whether this stream is able to write the given entry.
     *
     * <p>May return false if it is set up to use encryption or a
     * compression method that hasn't been implemented yet.</p>
     * @param ae the entry to test
     * @return whether this stream can write the given entry
     * @since 1.1
     */
    @Override
    public boolean canWriteEntryData(final ArchiveEntry ae) {
        if (ae instanceof ZipArchiveEntry) {
            final ZipArchiveEntry zae = (ZipArchiveEntry) ae;
            return zae.getMethod() != ZipMethod.IMPLODING.getCode()
                && zae.getMethod() != ZipMethod.UNSHRINKING.getCode()
                && ZipUtil.canHandleEntryData(zae);
        }
        return false;
    }

    /**
     * Verifies the sizes aren't too big in the Zip64Mode.Never case
     * and returns whether the entry would require a Zip64 extra
     * field.
     */
    private boolean checkIfNeedsZip64(final Zip64Mode effectiveMode)
        throws ZipException {
        final boolean actuallyNeedsZip64 = isZip64Required(entry.entry, effectiveMode);
        if (actuallyNeedsZip64 && effectiveMode == Zip64Mode.Never) {
            throw new Zip64RequiredException(Zip64RequiredException.getEntryTooBigMessage(entry.entry));
        }
        return actuallyNeedsZip64;
    }

    /**
     * Closes this output stream and releases any system resources
     * associated with the stream.
     *
     * @throws IOException if an I/O error occurs.
     * @throws Zip64RequiredException if the archive's size exceeds 4
     * GByte or there are more than 65535 entries inside the archive
     * and {@link #setUseZip64} is {@link Zip64Mode#Never}.
     */
    @Override
    public void close() throws IOException {
        try {
            if (!finished) {
                finish();
            }
        } finally {
            destroy();
        }
    }

    /**
     * Writes all necessary data for this entry.
     * @throws IOException on error
     * @throws Zip64RequiredException if the entry's uncompressed or
     * compressed size exceeds 4 GByte and {@link #setUseZip64}
     * is {@link Zip64Mode#Never}.
     */
    @Override
    public void closeArchiveEntry() throws IOException {
        preClose();

        flushDeflater();

        final long bytesWritten = streamCompressor.getTotalBytesWritten() - entry.dataStart;
        final long realCrc = streamCompressor.getCrc32();
        entry.bytesRead = streamCompressor.getBytesRead();
        final Zip64Mode effectiveMode = getEffectiveZip64Mode(entry.entry);
        final boolean actuallyNeedsZip64 = handleSizesAndCrc(bytesWritten, realCrc, effectiveMode);
        closeEntry(actuallyNeedsZip64, false);
        streamCompressor.reset();
    }

    /**
     * Writes all necessary data for this entry.
     *
     * @param phased true if this entry is the second phase of a two-phase ZIP creation;
     * size, compressed size and CRC are already known in the ZipArchiveEntry
     * @throws IOException on error
     * @throws Zip64RequiredException if the entry's uncompressed or
     * compressed size exceeds 4 GByte and {@link #setUseZip64}
     * is {@link Zip64Mode#Never}.
     */
    private void closeCopiedEntry(final boolean phased) throws IOException {
        preClose();
        entry.bytesRead = entry.entry.getSize();
        final Zip64Mode effectiveMode = getEffectiveZip64Mode(entry.entry);
        final boolean actuallyNeedsZip64 = checkIfNeedsZip64(effectiveMode);
        closeEntry(actuallyNeedsZip64, phased);
    }

    private void closeEntry(final boolean actuallyNeedsZip64, final boolean phased) throws IOException {
        if (!phased && channel != null) {
            rewriteSizesAndCrc(actuallyNeedsZip64);
        }

        if (!phased) {
            writeDataDescriptor(entry.entry);
        }
        entry = null;
    }

    private void copyFromZipInputStream(final InputStream src) throws IOException {
        if (entry == null) {
            throw new IllegalStateException("No current entry");
        }
        ZipUtil.checkRequestedFeatures(entry.entry);
        entry.hasWritten = true;
        int length;
        while ((length = src.read(copyBuffer)) >= 0) {
            streamCompressor.writeCounted(copyBuffer, 0, length);
            count(length);
        }
    }

    /**
     * Creates a new ZIP entry taking some information from the given
     * file and using the provided name.
     *
     * <p>The name will be adjusted to end with a forward slash "/" if
     * the file is a directory. If the file is not a directory a
     * potential trailing forward slash will be stripped from the
     * entry name.</p>
     *
     * <p>Must not be used if the stream has already been closed.</p>
     */
    @Override
    public ArchiveEntry createArchiveEntry(final File inputFile, final String entryName)
        throws IOException {
        if (finished) {
            throw new IOException("Stream has already been finished");
        }
        return new ZipArchiveEntry(inputFile, entryName);
    }

    /**
     * Creates a new ZIP entry taking some information from the given
     * file and using the provided name.
     *
     * <p>The name will be adjusted to end with a forward slash "/" if
     * the file is a directory. If the file is not a directory a
     * potential trailing forward slash will be stripped from the
     * entry name.</p>
     *
     * <p>Must not be used if the stream has already been closed.</p>
     * @param inputPath path to create the entry from.
     * @param entryName name of the entry.
     * @param options options indicating how symbolic links are handled.
     * @return a new instance.
     * @throws IOException if an I/O error occurs.
     * @since 1.21
     */
    @Override
    public ArchiveEntry createArchiveEntry(final Path inputPath, final String entryName, final LinkOption... options)
        throws IOException {
        if (finished) {
            throw new IOException("Stream has already been finished");
        }
        return new ZipArchiveEntry(inputPath, entryName);
    }

    private byte[] createCentralFileHeader(final ZipArchiveEntry ze) throws IOException {

        final EntryMetaData entryMetaData = metaData.get(ze);
        final boolean needsZip64Extra = hasZip64Extra(ze)
            || ze.getCompressedSize() >= ZipConstants.ZIP64_MAGIC
            || ze.getSize() >= ZipConstants.ZIP64_MAGIC
            || entryMetaData.offset >= ZipConstants.ZIP64_MAGIC
            || ze.getDiskNumberStart() >= ZipConstants.ZIP64_MAGIC_SHORT
            || zip64Mode == Zip64Mode.Always
            || zip64Mode == Zip64Mode.AlwaysWithCompatibility;

        if (needsZip64Extra && zip64Mode == Zip64Mode.Never) {
            // must be the offset that is too big, otherwise an
            // exception would have been thrown in putArchiveEntry or
            // closeArchiveEntry
            throw new Zip64RequiredException(Zip64RequiredException
                                             .ARCHIVE_TOO_BIG_MESSAGE);
        }

        handleZip64Extra(ze, entryMetaData.offset, needsZip64Extra);

        return createCentralFileHeader(ze, getName(ze), entryMetaData, needsZip64Extra);
    }

    /**
     * Creates the central file header entry.
     * @param ze the entry to write
     * @param name The encoded name
     * @param entryMetaData meta data for this file
     * @throws IOException on error
     */
    private byte[] createCentralFileHeader(final ZipArchiveEntry ze, final ByteBuffer name,
                                           final EntryMetaData entryMetaData,
                                           final boolean needsZip64Extra) throws IOException {
        if (isSplitZip) {
            // calculate the disk number for every central file header,
            // this will be used in writing End Of Central Directory and Zip64 End Of Central Directory
            final int currentSplitSegment = ((ZipSplitOutputStream) this.outputStream).getCurrentSplitSegmentIndex();
            if (numberOfCDInDiskData.get(currentSplitSegment) == null) {
                numberOfCDInDiskData.put(currentSplitSegment, 1);
            } else {
                final int originalNumberOfCD = numberOfCDInDiskData.get(currentSplitSegment);
                numberOfCDInDiskData.put(currentSplitSegment, originalNumberOfCD + 1);
            }
        }

        final byte[] extra = ze.getCentralDirectoryExtra();
        final int extraLength = extra.length;

        // file comment length
        String comm = ze.getComment();
        if (comm == null) {
            comm = "";
        }

        final ByteBuffer commentB = getEntryEncoding(ze).encode(comm);
        final int nameLen = name.limit() - name.position();
        final int commentLen = commentB.limit() - commentB.position();
        final int len = CFH_FILENAME_OFFSET + nameLen + extraLength + commentLen;
        final byte[] buf = new byte[len];

        System.arraycopy(CFH_SIG, 0, buf, CFH_SIG_OFFSET, ZipConstants.WORD);

        // version made by
        // CheckStyle:MagicNumber OFF
        ZipShort.putShort(ze.getPlatform() << 8 | (!hasUsedZip64 ? ZipConstants.DATA_DESCRIPTOR_MIN_VERSION : ZipConstants.ZIP64_MIN_VERSION),
                          buf, CFH_VERSION_MADE_BY_OFFSET);

        final int zipMethod = ze.getMethod();
        final boolean encodable = zipEncoding.canEncode(ze.getName());
        ZipShort.putShort(versionNeededToExtract(zipMethod, needsZip64Extra, entryMetaData.usesDataDescriptor),
            buf, CFH_VERSION_NEEDED_OFFSET);
        getGeneralPurposeBits(!encodable && fallbackToUTF8, entryMetaData.usesDataDescriptor).encode(buf, CFH_GPB_OFFSET);

        // compression method
        ZipShort.putShort(zipMethod, buf, CFH_METHOD_OFFSET);

        // last mod. time and date
        ZipUtil.toDosTime(ze.getTime(), buf, CFH_TIME_OFFSET);

        // CRC
        // compressed length
        // uncompressed length
        ZipLong.putLong(ze.getCrc(), buf, CFH_CRC_OFFSET);
        if (ze.getCompressedSize() >= ZipConstants.ZIP64_MAGIC
            || ze.getSize() >= ZipConstants.ZIP64_MAGIC
            || zip64Mode == Zip64Mode.Always
            || zip64Mode == Zip64Mode.AlwaysWithCompatibility) {
            ZipLong.ZIP64_MAGIC.putLong(buf, CFH_COMPRESSED_SIZE_OFFSET);
            ZipLong.ZIP64_MAGIC.putLong(buf, CFH_ORIGINAL_SIZE_OFFSET);
        } else {
            ZipLong.putLong(ze.getCompressedSize(), buf, CFH_COMPRESSED_SIZE_OFFSET);
            ZipLong.putLong(ze.getSize(), buf, CFH_ORIGINAL_SIZE_OFFSET);
        }

        ZipShort.putShort(nameLen, buf, CFH_FILENAME_LENGTH_OFFSET);

        // extra field length
        ZipShort.putShort(extraLength, buf, CFH_EXTRA_LENGTH_OFFSET);

        ZipShort.putShort(commentLen, buf, CFH_COMMENT_LENGTH_OFFSET);

        // disk number start
        if (isSplitZip) {
            if (ze.getDiskNumberStart() >= ZipConstants.ZIP64_MAGIC_SHORT || zip64Mode == Zip64Mode.Always) {
                ZipShort.putShort(ZipConstants.ZIP64_MAGIC_SHORT, buf, CFH_DISK_NUMBER_OFFSET);
            } else {
                ZipShort.putShort((int) ze.getDiskNumberStart(), buf, CFH_DISK_NUMBER_OFFSET);
            }
        } else {
            System.arraycopy(ZERO, 0, buf, CFH_DISK_NUMBER_OFFSET, ZipConstants.SHORT);
        }

        // internal file attributes
        ZipShort.putShort(ze.getInternalAttributes(), buf, CFH_INTERNAL_ATTRIBUTES_OFFSET);

        // external file attributes
        ZipLong.putLong(ze.getExternalAttributes(), buf, CFH_EXTERNAL_ATTRIBUTES_OFFSET);

        // relative offset of LFH
        if (entryMetaData.offset >= ZipConstants.ZIP64_MAGIC || zip64Mode == Zip64Mode.Always) {
            ZipLong.putLong(ZipConstants.ZIP64_MAGIC, buf, CFH_LFH_OFFSET);
        } else {
            ZipLong.putLong(Math.min(entryMetaData.offset, ZipConstants.ZIP64_MAGIC), buf, CFH_LFH_OFFSET);
        }

        // file name
        System.arraycopy(name.array(), name.arrayOffset(), buf, CFH_FILENAME_OFFSET, nameLen);

        final int extraStart = CFH_FILENAME_OFFSET + nameLen;
        System.arraycopy(extra, 0, buf, extraStart, extraLength);

        final int commentStart = extraStart + extraLength;

        // file comment
        System.arraycopy(commentB.array(), commentB.arrayOffset(), buf, commentStart, commentLen);
        return buf;
    }

    private byte[] createLocalFileHeader(final ZipArchiveEntry ze, final ByteBuffer name, final boolean encodable,
                                         final boolean phased, final long archiveOffset) {
        final ZipExtraField oldEx = ze.getExtraField(ResourceAlignmentExtraField.ID);
        if (oldEx != null) {
            ze.removeExtraField(ResourceAlignmentExtraField.ID);
        }
        final ResourceAlignmentExtraField oldAlignmentEx =
            oldEx instanceof ResourceAlignmentExtraField ? (ResourceAlignmentExtraField) oldEx : null;
        int alignment = ze.getAlignment();
        if (alignment <= 0 && oldAlignmentEx != null) {
            alignment = oldAlignmentEx.getAlignment();
        }

        if (alignment > 1 || oldAlignmentEx != null && !oldAlignmentEx.allowMethodChange()) {
            final int oldLength = LFH_FILENAME_OFFSET +
                            name.limit() - name.position() +
                            ze.getLocalFileDataExtra().length;

            final int padding = (int) (-archiveOffset - oldLength - ZipExtraField.EXTRAFIELD_HEADER_SIZE
                            - ResourceAlignmentExtraField.BASE_SIZE &
                            alignment - 1);
            ze.addExtraField(new ResourceAlignmentExtraField(alignment,
                            oldAlignmentEx != null && oldAlignmentEx.allowMethodChange(), padding));
        }

        final byte[] extra = ze.getLocalFileDataExtra();
        final int nameLen = name.limit() - name.position();
        final int len = LFH_FILENAME_OFFSET + nameLen + extra.length;
        final byte[] buf = new byte[len];

        System.arraycopy(LFH_SIG, 0, buf, LFH_SIG_OFFSET, ZipConstants.WORD);

        // store method in local variable to prevent multiple method calls
        final int zipMethod = ze.getMethod();
        final boolean dataDescriptor = usesDataDescriptor(zipMethod, phased);

        ZipShort.putShort(versionNeededToExtract(zipMethod, hasZip64Extra(ze), dataDescriptor), buf, LFH_VERSION_NEEDED_OFFSET);

        final GeneralPurposeBit generalPurposeBit = getGeneralPurposeBits(!encodable && fallbackToUTF8, dataDescriptor);
        generalPurposeBit.encode(buf, LFH_GPB_OFFSET);

        // compression method
        ZipShort.putShort(zipMethod, buf, LFH_METHOD_OFFSET);

        ZipUtil.toDosTime(ze.getTime(), buf, LFH_TIME_OFFSET);

        // CRC
        if (phased || !(zipMethod == DEFLATED || channel != null)) {
            ZipLong.putLong(ze.getCrc(), buf, LFH_CRC_OFFSET);
        } else {
            System.arraycopy(LZERO, 0, buf, LFH_CRC_OFFSET, ZipConstants.WORD);
        }

        // compressed length
        // uncompressed length
        if (hasZip64Extra(entry.entry)) {
            // point to ZIP64 extended information extra field for
            // sizes, may get rewritten once sizes are known if
            // stream is seekable
            ZipLong.ZIP64_MAGIC.putLong(buf, LFH_COMPRESSED_SIZE_OFFSET);
            ZipLong.ZIP64_MAGIC.putLong(buf, LFH_ORIGINAL_SIZE_OFFSET);
        } else if (phased) {
            ZipLong.putLong(ze.getCompressedSize(), buf, LFH_COMPRESSED_SIZE_OFFSET);
            ZipLong.putLong(ze.getSize(), buf, LFH_ORIGINAL_SIZE_OFFSET);
        } else if (zipMethod == DEFLATED || channel != null) {
            System.arraycopy(LZERO, 0, buf, LFH_COMPRESSED_SIZE_OFFSET, ZipConstants.WORD);
            System.arraycopy(LZERO, 0, buf, LFH_ORIGINAL_SIZE_OFFSET, ZipConstants.WORD);
        } else { // Stored
            ZipLong.putLong(ze.getSize(), buf, LFH_COMPRESSED_SIZE_OFFSET);
            ZipLong.putLong(ze.getSize(), buf, LFH_ORIGINAL_SIZE_OFFSET);
        }
        // file name length
        ZipShort.putShort(nameLen, buf, LFH_FILENAME_LENGTH_OFFSET);

        // extra field length
        ZipShort.putShort(extra.length, buf, LFH_EXTRA_LENGTH_OFFSET);

        // file name
        System.arraycopy(name.array(), name.arrayOffset(), buf, LFH_FILENAME_OFFSET, nameLen);

        // extra fields
        System.arraycopy(extra, 0, buf, LFH_FILENAME_OFFSET + nameLen, extra.length);

        return buf;
    }

    /**
     * Writes next block of compressed data to the output stream.
     * @throws IOException on error
     */
    protected final void deflate() throws IOException {
        streamCompressor.deflate();
    }

    /**
     * Closes the underlying stream/file without finishing the
     * archive; the result will likely be a corrupt archive.
     *
     * <p>This method only exists to support tests that generate
     * corrupt archives so they can clean up any temporary files.</p>
     */
    void destroy() throws IOException {
        try {
            if (channel != null) {
                channel.close();
            }
        } finally {
            if (outputStream != null) {
                outputStream.close();
            }
        }
    }

    /**
     * {@inheritDoc}
     * @throws Zip64RequiredException if the archive's size exceeds 4
     * GByte or there are more than 65535 entries inside the archive
     * and {@link #setUseZip64} is {@link Zip64Mode#Never}.
     */
    @Override
    public void finish() throws IOException {
        if (finished) {
            throw new IOException("This archive has already been finished");
        }

        if (entry != null) {
            throw new IOException("This archive contains unclosed entries.");
        }

        final long cdOverallOffset = streamCompressor.getTotalBytesWritten();
        cdOffset = cdOverallOffset;
        if (isSplitZip) {
            // when creating a split zip, the offset should be
            // the offset to the corresponding segment disk
            final ZipSplitOutputStream zipSplitOutputStream = (ZipSplitOutputStream) this.outputStream;
            cdOffset = zipSplitOutputStream.getCurrentSplitSegmentBytesWritten();
            cdDiskNumberStart = zipSplitOutputStream.getCurrentSplitSegmentIndex();
        }
        writeCentralDirectoryInChunks();

        cdLength = streamCompressor.getTotalBytesWritten() - cdOverallOffset;

        // calculate the length of end of central directory, as it may be used in writeZip64CentralDirectory
        final ByteBuffer commentData = this.zipEncoding.encode(comment);
        final long commentLength = (long) commentData.limit() - commentData.position();
        eocdLength = ZipConstants.WORD /* length of EOCD_SIG */
            + ZipConstants.SHORT /* number of this disk */
            + ZipConstants.SHORT /* disk number of start of central directory */
            + ZipConstants.SHORT /* total number of entries on this disk */
            + ZipConstants.SHORT /* total number of entries */
            + ZipConstants.WORD /* size of central directory */
            + ZipConstants.WORD /* offset of start of central directory */
            + ZipConstants.SHORT /* ZIP comment length */
            + commentLength /* ZIP comment */;

        writeZip64CentralDirectory();
        writeCentralDirectoryEnd();
        metaData.clear();
        entries.clear();
        streamCompressor.close();
        if (isSplitZip) {
            // trigger the ZipSplitOutputStream to write the final split segment
            outputStream.close();
        }
        finished = true;
    }

    /**
     * Flushes this output stream and forces any buffered output bytes
     * to be written out to the stream.
     *
     * @throws IOException if an I/O error occurs.
     */
    @Override
    public void flush() throws IOException {
        if (outputStream != null) {
            outputStream.flush();
        }
    }

    /**
     * Ensures all bytes sent to the deflater are written to the stream.
     */
    private void flushDeflater() throws IOException {
        if (entry.entry.getMethod() == DEFLATED) {
            streamCompressor.flushDeflater();
        }
    }

    /**
     * Returns the total number of bytes written to this stream.
     * @return the number of written bytes
     * @since 1.22
     */
    @Override
    public long getBytesWritten() {
        return streamCompressor.getTotalBytesWritten();
    }

    /**
     * If the mode is AsNeeded and the entry is a compressed entry of
     * unknown size that gets written to a non-seekable stream then
     * change the default to Never.
     *
     * @since 1.3
     */
    private Zip64Mode getEffectiveZip64Mode(final ZipArchiveEntry ze) {
        if (zip64Mode != Zip64Mode.AsNeeded
            || channel != null
            || ze.getMethod() != DEFLATED
            || ze.getSize() != ArchiveEntry.SIZE_UNKNOWN) {
            return zip64Mode;
        }
        return Zip64Mode.Never;
    }

    /**
     * The encoding to use for file names and the file comment.
     *
     * @return the encoding, or null if the platform's default character encoding is used.
     */
    public String getEncoding() {
        return encoding;
    }

    private ZipEncoding getEntryEncoding(final ZipArchiveEntry ze) {
        final boolean encodable = zipEncoding.canEncode(ze.getName());
        return !encodable && fallbackToUTF8
            ? ZipEncodingHelper.UTF8_ZIP_ENCODING : zipEncoding;
    }

    private GeneralPurposeBit getGeneralPurposeBits(final boolean utfFallback, final boolean usesDataDescriptor) {
        final GeneralPurposeBit b = new GeneralPurposeBit();
        b.useUTF8ForNames(useUTF8Flag || utfFallback);
        if (usesDataDescriptor) {
            b.useDataDescriptor(true);
        }
        return b;
    }

    private ByteBuffer getName(final ZipArchiveEntry ze) throws IOException {
        return getEntryEncoding(ze).encode(ze.getName());
    }

    /**
     * Get the existing ZIP64 extended information extra field or
     * create a new one and add it to the entry.
     *
     * @since 1.3
     */
    private Zip64ExtendedInformationExtraField
        getZip64Extra(final ZipArchiveEntry ze) {
        if (entry != null) {
            entry.causedUseOfZip64 = !hasUsedZip64;
        }
        hasUsedZip64 = true;
        final ZipExtraField extra = ze.getExtraField(Zip64ExtendedInformationExtraField.HEADER_ID);
        Zip64ExtendedInformationExtraField z64 = extra instanceof Zip64ExtendedInformationExtraField
            ? (Zip64ExtendedInformationExtraField) extra : null;
        if (z64 == null) {
            /*
              System.err.println("Adding z64 for " + ze.getName()
              + ", method: " + ze.getMethod()
              + " (" + (ze.getMethod() == STORED) + ")"
              + ", channel: " + (channel != null));
            */
            z64 = new Zip64ExtendedInformationExtraField();
        }

        // even if the field is there already, make sure it is the first one
        ze.addAsFirstExtraField(z64);

        return z64;
    }

    /**
     * Ensures the current entry's size and CRC information is set to
     * the values just written, verifies it isn't too big in the
     * Zip64Mode.Never case and returns whether the entry would
     * require a Zip64 extra field.
     */
    private boolean handleSizesAndCrc(final long bytesWritten, final long crc,
                                      final Zip64Mode effectiveMode)
        throws ZipException {
        if (entry.entry.getMethod() == DEFLATED) {
            /* It turns out def.getBytesRead() returns wrong values if
             * the size exceeds 4 GB on Java < Java7
            entry.entry.setSize(def.getBytesRead());
            */
            entry.entry.setSize(entry.bytesRead);
            entry.entry.setCompressedSize(bytesWritten);
            entry.entry.setCrc(crc);

        } else if (channel == null) {
            if (entry.entry.getCrc() != crc) {
                throw new ZipException("Bad CRC checksum for entry "
                                       + entry.entry.getName() + ": "
                                       + Long.toHexString(entry.entry.getCrc())
                                       + " instead of "
                                       + Long.toHexString(crc));
            }

            if (entry.entry.getSize() != bytesWritten) {
                throw new ZipException("Bad size for entry "
                                       + entry.entry.getName() + ": "
                                       + entry.entry.getSize()
                                       + " instead of "
                                       + bytesWritten);
            }
        } else { /* method is STORED and we used SeekableByteChannel */
            entry.entry.setSize(bytesWritten);
            entry.entry.setCompressedSize(bytesWritten);
            entry.entry.setCrc(crc);
        }

        return checkIfNeedsZip64(effectiveMode);
    }

    /**
     * If the entry needs Zip64 extra information inside the central
     * directory then configure its data.
     */
    private void handleZip64Extra(final ZipArchiveEntry ze, final long lfhOffset,
                                  final boolean needsZip64Extra) {
        if (needsZip64Extra) {
            final Zip64ExtendedInformationExtraField z64 = getZip64Extra(ze);
            if (ze.getCompressedSize() >= ZipConstants.ZIP64_MAGIC
                || ze.getSize() >= ZipConstants.ZIP64_MAGIC
                || zip64Mode == Zip64Mode.Always
                || zip64Mode == Zip64Mode.AlwaysWithCompatibility) {
                z64.setCompressedSize(new ZipEightByteInteger(ze.getCompressedSize()));
                z64.setSize(new ZipEightByteInteger(ze.getSize()));
            } else {
                // reset value that may have been set for LFH
                z64.setCompressedSize(null);
                z64.setSize(null);
            }

            final boolean needsToEncodeLfhOffset =
                lfhOffset >= ZipConstants.ZIP64_MAGIC || zip64Mode == Zip64Mode.Always;
            final boolean needsToEncodeDiskNumberStart =
                ze.getDiskNumberStart() >= ZipConstants.ZIP64_MAGIC_SHORT || zip64Mode == Zip64Mode.Always;

            if (needsToEncodeLfhOffset || needsToEncodeDiskNumberStart) {
                z64.setRelativeHeaderOffset(new ZipEightByteInteger(lfhOffset));
            }
            if (needsToEncodeDiskNumberStart) {
                z64.setDiskStartNumber(new ZipLong(ze.getDiskNumberStart()));
            }
            ze.setExtra();
        }
    }

    /**
     * Is there a ZIP64 extended information extra field for the
     * entry?
     *
     * @since 1.3
     */
    private boolean hasZip64Extra(final ZipArchiveEntry ze) {
        return ze.getExtraField(Zip64ExtendedInformationExtraField
                                .HEADER_ID)
            instanceof Zip64ExtendedInformationExtraField;
    }

    /**
     * This method indicates whether this archive is writing to a
     * seekable stream (i.e., to a random access file).
     *
     * <p>For seekable streams, you don't need to calculate the CRC or
     * uncompressed size for {@link #STORED} entries before
     * invoking {@link #putArchiveEntry(ArchiveEntry)}.</p>
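     *
     * <p>For non-seekable output, an illustrative sketch of the STORED
     * workflow follows; the entry name, payload and the {@code zipOut}
     * variable are made up for the example:</p>
     * <pre>{@code
     * byte[] data = "payload".getBytes(StandardCharsets.US_ASCII);
     * CRC32 crc = new CRC32();
     * crc.update(data);
     * ZipArchiveEntry e = new ZipArchiveEntry("stored.txt");
     * e.setMethod(ZipArchiveOutputStream.STORED);
     * e.setSize(data.length);
     * e.setCrc(crc.getValue());
     * zipOut.putArchiveEntry(e);
     * zipOut.write(data);
     * zipOut.closeArchiveEntry();
     * }</pre>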
     * @return true if seekable
     */
    public boolean isSeekable() {
        return channel != null;
    }

    private boolean isTooLargeForZip32(final ZipArchiveEntry zipArchiveEntry) {
        return zipArchiveEntry.getSize() >= ZipConstants.ZIP64_MAGIC || zipArchiveEntry.getCompressedSize() >= ZipConstants.ZIP64_MAGIC;
    }

    private boolean isZip64Required(final ZipArchiveEntry entry1, final Zip64Mode requestedMode) {
        return requestedMode == Zip64Mode.Always || requestedMode == Zip64Mode.AlwaysWithCompatibility
            || isTooLargeForZip32(entry1);
    }

    private void preClose() throws IOException {
        if (finished) {
            throw new IOException("Stream has already been finished");
        }

        if (entry == null) {
            throw new IOException("No current entry to close");
        }

        if (!entry.hasWritten) {
            write(ByteUtils.EMPTY_BYTE_ARRAY, 0, 0);
        }
    }

    /**
     * {@inheritDoc}
     * @throws ClassCastException if entry is not an instance of ZipArchiveEntry
     * @throws Zip64RequiredException if the entry's uncompressed or
     * compressed size is known to exceed 4 GByte and {@link #setUseZip64}
     * is {@link Zip64Mode#Never}.
     */
    @Override
    public void putArchiveEntry(final ArchiveEntry archiveEntry) throws IOException {
        putArchiveEntry((ZipArchiveEntry) archiveEntry, false);
    }

    /**
     * Writes the headers for an archive entry to the output stream.
     * The caller must then write the content to the stream and call
     * {@link #closeArchiveEntry()} to complete the process.
     *
     * @param archiveEntry The archiveEntry
     * @param phased If true, size, compressed size and CRC are required to be known up-front in the archiveEntry
     * @throws ClassCastException if entry is not an instance of ZipArchiveEntry
     * @throws Zip64RequiredException if the entry's uncompressed or
     * compressed size is known to exceed 4 GByte and {@link #setUseZip64}
     * is {@link Zip64Mode#Never}.
     */
    private void putArchiveEntry(final ZipArchiveEntry archiveEntry, final boolean phased) throws IOException {
        if (finished) {
            throw new IOException("Stream has already been finished");
        }

        if (entry != null) {
            closeArchiveEntry();
        }

        entry = new CurrentEntry(archiveEntry);
        entries.add(entry.entry);

        setDefaults(entry.entry);

        final Zip64Mode effectiveMode = getEffectiveZip64Mode(entry.entry);
        validateSizeInformation(effectiveMode);

        if (shouldAddZip64Extra(entry.entry, effectiveMode)) {

            final Zip64ExtendedInformationExtraField z64 = getZip64Extra(entry.entry);

            final ZipEightByteInteger size;
            final ZipEightByteInteger compressedSize;
            if (phased) {
                // sizes are already known
                size = new ZipEightByteInteger(entry.entry.getSize());
                compressedSize = new ZipEightByteInteger(entry.entry.getCompressedSize());
            } else if (entry.entry.getMethod() == STORED
                       && entry.entry.getSize() != ArchiveEntry.SIZE_UNKNOWN) {
                // actually, we already know the sizes
                compressedSize = size = new ZipEightByteInteger(entry.entry.getSize());
            } else {
                // just a placeholder, real data will be in data
                // descriptor or inserted later via SeekableByteChannel
                compressedSize = size = ZipEightByteInteger.ZERO;
            }
            z64.setSize(size);
            z64.setCompressedSize(compressedSize);
            entry.entry.setExtra();
        }

        if (entry.entry.getMethod() == DEFLATED && hasCompressionLevelChanged) {
            def.setLevel(level);
            hasCompressionLevelChanged = false;
        }
        writeLocalFileHeader(archiveEntry, phased);
    }

    /**
     * When using random access output, write the local file header
     * and potentially the ZIP64 extra containing the correct CRC and
     * compressed/uncompressed sizes.
     */
    private void rewriteSizesAndCrc(final boolean actuallyNeedsZip64)
        throws IOException {
        final long save = channel.position();

        channel.position(entry.localDataStart);
        writeOut(ZipLong.getBytes(entry.entry.getCrc()));
        if (!hasZip64Extra(entry.entry) || !actuallyNeedsZip64) {
            writeOut(ZipLong.getBytes(entry.entry.getCompressedSize()));
            writeOut(ZipLong.getBytes(entry.entry.getSize()));
        } else {
            writeOut(ZipLong.ZIP64_MAGIC.getBytes());
            writeOut(ZipLong.ZIP64_MAGIC.getBytes());
        }

        if (hasZip64Extra(entry.entry)) {
            final ByteBuffer name = getName(entry.entry);
            final int nameLen = name.limit() - name.position();
            // seek to ZIP64 extra, skip header and size information
            channel.position(entry.localDataStart + 3 * ZipConstants.WORD + 2 * ZipConstants.SHORT + nameLen + 2 * ZipConstants.SHORT);
            // inside the ZIP64 extra uncompressed size comes
            // first, unlike the LFH, CD or data descriptor
            writeOut(ZipEightByteInteger.getBytes(entry.entry.getSize()));
            writeOut(ZipEightByteInteger.getBytes(entry.entry.getCompressedSize()));

            if (!actuallyNeedsZip64) {
                // do some cleanup:
                // * rewrite version needed to extract
                channel.position(entry.localDataStart - 5 * ZipConstants.SHORT);
                writeOut(ZipShort.getBytes(versionNeededToExtract(entry.entry.getMethod(), false, false)));

                // * remove ZIP64 extra, so it doesn't get written
                //   to the central directory
                entry.entry.removeExtraField(Zip64ExtendedInformationExtraField
                                             .HEADER_ID);
                entry.entry.setExtra();

                // * reset hasUsedZip64 if it has been set because
                //   of this entry
                if (entry.causedUseOfZip64) {
                    hasUsedZip64 = false;
                }
            }
        }
        channel.position(save);
    }

    /**
     * Set the file comment.
     * @param comment the comment
     */
    public void setComment(final String comment) {
        this.comment = comment;
    }

    /**
     * Whether to create Unicode Extra Fields.
     *
     * <p>Defaults to NEVER.</p>
     *
     * @param b whether to create Unicode Extra Fields.
     */
    public void setCreateUnicodeExtraFields(final UnicodeExtraFieldPolicy b) {
        createUnicodeExtraFields = b;
    }

    /**
     * Provides default values for compression method and last
     * modification time.
     */
    private void setDefaults(final ZipArchiveEntry entry) {
        if (entry.getMethod() == -1) { // not specified
            entry.setMethod(method);
        }

        if (entry.getTime() == -1) { // not specified
            entry.setTime(System.currentTimeMillis());
        }
    }

    /**
     * The encoding to use for file names and the file comment.
     *
     * <p>For a list of possible values see <a
     * href="http://java.sun.com/j2se/1.5.0/docs/guide/intl/encoding.doc.html">http://java.sun.com/j2se/1.5.0/docs/guide/intl/encoding.doc.html</a>.
     * Defaults to UTF-8.</p>
     * @param encoding the encoding to use for file names, use null
     * for the platform's default encoding
     */
    public void setEncoding(final String encoding) {
        this.encoding = encoding;
        this.zipEncoding = ZipEncodingHelper.getZipEncoding(encoding);
        if (useUTF8Flag && !ZipEncodingHelper.isUTF8(encoding)) {
            useUTF8Flag = false;
        }
    }

    /**
     * Whether to fall back to UTF-8 and the language encoding flag if
     * the file name cannot be encoded using the specified encoding.
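     *
     * <p>An illustrative configuration sketch (the file name and the
     * {@code Cp437} code page below are arbitrary choices, not defaults
     * of this class):</p>
     * <pre>{@code
     * ZipArchiveOutputStream zipOut = new ZipArchiveOutputStream(new File("legacy.zip"));
     * zipOut.setEncoding("Cp437");
     * zipOut.setFallbackToUTF8(true);
     * zipOut.setCreateUnicodeExtraFields(UnicodeExtraFieldPolicy.NOT_ENCODEABLE);
     * }</pre>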
     *
     * <p>Defaults to false.</p>
     *
     * @param b whether to fall back to UTF-8 and the language encoding
     * flag if the file name cannot be encoded using the specified
     * encoding.
     */
    public void setFallbackToUTF8(final boolean b) {
        fallbackToUTF8 = b;
    }

    /**
     * Sets the compression level for subsequent entries.
     *
     * <p>Default is Deflater.DEFAULT_COMPRESSION.</p>
     * @param level the compression level.
     * @throws IllegalArgumentException if an invalid compression
     * level is specified.
     */
    public void setLevel(final int level) {
        if (level < Deflater.DEFAULT_COMPRESSION
            || level > Deflater.BEST_COMPRESSION) {
            throw new IllegalArgumentException("Invalid compression level: "
                                               + level);
        }
        if (this.level == level) {
            return;
        }
        hasCompressionLevelChanged = true;
        this.level = level;
    }

    /**
     * Sets the default compression method for subsequent entries.
     *
     * <p>Default is DEFLATED.</p>
     * @param method an {@code int} from java.util.zip.ZipEntry
     */
    public void setMethod(final int method) {
        this.method = method;
    }

    /**
     * Whether to set the language encoding flag if the file name
     * encoding is UTF-8.
     *
     * <p>Defaults to true.</p>
     *
     * @param b whether to set the language encoding flag if the file
     * name encoding is UTF-8
     */
    public void setUseLanguageEncodingFlag(final boolean b) {
        useUTF8Flag = b && ZipEncodingHelper.isUTF8(encoding);
    }

    /**
     * Whether Zip64 extensions will be used.
     *
     * <p>When setting the mode to {@link Zip64Mode#Never Never},
     * {@link #putArchiveEntry}, {@link #closeArchiveEntry}, {@link
     * #finish} or {@link #close} may throw a {@link
     * Zip64RequiredException} if the entry's size or the total size
     * of the archive exceeds 4GB or there are more than 65536 entries
     * inside the archive. Any archive created in this mode will be
     * readable by implementations that don't support Zip64.</p>
     *
     * <p>When setting the mode to {@link Zip64Mode#Always Always},
     * Zip64 extensions will be used for all entries. Any archive
     * created in this mode may be unreadable by implementations that
     * don't support Zip64 even if all its contents would be.</p>
     *
     * <p>When setting the mode to {@link Zip64Mode#AsNeeded
     * AsNeeded}, Zip64 extensions will transparently be used for
     * those entries that require them. This mode can only be used if
     * the uncompressed size of the {@link ZipArchiveEntry} is known
     * when calling {@link #putArchiveEntry} or the archive is written
     * to a seekable output (i.e. you have used the {@link
     * #ZipArchiveOutputStream(java.io.File) File-arg constructor}) -
     * this mode is not valid when the output stream is not seekable
     * and the uncompressed size is unknown when {@link
     * #putArchiveEntry} is called.</p>
     *
     * <p>If no entry inside the resulting archive requires Zip64
     * extensions then {@link Zip64Mode#Never Never} will create the
     * smallest archive. {@link Zip64Mode#AsNeeded AsNeeded} will
     * create a slightly bigger archive if the uncompressed size of
     * any entry has initially been unknown and create an archive
     * identical to {@link Zip64Mode#Never Never} otherwise. {@link
     * Zip64Mode#Always Always} will create an archive that is at
     * least 24 bytes per entry bigger than the one {@link
     * Zip64Mode#Never Never} would create.</p>
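     *
     * <p>An illustrative sketch (the file name is made up) of forcing
     * Zip64 extensions for an archive that is expected to exceed 4 GB:</p>
     * <pre>{@code
     * ZipArchiveOutputStream zipOut = new ZipArchiveOutputStream(new File("huge.zip"));
     * zipOut.setUseZip64(Zip64Mode.Always);
     * }</pre>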
     *
     * <p>Defaults to {@link Zip64Mode#AsNeeded AsNeeded} unless
     * {@link #putArchiveEntry} is called with an entry of unknown
     * size and data is written to a non-seekable stream - in this
     * case the default is {@link Zip64Mode#Never Never}.</p>
     *
     * @since 1.3
     * @param mode Whether Zip64 extensions will be used.
     */
    public void setUseZip64(final Zip64Mode mode) {
        zip64Mode = mode;
    }

    /**
     * Whether to add a Zip64 extended information extra field to the
     * local file header.
     *
     * <p>Returns true if</p>
     *
     * <ul>
     * <li>mode is Always</li>
     * <li>or we already know it is going to be needed</li>
     * <li>or the size is unknown and we can ensure it won't hurt
     * other implementations if we add it (i.e. we can erase its
     * usage)</li>
     * </ul>
     */
    private boolean shouldAddZip64Extra(final ZipArchiveEntry entry, final Zip64Mode mode) {
        return mode == Zip64Mode.Always
            || mode == Zip64Mode.AlwaysWithCompatibility
            || entry.getSize() >= ZipConstants.ZIP64_MAGIC
            || entry.getCompressedSize() >= ZipConstants.ZIP64_MAGIC
            || entry.getSize() == ArchiveEntry.SIZE_UNKNOWN
            && channel != null && mode != Zip64Mode.Never;
    }

    /**
     * 4.4.1.4 If one of the fields in the end of central directory
     * record is too small to hold required data, the field SHOULD be
     * set to -1 (0xFFFF or 0xFFFFFFFF) and the ZIP64 format record
     * SHOULD be created.
     * @return true if the zip64 End Of Central Directory is needed
     */
    private boolean shouldUseZip64EOCD() {
        int numberOfThisDisk = 0;
        if (isSplitZip) {
            numberOfThisDisk = ((ZipSplitOutputStream) this.outputStream).getCurrentSplitSegmentIndex();
        }
        final int numOfEntriesOnThisDisk = numberOfCDInDiskData.getOrDefault(numberOfThisDisk, 0);
        return numberOfThisDisk >= ZipConstants.ZIP64_MAGIC_SHORT         /* number of this disk */
            || cdDiskNumberStart >= ZipConstants.ZIP64_MAGIC_SHORT        /* number of the disk with the start of the central directory */
            || numOfEntriesOnThisDisk >= ZipConstants.ZIP64_MAGIC_SHORT   /* total number of entries in the central directory on this disk */
            || entries.size() >= ZipConstants.ZIP64_MAGIC_SHORT           /* total number of entries in the central directory */
            || cdLength >= ZipConstants.ZIP64_MAGIC                       /* size of the central directory */
            || cdOffset >= ZipConstants.ZIP64_MAGIC;                      /* offset of start of central directory with respect to
                                                                             the starting disk number */
    }

    private boolean usesDataDescriptor(final int zipMethod, final boolean phased) {
        return !phased && zipMethod == DEFLATED && channel == null;
    }

    /**
     * If the Zip64 mode is set to Never, verify that none of the fields in the
     * End Of Central Directory record exceed their limits.
    /**
     * If the Zip64 mode is set to {@link Zip64Mode#Never Never}, no field of the
     * End Of Central Directory record may exceed its limit.
     * @throws Zip64RequiredException if Zip64 is actually needed
     */
    private void validateIfZip64IsNeededInEOCD() throws Zip64RequiredException {
        // the exception will only be thrown if the Zip64 mode is Never while Zip64 is actually needed
        if (zip64Mode != Zip64Mode.Never) {
            return;
        }

        int numberOfThisDisk = 0;
        if (isSplitZip) {
            numberOfThisDisk = ((ZipSplitOutputStream) this.outputStream).getCurrentSplitSegmentIndex();
        }
        if (numberOfThisDisk >= ZipConstants.ZIP64_MAGIC_SHORT) {
            throw new Zip64RequiredException(Zip64RequiredException.NUMBER_OF_THIS_DISK_TOO_BIG_MESSAGE);
        }

        if (cdDiskNumberStart >= ZipConstants.ZIP64_MAGIC_SHORT) {
            throw new Zip64RequiredException(Zip64RequiredException.NUMBER_OF_THE_DISK_OF_CENTRAL_DIRECTORY_TOO_BIG_MESSAGE);
        }

        final int numOfEntriesOnThisDisk = numberOfCDInDiskData.getOrDefault(numberOfThisDisk, 0);
        if (numOfEntriesOnThisDisk >= ZipConstants.ZIP64_MAGIC_SHORT) {
            throw new Zip64RequiredException(Zip64RequiredException.TOO_MANY_ENTRIES_ON_THIS_DISK_MESSAGE);
        }

        // number of entries
        if (entries.size() >= ZipConstants.ZIP64_MAGIC_SHORT) {
            throw new Zip64RequiredException(Zip64RequiredException.TOO_MANY_ENTRIES_MESSAGE);
        }

        if (cdLength >= ZipConstants.ZIP64_MAGIC) {
            throw new Zip64RequiredException(Zip64RequiredException.SIZE_OF_CENTRAL_DIRECTORY_TOO_BIG_MESSAGE);
        }

        if (cdOffset >= ZipConstants.ZIP64_MAGIC) {
            throw new Zip64RequiredException(Zip64RequiredException.ARCHIVE_TOO_BIG_MESSAGE);
        }
    }

    /**
     * Throws an exception if the size is unknown for a stored entry
     * that is written to a non-seekable output or the entry is too
     * big to be written without the Zip64 extra field but the mode has
     * been set to Never.
     */
    private void validateSizeInformation(final Zip64Mode effectiveMode) throws ZipException {
        // Size/CRC not required if SeekableByteChannel is used
        if (entry.entry.getMethod() == STORED && channel == null) {
            if (entry.entry.getSize() == ArchiveEntry.SIZE_UNKNOWN) {
                throw new ZipException("Uncompressed size is required for STORED method when not writing to a file");
            }
            if (entry.entry.getCrc() == ZipArchiveEntry.CRC_UNKNOWN) {
                throw new ZipException("CRC checksum is required for STORED method when not writing to a file");
            }
            entry.entry.setCompressedSize(entry.entry.getSize());
        }

        if ((entry.entry.getSize() >= ZipConstants.ZIP64_MAGIC
             || entry.entry.getCompressedSize() >= ZipConstants.ZIP64_MAGIC)
            && effectiveMode == Zip64Mode.Never) {
            throw new Zip64RequiredException(Zip64RequiredException.getEntryTooBigMessage(entry.entry));
        }
    }

    private int versionNeededToExtract(final int zipMethod, final boolean zip64, final boolean usedDataDescriptor) {
        if (zip64) {
            return ZipConstants.ZIP64_MIN_VERSION;
        }
        if (usedDataDescriptor) {
            return ZipConstants.DATA_DESCRIPTOR_MIN_VERSION;
        }
        return versionNeededToExtractMethod(zipMethod);
    }

    private int versionNeededToExtractMethod(final int zipMethod) {
        return zipMethod == DEFLATED ? ZipConstants.DEFLATE_MIN_VERSION : ZipConstants.INITIAL_VERSION;
    }
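
    // Usage sketch, not part of the original source: the write(byte[], int, int)
    // override below is normally driven through the standard ArchiveOutputStream
    // cycle. "hello.txt" and the payload are made-up; "zos" is a ZipArchiveOutputStream
    // as in the earlier sketch.
    //
    //     ZipArchiveEntry e = new ZipArchiveEntry("hello.txt");
    //     byte[] payload = "Hello, ZIP".getBytes(StandardCharsets.UTF_8);
    //     zos.putArchiveEntry(e);
    //     zos.write(payload, 0, payload.length);
    //     zos.closeArchiveEntry();
    //     zos.finish();   // writes the central directory and end-of-central-directory records
    //     zos.close();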
    /**
     * Writes bytes to ZIP entry.
     * @param b the byte array to write
     * @param offset the start position to write from
     * @param length the number of bytes to write
     * @throws IOException on error
     */
    @Override
    public void write(final byte[] b, final int offset, final int length) throws IOException {
        if (entry == null) {
            throw new IllegalStateException("No current entry");
        }
        ZipUtil.checkRequestedFeatures(entry.entry);
        final long writtenThisTime = streamCompressor.write(b, offset, length, entry.entry.getMethod());
        count(writtenThisTime);
    }

    /**
     * Writes the "End of central dir record".
     * @throws IOException on error
     * @throws Zip64RequiredException if the archive's size exceeds 4
     * GByte or there are more than 65535 entries inside the archive
     * and {@link #setUseZip64(Zip64Mode)} is {@link Zip64Mode#Never}.
     */
    protected void writeCentralDirectoryEnd() throws IOException {
        if (!hasUsedZip64 && isSplitZip) {
            ((ZipSplitOutputStream) this.outputStream).prepareToWriteUnsplittableContent(eocdLength);
        }

        validateIfZip64IsNeededInEOCD();

        writeCounted(EOCD_SIG);

        // number of this disk
        int numberOfThisDisk = 0;
        if (isSplitZip) {
            numberOfThisDisk = ((ZipSplitOutputStream) this.outputStream).getCurrentSplitSegmentIndex();
        }
        writeCounted(ZipShort.getBytes(numberOfThisDisk));

        // disk number of the start of central directory
        writeCounted(ZipShort.getBytes((int) cdDiskNumberStart));

        // number of entries
        final int numberOfEntries = entries.size();

        // total number of entries in the central directory on this disk
        final int numOfEntriesOnThisDisk = isSplitZip
            ? numberOfCDInDiskData.getOrDefault(numberOfThisDisk, 0)
            : numberOfEntries;
        final byte[] numOfEntriesOnThisDiskData = ZipShort
            .getBytes(Math.min(numOfEntriesOnThisDisk, ZipConstants.ZIP64_MAGIC_SHORT));
        writeCounted(numOfEntriesOnThisDiskData);

        // number of entries
        final byte[] num = ZipShort.getBytes(Math.min(numberOfEntries, ZipConstants.ZIP64_MAGIC_SHORT));
        writeCounted(num);

        // length and location of CD
        writeCounted(ZipLong.getBytes(Math.min(cdLength, ZipConstants.ZIP64_MAGIC)));
        writeCounted(ZipLong.getBytes(Math.min(cdOffset, ZipConstants.ZIP64_MAGIC)));

        // ZIP file comment
        final ByteBuffer data = this.zipEncoding.encode(comment);
        final int dataLen = data.limit() - data.position();
        writeCounted(ZipShort.getBytes(dataLen));
        streamCompressor.writeCounted(data.array(), data.arrayOffset(), dataLen);
    }

    private void writeCentralDirectoryInChunks() throws IOException {
        final int NUM_PER_WRITE = 1000;
        final ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(70 * NUM_PER_WRITE);
        int count = 0;
        for (final ZipArchiveEntry ze : entries) {
            byteArrayOutputStream.write(createCentralFileHeader(ze));
            if (++count > NUM_PER_WRITE) {
                writeCounted(byteArrayOutputStream.toByteArray());
                byteArrayOutputStream.reset();
                count = 0;
            }
        }
        writeCounted(byteArrayOutputStream.toByteArray());
    }
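
    // Note added for clarity, not in the original source: writeCentralDirectoryInChunks
    // above buffers at most roughly NUM_PER_WRITE (1000) central file headers at a time
    // (the initial buffer assumes about 70 bytes per header) and flushes in between,
    // keeping memory use bounded for archives with very many entries.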
    /**
     * Writes the central file header entry.
     * @param ze the entry to write
     * @throws IOException on error
     * @throws Zip64RequiredException if the archive's size exceeds 4
     * GByte and {@link #setUseZip64(Zip64Mode)} is {@link
     * Zip64Mode#Never}.
     */
    protected void writeCentralFileHeader(final ZipArchiveEntry ze) throws IOException {
        final byte[] centralFileHeader = createCentralFileHeader(ze);
        writeCounted(centralFileHeader);
    }

    /**
     * Write bytes to output or random access file.
     * @param data the byte array to write
     * @throws IOException on error
     */
    private void writeCounted(final byte[] data) throws IOException {
        streamCompressor.writeCounted(data);
    }

    /**
     * Writes the data descriptor entry.
     * @param ze the entry to write
     * @throws IOException on error
     */
    protected void writeDataDescriptor(final ZipArchiveEntry ze) throws IOException {
        if (!usesDataDescriptor(ze.getMethod(), false)) {
            return;
        }
        writeCounted(DD_SIG);
        writeCounted(ZipLong.getBytes(ze.getCrc()));
        if (!hasZip64Extra(ze)) {
            writeCounted(ZipLong.getBytes(ze.getCompressedSize()));
            writeCounted(ZipLong.getBytes(ze.getSize()));
        } else {
            writeCounted(ZipEightByteInteger.getBytes(ze.getCompressedSize()));
            writeCounted(ZipEightByteInteger.getBytes(ze.getSize()));
        }
    }

    /**
     * Writes the local file header entry.
     * @param ze the entry to write
     * @throws IOException on error
     */
    protected void writeLocalFileHeader(final ZipArchiveEntry ze) throws IOException {
        writeLocalFileHeader(ze, false);
    }

    private void writeLocalFileHeader(final ZipArchiveEntry ze, final boolean phased) throws IOException {
        final boolean encodable = zipEncoding.canEncode(ze.getName());
        final ByteBuffer name = getName(ze);

        if (createUnicodeExtraFields != UnicodeExtraFieldPolicy.NEVER) {
            addUnicodeExtraFields(ze, encodable, name);
        }

        long localHeaderStart = streamCompressor.getTotalBytesWritten();
        if (isSplitZip) {
            // when creating a split zip, the offset should be
            // the offset to the corresponding segment disk
            final ZipSplitOutputStream splitOutputStream = (ZipSplitOutputStream) this.outputStream;
            ze.setDiskNumberStart(splitOutputStream.getCurrentSplitSegmentIndex());
            localHeaderStart = splitOutputStream.getCurrentSplitSegmentBytesWritten();
        }

        final byte[] localHeader = createLocalFileHeader(ze, name, encodable, phased, localHeaderStart);
        metaData.put(ze, new EntryMetaData(localHeaderStart, usesDataDescriptor(ze.getMethod(), phased)));
        entry.localDataStart = localHeaderStart + LFH_CRC_OFFSET; // At crc offset
        writeCounted(localHeader);
        entry.dataStart = streamCompressor.getTotalBytesWritten();
    }

    /**
     * Write bytes to output or random access file.
     * @param data the byte array to write
     * @throws IOException on error
     */
    protected final void writeOut(final byte[] data) throws IOException {
        streamCompressor.writeOut(data, 0, data.length);
    }

    /**
     * Write bytes to output or random access file.
     * @param data the byte array to write
     * @param offset the start position to write from
     * @param length the number of bytes to write
     * @throws IOException on error
     */
    protected final void writeOut(final byte[] data, final int offset, final int length) throws IOException {
        streamCompressor.writeOut(data, offset, length);
    }
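
    // Note added for clarity, not in the original source: the data descriptor emitted by
    // writeDataDescriptor above is the DD signature followed by the CRC-32, the compressed
    // size and the uncompressed size; the two size fields are written as 4-byte values for
    // ordinary entries and as 8-byte values once the entry carries a Zip64 extra field.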
    /**
     * Write preamble data. Most of the time, this is used to make
     * self-extracting ZIPs.
     *
     * @param preamble data to write
     * @throws IOException on error
     * @throws IllegalStateException if an entry has already been started
     * @since 1.21
     */
    public void writePreamble(final byte[] preamble) throws IOException {
        writePreamble(preamble, 0, preamble.length);
    }

    /**
     * Write preamble data. Most of the time, this is used to make
     * self-extracting ZIPs.
     *
     * @param preamble data to write
     * @param offset the start offset in the data
     * @param length the number of bytes to write
     * @throws IOException on error
     * @throws IllegalStateException if an entry has already been started
     * @since 1.21
     */
    public void writePreamble(final byte[] preamble, final int offset, final int length) throws IOException {
        if (entry != null) {
            throw new IllegalStateException("Preamble must be written before creating an entry");
        }
        this.streamCompressor.writeCounted(preamble, offset, length);
    }
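
    // Usage sketch, not part of the original source: prepending an executable stub to
    // create a self-extracting archive. "stub.bin" is a made-up path and the snippet
    // assumes the usual java.nio.file imports; the preamble has to be written before
    // the first call to putArchiveEntry.
    //
    //     byte[] stub = Files.readAllBytes(Paths.get("stub.bin"));
    //     zos.writePreamble(stub);
    //     zos.putArchiveEntry(new ZipArchiveEntry("payload.bin"));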
    /**
     * Writes the "ZIP64 End of central dir record" and
     * "ZIP64 End of central dir locator".
     * @throws IOException on error
     * @since 1.3
     */
    protected void writeZip64CentralDirectory() throws IOException {
        if (zip64Mode == Zip64Mode.Never) {
            return;
        }

        if (!hasUsedZip64 && shouldUseZip64EOCD()) {
            // actually "will use"
            hasUsedZip64 = true;
        }

        if (!hasUsedZip64) {
            return;
        }

        long offset = streamCompressor.getTotalBytesWritten();
        long diskNumberStart = 0L;
        if (isSplitZip) {
            // when creating a split zip, the offset should be
            // the offset to the corresponding segment disk
            final ZipSplitOutputStream zipSplitOutputStream = (ZipSplitOutputStream) this.outputStream;
            offset = zipSplitOutputStream.getCurrentSplitSegmentBytesWritten();
            diskNumberStart = zipSplitOutputStream.getCurrentSplitSegmentIndex();
        }

        writeOut(ZIP64_EOCD_SIG);
        // size of zip64 end of central directory, we don't have any variable length
        // as we don't support the extensible data sector, yet
        writeOut(ZipEightByteInteger
            .getBytes(ZipConstants.SHORT    /* version made by */
                + ZipConstants.SHORT        /* version needed to extract */
                + ZipConstants.WORD         /* disk number */
                + ZipConstants.WORD         /* disk with central directory */
                + ZipConstants.DWORD        /* number of entries in CD on this disk */
                + ZipConstants.DWORD        /* total number of entries */
                + ZipConstants.DWORD        /* size of CD */
                + (long) ZipConstants.DWORD /* offset of CD */
            ));

        // version made by and version needed to extract
        writeOut(ZipShort.getBytes(ZipConstants.ZIP64_MIN_VERSION));
        writeOut(ZipShort.getBytes(ZipConstants.ZIP64_MIN_VERSION));

        // number of this disk
        int numberOfThisDisk = 0;
        if (isSplitZip) {
            numberOfThisDisk = ((ZipSplitOutputStream) this.outputStream).getCurrentSplitSegmentIndex();
        }
        writeOut(ZipLong.getBytes(numberOfThisDisk));

        // disk number of the start of central directory
        writeOut(ZipLong.getBytes(cdDiskNumberStart));

        // total number of entries in the central directory on this disk
        final int numOfEntriesOnThisDisk = isSplitZip
            ? numberOfCDInDiskData.getOrDefault(numberOfThisDisk, 0)
            : entries.size();
        final byte[] numOfEntriesOnThisDiskData = ZipEightByteInteger.getBytes(numOfEntriesOnThisDisk);
        writeOut(numOfEntriesOnThisDiskData);

        // number of entries
        final byte[] num = ZipEightByteInteger.getBytes(entries.size());
        writeOut(num);

        // length and location of CD
        writeOut(ZipEightByteInteger.getBytes(cdLength));
        writeOut(ZipEightByteInteger.getBytes(cdOffset));

        // no "zip64 extensible data sector" for now

        if (isSplitZip) {
            // based on the ZIP specification, the End Of Central Directory record and
            // the Zip64 End Of Central Directory locator record must be on the same segment
            final int zip64EOCDLOCLength = ZipConstants.WORD /* length of ZIP64_EOCD_LOC_SIG */
                + ZipConstants.WORD                          /* disk number of ZIP64_EOCD_SIG */
                + ZipConstants.DWORD                         /* offset of ZIP64_EOCD_SIG */
                + ZipConstants.WORD                          /* total number of disks */;

            final long unsplittableContentSize = zip64EOCDLOCLength + eocdLength;
            ((ZipSplitOutputStream) this.outputStream).prepareToWriteUnsplittableContent(unsplittableContentSize);
        }

        // and now the "ZIP64 end of central directory locator"
        writeOut(ZIP64_EOCD_LOC_SIG);

        // disk number holding the ZIP64 EOCD record
        writeOut(ZipLong.getBytes(diskNumberStart));
        // relative offset of ZIP64 EOCD record
        writeOut(ZipEightByteInteger.getBytes(offset));
        // total number of disks
        if (isSplitZip) {
            // the Zip64 End Of Central Directory Locator and the End Of Central Directory must be
            // on the same split segment, which means they must be located in the last disk
            final int totalNumberOfDisks = ((ZipSplitOutputStream) this.outputStream).getCurrentSplitSegmentIndex() + 1;
            writeOut(ZipLong.getBytes(totalNumberOfDisks));
        } else {
            writeOut(ONE);
        }
    }
}