Merged · Changes from 2 commits
@@ -525,11 +525,10 @@ private bool WriteCentralDirectoryFileHeaderInitialize(bool forceWrite, out Zip6
     {
         offsetOfLocalHeaderTruncated = ZipHelper.Mask32Bit;

-        // If we have one of the sizes, the other must go in there as speced for LH, but not necessarily for CH, but we do it anyways
-        zip64ExtraField = new()
-        {
-            LocalHeaderOffset = _offsetOfLocalHeader
-        };
+        // If we already created a zip64 extra field for sizes, add the offset to it
+        // Otherwise, create a new one for just the offset
+        zip64ExtraField ??= new();
+        zip64ExtraField.LocalHeaderOffset = _offsetOfLocalHeader;
     }
     else
     {
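
Aside, to illustrate the fix above: a minimal standalone sketch of why `??=` matters here. The `Zip64Extra` type below is a hypothetical stand-in, not the runtime's internal `Zip64ExtraField`; it only shows that reassigning the field for the offset (old behavior) discards sizes recorded earlier, while null-coalescing assignment (new behavior) preserves them.

using System;

// Hypothetical stand-in for the runtime's internal zip64 extra field type,
// used only to illustrate the difference between `=` and `??=` in the diff above.
class Zip64Extra
{
    public long? UncompressedSize;
    public long? CompressedSize;
    public long? LocalHeaderOffset;
}

class Zip64OffsetDemo
{
    static void Main()
    {
        // The sizes already exceeded 32 bits, so a zip64 extra field exists.
        Zip64Extra? extra = new() { UncompressedSize = 5_000_000_000, CompressedSize = 5_000_000_000 };
        long offsetOfLocalHeader = 6_000_000_000; // the offset also exceeds 32 bits

        // Old behavior: creating a fresh field for the offset silently drops the sizes.
        Zip64Extra oldField = new() { LocalHeaderOffset = offsetOfLocalHeader };

        // New behavior: reuse the existing field (or create one if none exists yet).
        extra ??= new();
        extra.LocalHeaderOffset = offsetOfLocalHeader;

        Console.WriteLine($"old field keeps sizes: {oldField.UncompressedSize.HasValue}"); // False
        Console.WriteLine($"new field keeps sizes: {extra.UncompressedSize.HasValue}");    // True
    }
}
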
@@ -126,4 +126,78 @@ public static async Task CheckZIP64VersionIsSet_ForSmallFilesAfterBigFiles_Async
             File.Delete(zipArchivePath);
         }
     }
+
+    [ConditionalFact(typeof(PlatformDetection), nameof(PlatformDetection.IsNotMobile), nameof(PlatformDetection.Is64BitProcess))]
+    [OuterLoop("It requires ~11 GB of free disk space")]
+    public static async Task LargeFile_At_LargeOffset_ZIP64_HeaderPreservation()
+    {
+        // Regression test for https://github.com/dotnet/runtime/issues/114205
+        // When a large file (>4GB) is placed at an offset >4GB, both the sizes
+        // and the offset need ZIP64 extra field entries in the central directory.
+        // Previously, the offset handling would overwrite the sizes, causing corruption.
+
+        byte[] buffer = GC.AllocateUninitializedArray<byte>(1_000_000_000); // 1 GB
+        string zipArchivePath = Path.Combine(Path.GetTempPath(), "largeFileAtLargeOffset.zip");
+
+        try
+        {
+            using (FileStream fs = File.Open(zipArchivePath, FileMode.Create, FileAccess.ReadWrite))
+            await using (ZipArchive archive = await ZipArchive.CreateAsync(fs, ZipArchiveMode.Create, leaveOpen: true, entryNameEncoding: null))
+            {
+                // First, write small files totaling >4GB to push the offset past 4GB.
+                // Write 5 x 1GB files = 5GB of data to ensure the offset exceeds 4GB.
+                for (int i = 0; i < 5; i++)
+                {
+                    ZipArchiveEntry smallEntry = archive.CreateEntry($"prefix_{i}.bin", CompressionLevel.NoCompression);
+                    await using (Stream stream = await smallEntry.OpenAsync())
+                    {
+                        await stream.WriteAsync(buffer);
+                    }
+                }
+
+                // Now write a large file (>4GB) at an offset that is also >4GB.
+                // This triggers both the AreSizesTooLarge and IsOffsetTooLarge conditions.
+                ZipArchiveEntry largeEntry = archive.CreateEntry("largefile.bin", CompressionLevel.NoCompression);
+                await using (Stream stream = await largeEntry.OpenAsync())
+                {
+                    // Write 5GB of data (5 x 1GB chunks)
+                    for (int i = 0; i < 5; i++)
+                    {
+                        await stream.WriteAsync(buffer);
+                    }
+                }
+            }
+
+            // Verify the archive can be read back without corruption
+            await using (FileStream fs = File.OpenRead(zipArchivePath))
+            await using (ZipArchive archive = await ZipArchive.CreateAsync(fs, ZipArchiveMode.Read, leaveOpen: false, entryNameEncoding: null))
+            {
+                Assert.Equal(6, archive.Entries.Count);
+
+                // Verify each entry can be opened and has the correct length
+                foreach (ZipArchiveEntry entry in archive.Entries)
+                {
+                    await using Stream entryStream = await entry.OpenAsync();
+
+                    if (entry.Name.StartsWith("prefix_"))
+                    {
+                        Assert.Equal(buffer.Length, entry.Length);
+                    }
+                    else if (entry.Name == "largefile.bin")
+                    {
+                        Assert.Equal(5L * buffer.Length, entry.Length);
+                    }
+
+                    // Verify we can read from the stream (this would throw if the header were corrupt)
+                    byte[] readBuffer = new byte[1024];
+                    int bytesRead = await entryStream.ReadAsync(readBuffer);
+                    Assert.True(bytesRead > 0);
+                }
+            }
+        }
+        finally
+        {
+            File.Delete(zipArchivePath);
+        }
+    }
 }
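
For reference, the on-disk structure this test exercises: in the ZIP specification (APPNOTE 4.5.3), the zip64 extended information extra field is tagged 0x0001 and carries an 8-byte value for each of the uncompressed size, compressed size, and local header offset that no longer fits in its 32-bit central directory field, so an entry like the one this test creates needs all three. Below is a rough sketch of laying out such a field; the helper name is hypothetical and this is not the runtime's implementation.

using System;
using System.Buffers.Binary;

class Zip64FieldSketch
{
    // Hypothetical helper: lays out a zip64 extended information extra field
    // carrying uncompressed size, compressed size, and local header offset.
    static byte[] BuildZip64ExtraField(long uncompressedSize, long compressedSize, long localHeaderOffset)
    {
        const ushort HeaderId = 0x0001;            // zip64 extended information tag
        const ushort DataSize = 3 * sizeof(long);  // three 8-byte values = 24 bytes
        byte[] field = new byte[4 + DataSize];

        BinaryPrimitives.WriteUInt16LittleEndian(field.AsSpan(0, 2), HeaderId);
        BinaryPrimitives.WriteUInt16LittleEndian(field.AsSpan(2, 2), DataSize);
        BinaryPrimitives.WriteInt64LittleEndian(field.AsSpan(4, 8), uncompressedSize);
        BinaryPrimitives.WriteInt64LittleEndian(field.AsSpan(12, 8), compressedSize);
        BinaryPrimitives.WriteInt64LittleEndian(field.AsSpan(20, 8), localHeaderOffset);
        return field;
    }

    static void Main()
    {
        // Roughly the shape needed for the test's "largefile.bin" entry:
        // ~5 GB of data stored past the 4 GB mark.
        byte[] extra = BuildZip64ExtraField(5_000_000_000, 5_000_000_000, 5_000_000_000);
        Console.WriteLine($"zip64 extra field length: {extra.Length} bytes"); // 28
    }
}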