f.Extra = d[filenameLen : filenameLen+extraLen]
f.Comment = string(d[filenameLen+extraLen:])
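+ // A 32-bit size or offset of all ones (0xFFFFFFFF) means the real value lives in the zip64 extra block.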
+ needUSize := f.UncompressedSize == ^uint32(0)
+ needCSize := f.CompressedSize == ^uint32(0)
+ needHeaderOffset := f.headerOffset == int64(^uint32(0))
+
if len(f.Extra) > 0 {
+ // Best effort to find what we need.
+ // Other zip authors might not even follow the basic format,
+ // and we'll just ignore the Extra content in that case.
b := readBuf(f.Extra)
for len(b) >= 4 { // need at least tag and size
tag := b.uint16()
size := b.uint16()
if int(size) > len(b) {
- return ErrFormat
+ break
}
if tag == zip64ExtraId {
- // update directory values from the zip64 extra block
+ // update directory values from the zip64 extra block.
+ // They should only be consulted if the sizes read earlier
+ // are maxed out.
+ // See golang.org/issue/13367.
eb := readBuf(b[:size])
- if len(eb) >= 8 {
+
+ if needUSize {
+ needUSize = false
+ if len(eb) < 8 {
+ return ErrFormat
+ }
f.UncompressedSize64 = eb.uint64()
}
- if len(eb) >= 8 {
+ if needCSize {
+ needCSize = false
+ if len(eb) < 8 {
+ return ErrFormat
+ }
f.CompressedSize64 = eb.uint64()
}
- if len(eb) >= 8 {
+ if needHeaderOffset {
+ needHeaderOffset = false
+ if len(eb) < 8 {
+ return ErrFormat
+ }
f.headerOffset = int64(eb.uint64())
}
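+ // The zip64 block has been handled; stop scanning the remaining extra fields.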
+ break
}
b = b[size:]
}
- // Should have consumed the whole header.
- // But popular zip & JAR creation tools are broken and
- // may pad extra zeros at the end, so accept those
- // too. See golang.org/issue/8186.
- for _, v := range b {
- if v != 0 {
- return ErrFormat
- }
- }
}
+
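+ // A size or offset that is still "needed" was saturated but had no zip64 value to back it;
+ // treat the directory entry as malformed.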
+ if needUSize || needCSize || needHeaderOffset {
+ return ErrFormat
+ }
+
return nil
}
testZip64DirectoryRecordLength(buf, t)
}
+func TestZip64EdgeCase(t *testing.T) {
+ if testing.Short() {
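+ // Writing just under 4 GB through the zip writer is slow, even though
+ // rleBuffer stores the data run-length encoded.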
+ t.Skip("slow test; skipping")
+ }
+ // Test a zip file with uncompressed size 0xFFFFFFFF.
+ // That's the magic marker for a 64-bit file, so even though
+ // it fits in a 32-bit field we must use the 64-bit field.
+ // Go 1.5 and earlier got this wrong,
+ // writing an invalid zip file.
+ const size = 1<<32 - 1 - int64(len("END\n")) // before the "END\n" part
+ buf := testZip64(t, size)
+ testZip64DirectoryRecordLength(buf, t)
+}
+
func testZip64(t testing.TB, size int64) *rleBuffer {
const chunkSize = 1024
chunks := int(size / chunkSize)
- // write 2^32 bytes plus "END\n" to a zip file
+ // write size bytes plus "END\n" to a zip file
buf := new(rleBuffer)
w := NewWriter(buf)
f, err := w.CreateHeader(&FileHeader{
t.Fatal("write chunk:", err)
}
}
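+ // Write the leftover partial chunk so that exactly size bytes precede the "END\n" marker.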
+ if frag := int(size % chunkSize); frag > 0 {
+ _, err := f.Write(chunk[:frag])
+ if err != nil {
+ t.Fatal("write chunk:", err)
+ }
+ }
end := []byte("END\n")
_, err = f.Write(end)
if err != nil {
t.Fatal("read:", err)
}
}
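+ // Read back the leftover partial chunk before checking for the "END\n" marker.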
+ if frag := int(size % chunkSize); frag > 0 {
+ _, err := io.ReadFull(rc, chunk[:frag])
+ if err != nil {
+ t.Fatal("read:", err)
+ }
+ }
gotEnd, err := ioutil.ReadAll(rc)
if err != nil {
t.Fatal("read end:", err)
if err != nil {
t.Fatal("closing:", err)
}
- if size == 1<<32 {
+ if size+int64(len("END\n")) >= 1<<32-1 {
if got, want := f0.UncompressedSize, uint32(uint32max); got != want {
- t.Errorf("UncompressedSize %d, want %d", got, want)
+ t.Errorf("UncompressedSize %#x, want %#x", got, want)
}
}
if got, want := f0.UncompressedSize64, uint64(size)+uint64(len(end)); got != want {
- t.Errorf("UncompressedSize64 %d, want %d", got, want)
+ t.Errorf("UncompressedSize64 %#x, want %#x", got, want)
}
return buf
}
b := buf.Bytes()
- if _, err = NewReader(bytes.NewReader(b), int64(len(b))); err != nil {
+ zf, err := NewReader(bytes.NewReader(b), int64(len(b)))
+ if err != nil {
t.Fatalf("got %v, expected nil", err)
}
+ zh := zf.File[0].FileHeader
+ if zh.Name != h.Name || zh.Method != h.Method || zh.UncompressedSize64 != uint64(len("hi")) {
+ t.Fatalf("got %q/%d/%d expected %q/%d/%d", zh.Name, zh.Method, zh.UncompressedSize64, h.Name, h.Method, len("hi"))
+ }
}
// Issue 4302.
h := FileHeader{
Name: filename,
Method: Deflate,
- Extra: []byte(ts.Format(time.RFC3339Nano)), // missing tag and len
+ Extra: []byte(ts.Format(time.RFC3339Nano)), // missing tag and len, but Extra is best-effort parsing
}
h.SetModTime(ts)
- testInvalidHeader(&h, t)
+ testValidHeader(&h, t)
}
func TestHeaderTooShort(t *testing.T) {
h := FileHeader{
Name: "foo.txt",
Method: Deflate,
- Extra: []byte{zip64ExtraId}, // missing size
+ Extra: []byte{zip64ExtraId}, // missing size and second half of tag, but Extra is best-effort parsing
}
- testInvalidHeader(&h, t)
+ testValidHeader(&h, t)
+}
+
+func TestHeaderIgnoredSize(t *testing.T) {
+ h := FileHeader{
+ Name: "foo.txt",
+ Method: Deflate,
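+ // The zip64 block below has a well-formed tag and length, but its size values are garbage;
+ // they must be ignored because the 32-bit sizes above are not saturated.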
+ Extra: []byte{zip64ExtraId & 0xFF, zip64ExtraId >> 8, 24, 0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8}, // bad size but shouldn't be consulted
+ }
+ testValidHeader(&h, t)
}
// Issue 4393. It is valid to have an extra data header