Diffstat (limited to 'llgo/third_party/gofrontend/libgo/go/archive/zip')
| -rw-r--r-- | llgo/third_party/gofrontend/libgo/go/archive/zip/reader.go | 32 |
| -rw-r--r-- | llgo/third_party/gofrontend/libgo/go/archive/zip/reader_test.go | 74 |
| -rw-r--r-- | llgo/third_party/gofrontend/libgo/go/archive/zip/struct.go | 6 |
| -rw-r--r-- | llgo/third_party/gofrontend/libgo/go/archive/zip/testdata/readme.notzip | bin 1905 -> 1906 bytes |
| -rw-r--r-- | llgo/third_party/gofrontend/libgo/go/archive/zip/testdata/readme.zip | bin 1885 -> 1886 bytes |
| -rw-r--r-- | llgo/third_party/gofrontend/libgo/go/archive/zip/writer.go | 37 |
| -rw-r--r-- | llgo/third_party/gofrontend/libgo/go/archive/zip/writer_test.go | 35 |
| -rw-r--r-- | llgo/third_party/gofrontend/libgo/go/archive/zip/zip_test.go | 36 |
8 files changed, 198 insertions, 22 deletions
diff --git a/llgo/third_party/gofrontend/libgo/go/archive/zip/reader.go b/llgo/third_party/gofrontend/libgo/go/archive/zip/reader.go
index 8136b840d45..519748bac40 100644
--- a/llgo/third_party/gofrontend/libgo/go/archive/zip/reader.go
+++ b/llgo/third_party/gofrontend/libgo/go/archive/zip/reader.go
@@ -8,6 +8,7 @@ import (
 	"bufio"
 	"encoding/binary"
 	"errors"
+	"fmt"
 	"hash"
 	"hash/crc32"
 	"io"
@@ -77,6 +78,9 @@ func (z *Reader) init(r io.ReaderAt, size int64) error {
 	if err != nil {
 		return err
 	}
+	if end.directoryRecords > uint64(size)/fileHeaderLen {
+		return fmt.Errorf("archive/zip: TOC declares impossible %d files in %d byte zip", end.directoryRecords, size)
+	}
 	z.r = r
 	z.File = make([]*File, 0, end.directoryRecords)
 	z.Comment = end.comment
@@ -146,16 +150,22 @@ func (f *File) Open() (rc io.ReadCloser, err error) {
 	if f.hasDataDescriptor() {
 		desr = io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset+size, dataDescriptorLen)
 	}
-	rc = &checksumReader{rc, crc32.NewIEEE(), f, desr, nil}
+	rc = &checksumReader{
+		rc:   rc,
+		hash: crc32.NewIEEE(),
+		f:    f,
+		desr: desr,
+	}
 	return
 }
 
 type checksumReader struct {
-	rc   io.ReadCloser
-	hash hash.Hash32
-	f    *File
-	desr io.Reader // if non-nil, where to read the data descriptor
-	err  error     // sticky error
+	rc    io.ReadCloser
+	hash  hash.Hash32
+	nread uint64 // number of bytes read so far
+	f     *File
+	desr  io.Reader // if non-nil, where to read the data descriptor
+	err   error     // sticky error
 }
 
 func (r *checksumReader) Read(b []byte) (n int, err error) {
@@ -164,13 +174,21 @@ func (r *checksumReader) Read(b []byte) (n int, err error) {
 	}
 	n, err = r.rc.Read(b)
 	r.hash.Write(b[:n])
+	r.nread += uint64(n)
 	if err == nil {
 		return
 	}
 	if err == io.EOF {
+		if r.nread != r.f.UncompressedSize64 {
+			return 0, io.ErrUnexpectedEOF
+		}
 		if r.desr != nil {
 			if err1 := readDataDescriptor(r.desr, r.f); err1 != nil {
-				err = err1
+				if err1 == io.EOF {
+					err = io.ErrUnexpectedEOF
+				} else {
+					err = err1
+				}
 			} else if r.hash.Sum32() != r.f.CRC32 {
 				err = ErrChecksum
 			}
diff --git a/llgo/third_party/gofrontend/libgo/go/archive/zip/reader_test.go b/llgo/third_party/gofrontend/libgo/go/archive/zip/reader_test.go
index 29d0652dcc1..547dd39048e 100644
--- a/llgo/third_party/gofrontend/libgo/go/archive/zip/reader_test.go
+++ b/llgo/third_party/gofrontend/libgo/go/archive/zip/reader_test.go
@@ -531,3 +531,77 @@ func TestIssue8186(t *testing.T) {
 		}
 	}
 }
+
+// Verify we return ErrUnexpectedEOF when length is short.
+func TestIssue10957(t *testing.T) {
+	data := []byte("PK\x03\x040000000PK\x01\x0200000" +
+		"0000000000000000000\x00" +
+		"\x00\x00\x00\x00\x00000000000000PK\x01" +
+		"\x020000000000000000000" +
+		"00000\v\x00\x00\x00\x00\x00000000000" +
+		"00000000000000PK\x01\x0200" +
+		"00000000000000000000" +
+		"00\v\x00\x00\x00\x00\x00000000000000" +
+		"00000000000PK\x01\x020000<" +
+		"0\x00\x0000000000000000\v\x00\v" +
+		"\x00\x00\x00\x00\x0000000000\x00\x00\x00\x00000" +
+		"00000000PK\x01\x0200000000" +
+		"0000000000000000\v\x00\x00\x00" +
+		"\x00\x0000PK\x05\x06000000\x05\x000000" +
+		"\v\x00\x00\x00\x00\x00")
+	z, err := NewReader(bytes.NewReader(data), int64(len(data)))
+	if err != nil {
+		t.Fatal(err)
+	}
+	for i, f := range z.File {
+		r, err := f.Open()
+		if err != nil {
+			continue
+		}
+		if f.UncompressedSize64 < 1e6 {
+			n, err := io.Copy(ioutil.Discard, r)
+			if i == 3 && err != io.ErrUnexpectedEOF {
+				t.Errorf("File[3] error = %v; want io.ErrUnexpectedEOF", err)
+			}
+			if err == nil && uint64(n) != f.UncompressedSize64 {
+				t.Errorf("file %d: bad size: copied=%d; want=%d", i, n, f.UncompressedSize64)
+			}
+		}
+		r.Close()
+	}
+}
+
+// Verify the number of files is sane.
+func TestIssue10956(t *testing.T) {
+	data := []byte("PK\x06\x06PK\x06\a0000\x00\x00\x00\x00\x00\x00\x00\x00" +
+		"0000PK\x05\x06000000000000" +
+		"0000\v\x00000\x00\x00\x00\x00\x00\x00\x000")
+	_, err := NewReader(bytes.NewReader(data), int64(len(data)))
+	const want = "TOC declares impossible 3472328296227680304 files in 57 byte"
+	if err == nil && !strings.Contains(err.Error(), want) {
+		t.Errorf("error = %v; want %q", err, want)
+	}
+}
+
+// Verify we return ErrUnexpectedEOF when reading truncated data descriptor.
+func TestIssue11146(t *testing.T) {
+	data := []byte("PK\x03\x040000000000000000" +
+		"000000\x01\x00\x00\x000\x01\x00\x00\xff\xff0000" +
+		"0000000000000000PK\x01\x02" +
+		"0000\b0\b\x00000000000000" +
+		"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x000000PK\x05\x06\x00\x00" +
+		"\x00\x0000\x01\x0000008\x00\x00\x00\x00\x00")
+	z, err := NewReader(bytes.NewReader(data), int64(len(data)))
+	if err != nil {
+		t.Fatal(err)
+	}
+	r, err := z.File[0].Open()
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = ioutil.ReadAll(r)
+	if err != io.ErrUnexpectedEOF {
+		t.Errorf("File[0] error = %v; want io.ErrUnexpectedEOF", err)
+	}
+	r.Close()
+}
diff --git a/llgo/third_party/gofrontend/libgo/go/archive/zip/struct.go b/llgo/third_party/gofrontend/libgo/go/archive/zip/struct.go
index cb28e832423..137d0495fd9 100644
--- a/llgo/third_party/gofrontend/libgo/go/archive/zip/struct.go
+++ b/llgo/third_party/gofrontend/libgo/go/archive/zip/struct.go
@@ -81,8 +81,8 @@ type FileHeader struct {
 	ModifiedTime       uint16 // MS-DOS time
 	ModifiedDate       uint16 // MS-DOS date
 	CRC32              uint32
-	CompressedSize     uint32 // deprecated; use CompressedSize64
-	UncompressedSize   uint32 // deprecated; use UncompressedSize64
+	CompressedSize     uint32 // Deprecated: Use CompressedSize64 instead.
+	UncompressedSize   uint32 // Deprecated: Use UncompressedSize64 instead.
 	CompressedSize64   uint64
 	UncompressedSize64 uint64
 	Extra              []byte
@@ -233,7 +233,7 @@ func (h *FileHeader) SetMode(mode os.FileMode) {
 	}
 }
 
-// isZip64 returns true if the file size exceeds the 32 bit limit
+// isZip64 reports whether the file size exceeds the 32 bit limit
 func (fh *FileHeader) isZip64() bool {
 	return fh.CompressedSize64 > uint32max || fh.UncompressedSize64 > uint32max
 }
diff --git a/llgo/third_party/gofrontend/libgo/go/archive/zip/testdata/readme.notzip b/llgo/third_party/gofrontend/libgo/go/archive/zip/testdata/readme.notzip
index 06668c4c1c0..81737275c6e 100644
--- a/llgo/third_party/gofrontend/libgo/go/archive/zip/testdata/readme.notzip
+++ b/llgo/third_party/gofrontend/libgo/go/archive/zip/testdata/readme.notzip
Binary files differ
diff --git a/llgo/third_party/gofrontend/libgo/go/archive/zip/testdata/readme.zip b/llgo/third_party/gofrontend/libgo/go/archive/zip/testdata/readme.zip
index db3bb900e4e..5642a67e77d 100644
--- a/llgo/third_party/gofrontend/libgo/go/archive/zip/testdata/readme.zip
+++ b/llgo/third_party/gofrontend/libgo/go/archive/zip/testdata/readme.zip
Binary files differ
diff --git a/llgo/third_party/gofrontend/libgo/go/archive/zip/writer.go b/llgo/third_party/gofrontend/libgo/go/archive/zip/writer.go
index 170beec0eec..3be2b5fdb2f 100644
--- a/llgo/third_party/gofrontend/libgo/go/archive/zip/writer.go
+++ b/llgo/third_party/gofrontend/libgo/go/archive/zip/writer.go
@@ -34,6 +34,17 @@ func NewWriter(w io.Writer) *Writer {
 	return &Writer{cw: &countWriter{w: bufio.NewWriter(w)}}
 }
 
+// SetOffset sets the offset of the beginning of the zip data within the
+// underlying writer. It should be used when the zip data is appended to an
+// existing file, such as a binary executable.
+// It must be called before any data is written.
+func (w *Writer) SetOffset(n int64) {
+	if w.cw.count != 0 {
+		panic("zip: SetOffset called after data was written")
+	}
+	w.cw.count = n
+}
+
 // Flush flushes any buffered data to the underlying writer.
 // Calling Flush is not normally necessary; calling Close is sufficient.
 func (w *Writer) Flush() error {
@@ -122,15 +133,15 @@ func (w *Writer) Close() error {
 
 		// zip64 end of central directory record
 		b.uint32(directory64EndSignature)
-		b.uint64(directory64EndLen)
-		b.uint16(zipVersion45) // version made by
-		b.uint16(zipVersion45) // version needed to extract
-		b.uint32(0)            // number of this disk
-		b.uint32(0)            // number of the disk with the start of the central directory
-		b.uint64(records)      // total number of entries in the central directory on this disk
-		b.uint64(records)      // total number of entries in the central directory
-		b.uint64(size)         // size of the central directory
-		b.uint64(offset)       // offset of start of central directory with respect to the starting disk number
+		b.uint64(directory64EndLen - 12) // length minus signature (uint32) and length fields (uint64)
+		b.uint16(zipVersion45)           // version made by
+		b.uint16(zipVersion45)           // version needed to extract
+		b.uint32(0)                      // number of this disk
+		b.uint32(0)                      // number of the disk with the start of the central directory
+		b.uint64(records)                // total number of entries in the central directory on this disk
+		b.uint64(records)                // total number of entries in the central directory
+		b.uint64(size)                   // size of the central directory
+		b.uint64(offset)                 // offset of start of central directory with respect to the starting disk number
 
 		// zip64 end of central directory locator
 		b.uint32(directory64LocSignature)
@@ -184,14 +195,20 @@ func (w *Writer) Create(name string) (io.Writer, error) {
 // CreateHeader adds a file to the zip file using the provided FileHeader
 // for the file metadata.
 // It returns a Writer to which the file contents should be written.
+//
 // The file's contents must be written to the io.Writer before the next
-// call to Create, CreateHeader, or Close.
+// call to Create, CreateHeader, or Close. The provided FileHeader fh
+// must not be modified after a call to CreateHeader.
 func (w *Writer) CreateHeader(fh *FileHeader) (io.Writer, error) {
 	if w.last != nil && !w.last.closed {
 		if err := w.last.close(); err != nil {
 			return nil, err
 		}
 	}
+	if len(w.dir) > 0 && w.dir[len(w.dir)-1].FileHeader == fh {
+		// See https://golang.org/issue/11144 confusion.
+		return nil, errors.New("archive/zip: invalid duplicate FileHeader")
+	}
 
 	fh.Flags |= 0x8 // we will write a data descriptor
 
diff --git a/llgo/third_party/gofrontend/libgo/go/archive/zip/writer_test.go b/llgo/third_party/gofrontend/libgo/go/archive/zip/writer_test.go
index 184a7d96a7f..01b63f2358d 100644
--- a/llgo/third_party/gofrontend/libgo/go/archive/zip/writer_test.go
+++ b/llgo/third_party/gofrontend/libgo/go/archive/zip/writer_test.go
@@ -87,6 +87,41 @@ func TestWriter(t *testing.T) {
 	}
 }
 
+func TestWriterOffset(t *testing.T) {
+	largeData := make([]byte, 1<<17)
+	for i := range largeData {
+		largeData[i] = byte(rand.Int())
+	}
+	writeTests[1].Data = largeData
+	defer func() {
+		writeTests[1].Data = nil
+	}()
+
+	// write a zip file
+	buf := new(bytes.Buffer)
+	existingData := []byte{1, 2, 3, 1, 2, 3, 1, 2, 3}
+	n, _ := buf.Write(existingData)
+	w := NewWriter(buf)
+	w.SetOffset(int64(n))
+
+	for _, wt := range writeTests {
+		testCreate(t, w, &wt)
+	}
+
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	// read it back
+	r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
+	if err != nil {
+		t.Fatal(err)
+	}
+	for i, wt := range writeTests {
+		testReadFile(t, r.File[i], &wt)
+	}
+}
+
 func TestWriterFlush(t *testing.T) {
 	var buf bytes.Buffer
 	w := NewWriter(struct{ io.Writer }{&buf})
diff --git a/llgo/third_party/gofrontend/libgo/go/archive/zip/zip_test.go b/llgo/third_party/gofrontend/libgo/go/archive/zip/zip_test.go
index 32a16a79efb..f00ff47d37e 100644
--- a/llgo/third_party/gofrontend/libgo/go/archive/zip/zip_test.go
+++ b/llgo/third_party/gofrontend/libgo/go/archive/zip/zip_test.go
@@ -229,10 +229,11 @@ func TestZip64(t *testing.T) {
 		t.Skip("slow test; skipping")
 	}
 	const size = 1 << 32 // before the "END\n" part
-	testZip64(t, size)
+	buf := testZip64(t, size)
+	testZip64DirectoryRecordLength(buf, t)
 }
 
-func testZip64(t testing.TB, size int64) {
+func testZip64(t testing.TB, size int64) *rleBuffer {
 	const chunkSize = 1024
 	chunks := int(size / chunkSize)
 	// write 2^32 bytes plus "END\n" to a zip file
@@ -302,6 +303,37 @@ func testZip64(t testing.TB, size int64) {
 	if got, want := f0.UncompressedSize64, uint64(size)+uint64(len(end)); got != want {
 		t.Errorf("UncompressedSize64 %d, want %d", got, want)
 	}
+
+	return buf
+}
+
+// Issue 9857
+func testZip64DirectoryRecordLength(buf *rleBuffer, t *testing.T) {
+	d := make([]byte, 1024)
+	if _, err := buf.ReadAt(d, buf.Size()-int64(len(d))); err != nil {
+		t.Fatal("read:", err)
+	}
+
+	sigOff := findSignatureInBlock(d)
+	dirOff, err := findDirectory64End(buf, buf.Size()-int64(len(d))+int64(sigOff))
+	if err != nil {
+		t.Fatal("findDirectory64End:", err)
+	}
+
+	d = make([]byte, directory64EndLen)
+	if _, err := buf.ReadAt(d, dirOff); err != nil {
+		t.Fatal("read:", err)
+	}
+
+	b := readBuf(d)
+	if sig := b.uint32(); sig != directory64EndSignature {
+		t.Fatalf("Expected directory64EndSignature (%d), got %d", directory64EndSignature, sig)
+	}
+
+	size := b.uint64()
+	if size != directory64EndLen-12 {
+		t.Fatalf("Expected length of %d, got %d", directory64EndLen-12, size)
+	}
 }
 
 func testInvalidHeader(h *FileHeader, t *testing.T) {
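For context on the Writer.SetOffset API added in writer.go above, here is a minimal usage sketch. The shell-stub prefix and file name are illustrative only, not part of this commit; the pattern mirrors TestWriterOffset: write the existing data first, tell the writer how many bytes precede the archive, then read the combined blob back with NewReader.

```go
package main

import (
	"archive/zip"
	"bytes"
	"fmt"
	"log"
)

func main() {
	// Existing data the archive is appended to (e.g. an executable stub).
	// The prefix content is illustrative.
	prefix := []byte("#!/bin/sh\necho self-extracting stub\n")

	var buf bytes.Buffer
	buf.Write(prefix)

	zw := zip.NewWriter(&buf)
	// Record how many bytes precede the zip data so central directory
	// offsets are written relative to the start of the whole file.
	zw.SetOffset(int64(len(prefix)))

	f, err := zw.Create("hello.txt")
	if err != nil {
		log.Fatal(err)
	}
	if _, err := f.Write([]byte("hello, zip\n")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	// The combined prefix+archive still opens as a valid zip.
	zr, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(zr.File[0].Name) // hello.txt
}
```

Note that SetOffset panics if any data has already been written through the zip.Writer, which is why it is called immediately after NewWriter.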
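The reader changes above make truncation visible to callers: a short entry body or a truncated data descriptor now surfaces as io.ErrUnexpectedEOF, and a CRC mismatch as zip.ErrChecksum. A small sketch, assuming an illustrative archive path, of handling those cases when scanning an untrusted archive:

```go
package main

import (
	"archive/zip"
	"fmt"
	"io"
	"io/ioutil"
	"log"
)

func main() {
	zr, err := zip.OpenReader("archive.zip") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer zr.Close()

	for _, f := range zr.File {
		rc, err := f.Open()
		if err != nil {
			log.Printf("%s: open: %v", f.Name, err)
			continue
		}
		_, err = io.Copy(ioutil.Discard, rc)
		rc.Close()
		switch err {
		case nil:
			fmt.Printf("%s: ok (%d bytes)\n", f.Name, f.UncompressedSize64)
		case io.ErrUnexpectedEOF:
			// Entry body or data descriptor was truncated.
			log.Printf("%s: truncated entry", f.Name)
		case zip.ErrChecksum:
			// Stored CRC32 does not match the decompressed data.
			log.Printf("%s: checksum mismatch", f.Name)
		default:
			log.Printf("%s: %v", f.Name, err)
		}
	}
}
```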
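Finally, the one-line fix to the zip64 end of central directory record in writer.go encodes a detail of the format: the record's own size field excludes the 4-byte signature and the 8-byte size field. A tiny sketch of that arithmetic, assuming the 56-byte fixed record length used by this package:

```go
package main

import "fmt"

// The zip64 end of central directory record's size field counts only the
// bytes that follow it: the whole fixed record minus the 4-byte signature
// and the 8-byte size field itself.
const (
	directory64EndLen = 56                     // fixed record length, as in struct.go
	sizeFieldValue    = directory64EndLen - 12 // 4 (signature) + 8 (size field)
)

func main() {
	fmt.Println(sizeFieldValue) // prints 44, the value the writer now emits
}
```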

