1 // Copyright 2010 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
4 5 package zip
import (
	"bufio"
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"hash"
	"hash/crc32"
	"internal/godebug"
	"io"
	"io/fs"
	"os"
	"path"
	"path/filepath"
	"slices"
	"strings"
	"sync"
	"time"
)
// zipinsecurepath holds the GODEBUG setting that controls whether
// archives containing non-local or backslashed names are rejected.
var zipinsecurepath = godebug.New("zipinsecurepath")
var (
	// ErrFormat is returned when the data is not a valid zip archive.
	ErrFormat = errors.New("zip: not a valid zip file")
	// ErrAlgorithm is returned when a file uses an unregistered compression method.
	ErrAlgorithm = errors.New("zip: unsupported compression algorithm")
	// ErrChecksum is returned when a file's CRC-32 does not match its header.
	ErrChecksum = errors.New("zip: checksum error")
	// ErrInsecurePath is returned, alongside a usable reader, when
	// zipinsecurepath=0 and a file name is non-local or contains backslashes.
	ErrInsecurePath = errors.New("zip: insecure file path")
)
// A Reader serves content from a ZIP archive.
type Reader struct {
	r             io.ReaderAt             // underlying archive data
	File          []*File                 // files in central-directory order
	Comment       string                  // archive comment from the end-of-directory record
	decompressors map[uint16]Decompressor // per-reader overrides, keyed by method ID

	// Some JAR files are zip files with a prefix that is a bash script.
	// The baseOffset field is the start of the zip file proper.
	baseOffset int64

	// fileList is a list of files sorted by ename,
	// for use by the Open method.
	fileListOnce sync.Once
	fileList     []fileListEntry
}
// A ReadCloser is a [Reader] that must be closed when no longer needed.
type ReadCloser struct {
	f *os.File // underlying file; released by Close
	Reader
}
// A File is a single file in a ZIP archive.
// The file information is in the embedded [FileHeader].
// The file content can be accessed by calling [File.Open].
type File struct {
	FileHeader
	zip          *Reader     // archive this file belongs to
	zipr         io.ReaderAt // raw archive data, shared with the Reader
	headerOffset int64       // includes overall ZIP archive baseOffset
	zip64        bool        // zip64 extended information extra field presence
}
68 69 // OpenReader will open the Zip file specified by name and return a ReadCloser.
70 //
71 // If any file inside the archive uses a non-local name
72 // (as defined by [filepath.IsLocal]) or a name containing backslashes
73 // and the GODEBUG environment variable contains `zipinsecurepath=0`,
74 // OpenReader returns the reader with an ErrInsecurePath error.
75 // A future version of Go may introduce this behavior by default.
76 // Programs that want to accept non-local names can ignore
77 // the ErrInsecurePath error and use the returned reader.
78 func OpenReader(name string) (*ReadCloser, error) {
79 f, err := os.Open(name)
80 if err != nil {
81 return nil, err
82 }
83 fi, err := f.Stat()
84 if err != nil {
85 f.Close()
86 return nil, err
87 }
88 r := &ReadCloser{}
89 if err = r.init(f, fi.Size()); err != nil && err != ErrInsecurePath {
90 f.Close()
91 return nil, err
92 }
93 r.f = f
94 return r, err
95 }
96 97 // NewReader returns a new [Reader] reading from r, which is assumed to
98 // have the given size in bytes.
99 //
100 // If any file inside the archive uses a non-local name
101 // (as defined by [filepath.IsLocal]) or a name containing backslashes
102 // and the GODEBUG environment variable contains `zipinsecurepath=0`,
103 // NewReader returns the reader with an [ErrInsecurePath] error.
104 // A future version of Go may introduce this behavior by default.
105 // Programs that want to accept non-local names can ignore
106 // the [ErrInsecurePath] error and use the returned reader.
107 func NewReader(r io.ReaderAt, size int64) (*Reader, error) {
108 if size < 0 {
109 return nil, errors.New("zip: size cannot be negative")
110 }
111 zr := &Reader{}
112 var err error
113 if err = zr.init(r, size); err != nil && err != ErrInsecurePath {
114 return nil, err
115 }
116 return zr, err
117 }
118 119 func (r *Reader) init(rdr io.ReaderAt, size int64) error {
120 end, baseOffset, err := readDirectoryEnd(rdr, size)
121 if err != nil {
122 return err
123 }
124 r.r = rdr
125 r.baseOffset = baseOffset
126 // Since the number of directory records is not validated, it is not
127 // safe to preallocate r.File without first checking that the specified
128 // number of files is reasonable, since a malformed archive may
129 // indicate it contains up to 1 << 128 - 1 files. Since each file has a
130 // header which will be _at least_ 30 bytes we can safely preallocate
131 // if (data size / 30) >= end.directoryRecords.
132 if end.directorySize < uint64(size) && (uint64(size)-end.directorySize)/30 >= end.directoryRecords {
133 r.File = []*File{:0:end.directoryRecords}
134 }
135 r.Comment = end.comment
136 rs := io.NewSectionReader(rdr, 0, size)
137 if _, err = rs.Seek(r.baseOffset+int64(end.directoryOffset), io.SeekStart); err != nil {
138 return err
139 }
140 buf := bufio.NewReader(rs)
141 142 // The count of files inside a zip is truncated to fit in a uint16.
143 // Gloss over this by reading headers until we encounter
144 // a bad one, and then only report an ErrFormat or UnexpectedEOF if
145 // the file count modulo 65536 is incorrect.
146 for {
147 f := &File{zip: r, zipr: rdr}
148 err = readDirectoryHeader(f, buf)
149 if err == ErrFormat || err == io.ErrUnexpectedEOF {
150 break
151 }
152 if err != nil {
153 return err
154 }
155 f.headerOffset += r.baseOffset
156 r.File = append(r.File, f)
157 }
158 if uint16(len(r.File)) != uint16(end.directoryRecords) { // only compare 16 bits here
159 // Return the readDirectoryHeader error if we read
160 // the wrong number of directory entries.
161 return err
162 }
163 if zipinsecurepath.Value() == "0" {
164 for _, f := range r.File {
165 if f.Name == "" {
166 // Zip permits an empty file name field.
167 continue
168 }
169 // The zip specification states that names must use forward slashes,
170 // so consider any backslashes in the name insecure.
171 if !filepath.IsLocal(f.Name) || bytes.Contains(f.Name, `\`) {
172 zipinsecurepath.IncNonDefault()
173 return ErrInsecurePath
174 }
175 }
176 }
177 return nil
178 }
179 180 // RegisterDecompressor registers or overrides a custom decompressor for a
181 // specific method ID. If a decompressor for a given method is not found,
182 // [Reader] will default to looking up the decompressor at the package level.
183 func (r *Reader) RegisterDecompressor(method uint16, dcomp Decompressor) {
184 if r.decompressors == nil {
185 r.decompressors = map[uint16]Decompressor{}
186 }
187 r.decompressors[method] = dcomp
188 }
189 190 func (r *Reader) decompressor(method uint16) Decompressor {
191 dcomp := r.decompressors[method]
192 if dcomp == nil {
193 dcomp = decompressor(method)
194 }
195 return dcomp
196 }
// Close closes the Zip file, rendering it unusable for I/O.
func (rc *ReadCloser) Close() error {
	return rc.f.Close()
}
202 203 // DataOffset returns the offset of the file's possibly-compressed
204 // data, relative to the beginning of the zip file.
205 //
206 // Most callers should instead use [File.Open], which transparently
207 // decompresses data and verifies checksums.
208 func (f *File) DataOffset() (offset int64, err error) {
209 bodyOffset, err := f.findBodyOffset()
210 if err != nil {
211 return
212 }
213 return f.headerOffset + bodyOffset, nil
214 }
215 216 // Open returns a [ReadCloser] that provides access to the [File]'s contents.
217 // Multiple files may be read concurrently.
218 func (f *File) Open() (io.ReadCloser, error) {
219 bodyOffset, err := f.findBodyOffset()
220 if err != nil {
221 return nil, err
222 }
223 if bytes.HasSuffix(f.Name, "/") {
224 // The ZIP specification (APPNOTE.TXT) specifies that directories, which
225 // are technically zero-byte files, must not have any associated file
226 // data. We previously tried failing here if f.CompressedSize64 != 0,
227 // but it turns out that a number of implementations (namely, the Java
228 // jar tool) don't properly set the storage method on directories
229 // resulting in a file with compressed size > 0 but uncompressed size ==
230 // 0. We still want to fail when a directory has associated uncompressed
231 // data, but we are tolerant of cases where the uncompressed size is
232 // zero but compressed size is not.
233 if f.UncompressedSize64 != 0 {
234 return &dirReader{ErrFormat}, nil
235 } else {
236 return &dirReader{io.EOF}, nil
237 }
238 }
239 size := int64(f.CompressedSize64)
240 r := io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset, size)
241 dcomp := f.zip.decompressor(f.Method)
242 if dcomp == nil {
243 return nil, ErrAlgorithm
244 }
245 var rc io.ReadCloser = dcomp(r)
246 var desr io.Reader
247 if f.hasDataDescriptor() {
248 desr = io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset+size, dataDescriptorLen)
249 }
250 rc = &checksumReader{
251 rc: rc,
252 hash: crc32.NewIEEE(),
253 f: f,
254 desr: desr,
255 }
256 return rc, nil
257 }
258 259 // OpenRaw returns a [Reader] that provides access to the [File]'s contents without
260 // decompression.
261 func (f *File) OpenRaw() (io.Reader, error) {
262 bodyOffset, err := f.findBodyOffset()
263 if err != nil {
264 return nil, err
265 }
266 r := io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset, int64(f.CompressedSize64))
267 return r, nil
268 }
269 270 type dirReader struct {
271 err error
272 }
273 274 func (r *dirReader) Read([]byte) (int, error) {
275 return 0, r.err
276 }
277 278 func (r *dirReader) Close() error {
279 return nil
280 }
// checksumReader wraps a decompressor, verifying declared sizes and the
// CRC-32 checksum as the caller reads.
type checksumReader struct {
	rc    io.ReadCloser
	hash  hash.Hash32
	nread uint64 // number of bytes read so far
	f     *File
	desr  io.Reader // if non-nil, where to read the data descriptor
	err   error     // sticky error
}
// Stat returns the FileHeader of the file being read as an fs.FileInfo.
func (r *checksumReader) Stat() (fs.FileInfo, error) {
	return headerFileInfo{&r.f.FileHeader}, nil
}
// Read reads decompressed bytes into b, feeding everything read through
// the CRC-32 accumulator and enforcing the size and checksum recorded
// in the file's headers. Any error becomes sticky.
func (r *checksumReader) Read(b []byte) (n int, err error) {
	if r.err != nil {
		return 0, r.err
	}
	n, err = r.rc.Read(b)
	r.hash.Write(b[:n])
	r.nread += uint64(n)
	if r.nread > r.f.UncompressedSize64 {
		// More data than the header declared: the archive is inconsistent.
		return 0, ErrFormat
	}
	if err == nil {
		return
	}
	if err == io.EOF {
		if r.nread != r.f.UncompressedSize64 {
			// Stream ended short of the declared size.
			return 0, io.ErrUnexpectedEOF
		}
		if r.desr != nil {
			// A trailing data descriptor carries the authoritative CRC.
			if err1 := readDataDescriptor(r.desr, r.f); err1 != nil {
				if err1 == io.EOF {
					err = io.ErrUnexpectedEOF
				} else {
					err = err1
				}
			} else if r.hash.Sum32() != r.f.CRC32 {
				err = ErrChecksum
			}
		} else {
			// If there's not a data descriptor, we still compare
			// the CRC32 of what we've read against the file header
			// or TOC's CRC32, if it seems like it was set.
			if r.f.CRC32 != 0 && r.hash.Sum32() != r.f.CRC32 {
				err = ErrChecksum
			}
		}
	}
	// Remember the outcome so later Reads return the same error.
	r.err = err
	return
}
// Close closes the underlying decompressor.
func (r *checksumReader) Close() error { return r.rc.Close() }
336 337 // findBodyOffset does the minimum work to verify the file has a header
338 // and returns the file body offset.
339 func (f *File) findBodyOffset() (int64, error) {
340 var buf [fileHeaderLen]byte
341 if _, err := f.zipr.ReadAt(buf[:], f.headerOffset); err != nil {
342 return 0, err
343 }
344 b := readBuf(buf[:])
345 if sig := b.uint32(); sig != fileHeaderSignature {
346 return 0, ErrFormat
347 }
348 b = b[22:] // skip over most of the header
349 filenameLen := int(b.uint16())
350 extraLen := int(b.uint16())
351 return int64(fileHeaderLen + filenameLen + extraLen), nil
352 }
353 354 // readDirectoryHeader attempts to read a directory header from r.
355 // It returns io.ErrUnexpectedEOF if it cannot read a complete header,
356 // and ErrFormat if it doesn't find a valid header signature.
357 func readDirectoryHeader(f *File, r io.Reader) error {
358 var buf [directoryHeaderLen]byte
359 if _, err := io.ReadFull(r, buf[:]); err != nil {
360 return err
361 }
362 b := readBuf(buf[:])
363 if sig := b.uint32(); sig != directoryHeaderSignature {
364 return ErrFormat
365 }
366 f.CreatorVersion = b.uint16()
367 f.ReaderVersion = b.uint16()
368 f.Flags = b.uint16()
369 f.Method = b.uint16()
370 f.ModifiedTime = b.uint16()
371 f.ModifiedDate = b.uint16()
372 f.CRC32 = b.uint32()
373 f.CompressedSize = b.uint32()
374 f.UncompressedSize = b.uint32()
375 f.CompressedSize64 = uint64(f.CompressedSize)
376 f.UncompressedSize64 = uint64(f.UncompressedSize)
377 filenameLen := int(b.uint16())
378 extraLen := int(b.uint16())
379 commentLen := int(b.uint16())
380 b = b[4:] // skipped start disk number and internal attributes (2x uint16)
381 f.ExternalAttrs = b.uint32()
382 f.headerOffset = int64(b.uint32())
383 d := []byte{:filenameLen+extraLen+commentLen}
384 if _, err := io.ReadFull(r, d); err != nil {
385 return err
386 }
387 f.Name = string(d[:filenameLen])
388 f.Extra = d[filenameLen : filenameLen+extraLen]
389 f.Comment = string(d[filenameLen+extraLen:])
390 391 // Determine the character encoding.
392 utf8Valid1, utf8Require1 := detectUTF8(f.Name)
393 utf8Valid2, utf8Require2 := detectUTF8(f.Comment)
394 switch {
395 case !utf8Valid1 || !utf8Valid2:
396 // Name and Comment definitely not UTF-8.
397 f.NonUTF8 = true
398 case !utf8Require1 && !utf8Require2:
399 // Name and Comment use only single-byte runes that overlap with UTF-8.
400 f.NonUTF8 = false
401 default:
402 // Might be UTF-8, might be some other encoding; preserve existing flag.
403 // Some ZIP writers use UTF-8 encoding without setting the UTF-8 flag.
404 // Since it is impossible to always distinguish valid UTF-8 from some
405 // other encoding (e.g., GBK or Shift-JIS), we trust the flag.
406 f.NonUTF8 = f.Flags&0x800 == 0
407 }
408 409 needUSize := f.UncompressedSize == ^uint32(0)
410 needCSize := f.CompressedSize == ^uint32(0)
411 needHeaderOffset := f.headerOffset == int64(^uint32(0))
412 413 // Best effort to find what we need.
414 // Other zip authors might not even follow the basic format,
415 // and we'll just ignore the Extra content in that case.
416 var modified time.Time
417 parseExtras:
418 for extra := readBuf(f.Extra); len(extra) >= 4; { // need at least tag and size
419 fieldTag := extra.uint16()
420 fieldSize := int(extra.uint16())
421 if len(extra) < fieldSize {
422 break
423 }
424 fieldBuf := extra.sub(fieldSize)
425 426 switch fieldTag {
427 case zip64ExtraID:
428 f.zip64 = true
429 430 // update directory values from the zip64 extra block.
431 // They should only be consulted if the sizes read earlier
432 // are maxed out.
433 // See golang.org/issue/13367.
434 if needUSize {
435 needUSize = false
436 if len(fieldBuf) < 8 {
437 return ErrFormat
438 }
439 f.UncompressedSize64 = fieldBuf.uint64()
440 }
441 if needCSize {
442 needCSize = false
443 if len(fieldBuf) < 8 {
444 return ErrFormat
445 }
446 f.CompressedSize64 = fieldBuf.uint64()
447 }
448 if needHeaderOffset {
449 needHeaderOffset = false
450 if len(fieldBuf) < 8 {
451 return ErrFormat
452 }
453 f.headerOffset = int64(fieldBuf.uint64())
454 }
455 case ntfsExtraID:
456 if len(fieldBuf) < 4 {
457 continue parseExtras
458 }
459 fieldBuf.uint32() // reserved (ignored)
460 for len(fieldBuf) >= 4 { // need at least tag and size
461 attrTag := fieldBuf.uint16()
462 attrSize := int(fieldBuf.uint16())
463 if len(fieldBuf) < attrSize {
464 continue parseExtras
465 }
466 attrBuf := fieldBuf.sub(attrSize)
467 if attrTag != 1 || attrSize != 24 {
468 continue // Ignore irrelevant attributes
469 }
470 471 const ticksPerSecond = 1e7 // Windows timestamp resolution
472 ts := int64(attrBuf.uint64()) // ModTime since Windows epoch
473 secs := ts / ticksPerSecond
474 nsecs := (1e9 / ticksPerSecond) * (ts % ticksPerSecond)
475 epoch := time.Date(1601, time.January, 1, 0, 0, 0, 0, time.UTC)
476 modified = time.Unix(epoch.Unix()+secs, nsecs)
477 }
478 case unixExtraID, infoZipUnixExtraID:
479 if len(fieldBuf) < 8 {
480 continue parseExtras
481 }
482 fieldBuf.uint32() // AcTime (ignored)
483 ts := int64(fieldBuf.uint32()) // ModTime since Unix epoch
484 modified = time.Unix(ts, 0)
485 case extTimeExtraID:
486 if len(fieldBuf) < 5 || fieldBuf.uint8()&1 == 0 {
487 continue parseExtras
488 }
489 ts := int64(fieldBuf.uint32()) // ModTime since Unix epoch
490 modified = time.Unix(ts, 0)
491 }
492 }
493 494 msdosModified := msDosTimeToTime(f.ModifiedDate, f.ModifiedTime)
495 f.Modified = msdosModified
496 if !modified.IsZero() {
497 f.Modified = modified.UTC()
498 499 // If legacy MS-DOS timestamps are set, we can use the delta between
500 // the legacy and extended versions to estimate timezone offset.
501 //
502 // A non-UTC timezone is always used (even if offset is zero).
503 // Thus, FileHeader.Modified.Location() == time.UTC is useful for
504 // determining whether extended timestamps are present.
505 // This is necessary for users that need to do additional time
506 // calculations when dealing with legacy ZIP formats.
507 if f.ModifiedTime != 0 || f.ModifiedDate != 0 {
508 f.Modified = modified.In(timeZone(msdosModified.Sub(modified)))
509 }
510 }
511 512 // Assume that uncompressed size 2³²-1 could plausibly happen in
513 // an old zip32 file that was sharding inputs into the largest chunks
514 // possible (or is just malicious; search the web for 42.zip).
515 // If needUSize is true still, it means we didn't see a zip64 extension.
516 // As long as the compressed size is not also 2³²-1 (implausible)
517 // and the header is not also 2³²-1 (equally implausible),
518 // accept the uncompressed size 2³²-1 as valid.
519 // If nothing else, this keeps archive/zip working with 42.zip.
520 _ = needUSize
521 522 if needCSize || needHeaderOffset {
523 return ErrFormat
524 }
525 526 return nil
527 }
// readDataDescriptor reads the trailing data descriptor for f from r and
// verifies its CRC-32 against the value recorded in the central directory.
func readDataDescriptor(r io.Reader, f *File) error {
	var buf [dataDescriptorLen]byte
	// The spec says: "Although not originally assigned a
	// signature, the value 0x08074b50 has commonly been adopted
	// as a signature value for the data descriptor record.
	// Implementers should be aware that ZIP files may be
	// encountered with or without this signature marking data
	// descriptors and should account for either case when reading
	// ZIP files to ensure compatibility."
	//
	// dataDescriptorLen includes the size of the signature but
	// first read just those 4 bytes to see if it exists.
	if _, err := io.ReadFull(r, buf[:4]); err != nil {
		return err
	}
	off := 0
	maybeSig := readBuf(buf[:4])
	if maybeSig.uint32() != dataDescriptorSignature {
		// No data descriptor signature. Keep these four
		// bytes.
		off += 4
	}
	// Fill in the remainder of the 12-byte (sans signature) descriptor,
	// starting after whatever we decided to keep.
	if _, err := io.ReadFull(r, buf[off:12]); err != nil {
		return err
	}
	b := readBuf(buf[:12])
	if b.uint32() != f.CRC32 {
		return ErrChecksum
	}

	// The two sizes that follow here can be either 32 bits or 64 bits
	// but the spec is not very clear on this and different
	// interpretations has been made causing incompatibilities. We
	// already have the sizes from the central directory so we can
	// just ignore these.

	return nil
}
567 568 func readDirectoryEnd(r io.ReaderAt, size int64) (dir *directoryEnd, baseOffset int64, err error) {
569 // look for directoryEndSignature in the last 1k, then in the last 65k
570 var buf []byte
571 var directoryEndOffset int64
572 for i, bLen := range []int64{1024, 65 * 1024} {
573 if bLen > size {
574 bLen = size
575 }
576 buf = []byte{:int(bLen)}
577 if _, err := r.ReadAt(buf, size-bLen); err != nil && err != io.EOF {
578 return nil, 0, err
579 }
580 if p := findSignatureInBlock(buf); p >= 0 {
581 buf = buf[p:]
582 directoryEndOffset = size - bLen + int64(p)
583 break
584 }
585 if i == 1 || bLen == size {
586 return nil, 0, ErrFormat
587 }
588 }
589 590 // read header into struct
591 b := readBuf(buf[4:]) // skip signature
592 d := &directoryEnd{
593 diskNbr: uint32(b.uint16()),
594 dirDiskNbr: uint32(b.uint16()),
595 dirRecordsThisDisk: uint64(b.uint16()),
596 directoryRecords: uint64(b.uint16()),
597 directorySize: uint64(b.uint32()),
598 directoryOffset: uint64(b.uint32()),
599 commentLen: b.uint16(),
600 }
601 l := int(d.commentLen)
602 if l > len(b) {
603 return nil, 0, errors.New("zip: invalid comment length")
604 }
605 d.comment = string(b[:l])
606 607 // These values mean that the file can be a zip64 file
608 if d.directoryRecords == 0xffff || d.directorySize == 0xffff || d.directoryOffset == 0xffffffff {
609 p, err := findDirectory64End(r, directoryEndOffset)
610 if err == nil && p >= 0 {
611 directoryEndOffset = p
612 err = readDirectory64End(r, p, d)
613 }
614 if err != nil {
615 return nil, 0, err
616 }
617 }
618 619 maxInt64 := uint64(1<<63 - 1)
620 if d.directorySize > maxInt64 || d.directoryOffset > maxInt64 {
621 return nil, 0, ErrFormat
622 }
623 624 baseOffset = directoryEndOffset - int64(d.directorySize) - int64(d.directoryOffset)
625 626 // Make sure directoryOffset points to somewhere in our file.
627 if o := baseOffset + int64(d.directoryOffset); o < 0 || o >= size {
628 return nil, 0, ErrFormat
629 }
630 631 // If the directory end data tells us to use a non-zero baseOffset,
632 // but we would find a valid directory entry if we assume that the
633 // baseOffset is 0, then just use a baseOffset of 0.
634 // We've seen files in which the directory end data gives us
635 // an incorrect baseOffset.
636 if baseOffset > 0 {
637 off := int64(d.directoryOffset)
638 rs := io.NewSectionReader(r, off, size-off)
639 if readDirectoryHeader(&File{}, rs) == nil {
640 baseOffset = 0
641 }
642 }
643 644 return d, baseOffset, nil
645 }
646 647 // findDirectory64End tries to read the zip64 locator just before the
648 // directory end and returns the offset of the zip64 directory end if
649 // found.
650 func findDirectory64End(r io.ReaderAt, directoryEndOffset int64) (int64, error) {
651 locOffset := directoryEndOffset - directory64LocLen
652 if locOffset < 0 {
653 return -1, nil // no need to look for a header outside the file
654 }
655 buf := []byte{:directory64LocLen}
656 if _, err := r.ReadAt(buf, locOffset); err != nil {
657 return -1, err
658 }
659 b := readBuf(buf)
660 if sig := b.uint32(); sig != directory64LocSignature {
661 return -1, nil
662 }
663 if b.uint32() != 0 { // number of the disk with the start of the zip64 end of central directory
664 return -1, nil // the file is not a valid zip64-file
665 }
666 p := b.uint64() // relative offset of the zip64 end of central directory record
667 if b.uint32() != 1 { // total number of disks
668 return -1, nil // the file is not a valid zip64-file
669 }
670 return int64(p), nil
671 }
672 673 // readDirectory64End reads the zip64 directory end and updates the
674 // directory end with the zip64 directory end values.
675 func readDirectory64End(r io.ReaderAt, offset int64, d *directoryEnd) (err error) {
676 buf := []byte{:directory64EndLen}
677 if _, err := r.ReadAt(buf, offset); err != nil {
678 return err
679 }
680 681 b := readBuf(buf)
682 if sig := b.uint32(); sig != directory64EndSignature {
683 return ErrFormat
684 }
685 686 b = b[12:] // skip dir size, version and version needed (uint64 + 2x uint16)
687 d.diskNbr = b.uint32() // number of this disk
688 d.dirDiskNbr = b.uint32() // number of the disk with the start of the central directory
689 d.dirRecordsThisDisk = b.uint64() // total number of entries in the central directory on this disk
690 d.directoryRecords = b.uint64() // total number of entries in the central directory
691 d.directorySize = b.uint64() // size of the central directory
692 d.directoryOffset = b.uint64() // offset of start of central directory with respect to the starting disk number
693 694 return nil
695 }
696 697 func findSignatureInBlock(b []byte) int {
698 for i := len(b) - directoryEndLen; i >= 0; i-- {
699 // defined from directoryEndSignature in struct.go
700 if b[i] == 'P' && b[i+1] == 'K' && b[i+2] == 0x05 && b[i+3] == 0x06 {
701 // n is length of comment
702 n := int(b[i+directoryEndLen-2]) | int(b[i+directoryEndLen-1])<<8
703 if n+directoryEndLen+i > len(b) {
704 // Truncated comment.
705 // Some parsers (such as Info-ZIP) ignore the truncated comment
706 // rather than treating it as a hard error.
707 return -1
708 }
709 return i
710 }
711 }
712 return -1
713 }
// readBuf is a little-endian cursor over a byte slice: each call decodes
// the next field and advances past it.
type readBuf []byte

// uint8 consumes and returns one byte.
func (b *readBuf) uint8() uint8 {
	s := *b
	*b = s[1:]
	return s[0]
}

// uint16 consumes and returns a little-endian 16-bit value.
func (b *readBuf) uint16() uint16 {
	s := *b
	*b = s[2:]
	return binary.LittleEndian.Uint16(s)
}

// uint32 consumes and returns a little-endian 32-bit value.
func (b *readBuf) uint32() uint32 {
	s := *b
	*b = s[4:]
	return binary.LittleEndian.Uint32(s)
}

// uint64 consumes and returns a little-endian 64-bit value.
func (b *readBuf) uint64() uint64 {
	s := *b
	*b = s[8:]
	return binary.LittleEndian.Uint64(s)
}

// sub consumes the next n bytes and returns them as their own cursor.
func (b *readBuf) sub(n int) readBuf {
	s := *b
	*b = s[n:]
	return s[:n]
}
// A fileListEntry is a File and its ename.
// If file == nil, the fileListEntry describes a directory without metadata.
type fileListEntry struct {
	name  string
	file  *File
	isDir bool
	isDup bool // another entry shares this name; stat reports an error
}

// fileInfoDirEntry combines fs.FileInfo and fs.DirEntry, the two views
// a directory listing needs from one value.
type fileInfoDirEntry interface {
	fs.FileInfo
	fs.DirEntry
}
760 761 func (f *fileListEntry) stat() (fileInfoDirEntry, error) {
762 if f.isDup {
763 return nil, errors.New(f.name + ": duplicate entries in zip file")
764 }
765 if !f.isDir {
766 return headerFileInfo{&f.file.FileHeader}, nil
767 }
768 return f, nil
769 }
// Only used for directories.
func (f *fileListEntry) Name() string      { _, elem, _ := split(f.name); return elem }
func (f *fileListEntry) Size() int64       { return 0 }
func (f *fileListEntry) Mode() fs.FileMode { return fs.ModeDir | 0555 }
func (f *fileListEntry) Type() fs.FileMode { return fs.ModeDir }
func (f *fileListEntry) IsDir() bool       { return true }
func (f *fileListEntry) Sys() any          { return nil }
778 779 func (f *fileListEntry) ModTime() time.Time {
780 if f.file == nil {
781 return time.Time{}
782 }
783 return f.file.FileHeader.Modified.UTC()
784 }
// Info implements fs.DirEntry for directory entries.
func (f *fileListEntry) Info() (fs.FileInfo, error) { return f, nil }
// String formats the entry in the standard fs.FormatDirEntry style.
func (f *fileListEntry) String() string {
	return fs.FormatDirEntry(f)
}
// toValidName coerces name to be a valid name for fs.FS.Open:
// backslashes become slashes, the result is path-cleaned, and any
// leading "/" or "../" elements are stripped.
func toValidName(name string) string {
	name = strings.ReplaceAll(name, `\`, `/`)
	p := path.Clean(name)

	p = strings.TrimPrefix(p, "/")

	for strings.HasPrefix(p, "../") {
		p = p[len("../"):]
	}

	return p
}
805 806 func (r *Reader) initFileList() {
807 r.fileListOnce.Do(func() {
808 // Preallocate the minimum size of the index.
809 // We may also synthesize additional directory entries.
810 r.fileList = []fileListEntry{:0:len(r.File)}
811 // files and knownDirs map from a file/directory name
812 // to an index into the r.fileList entry that we are
813 // building. They are used to mark duplicate entries.
814 files := map[string]int{}
815 knownDirs := map[string]int{}
816 817 // dirs[name] is true if name is known to be a directory,
818 // because it appears as a prefix in a path.
819 dirs := map[string]bool{}
820 821 for _, file := range r.File {
822 isDir := len(file.Name) > 0 && file.Name[len(file.Name)-1] == '/'
823 name := toValidName(file.Name)
824 if name == "" {
825 continue
826 }
827 828 if idx, ok := files[name]; ok {
829 r.fileList[idx].isDup = true
830 continue
831 }
832 if idx, ok := knownDirs[name]; ok {
833 r.fileList[idx].isDup = true
834 continue
835 }
836 837 for dir := path.Dir(name); dir != "."; dir = path.Dir(dir) {
838 dirs[dir] = true
839 }
840 841 idx := len(r.fileList)
842 entry := fileListEntry{
843 name: name,
844 file: file,
845 isDir: isDir,
846 }
847 r.fileList = append(r.fileList, entry)
848 if isDir {
849 knownDirs[name] = idx
850 } else {
851 files[name] = idx
852 }
853 }
854 for dir := range dirs {
855 if _, ok := knownDirs[dir]; !ok {
856 if idx, ok := files[dir]; ok {
857 r.fileList[idx].isDup = true
858 } else {
859 entry := fileListEntry{
860 name: dir,
861 file: nil,
862 isDir: true,
863 }
864 r.fileList = append(r.fileList, entry)
865 }
866 }
867 }
868 869 slices.SortFunc(r.fileList, func(a, b fileListEntry) int {
870 return fileEntryCompare(a.name, b.name)
871 })
872 })
873 }
874 875 func fileEntryCompare(x, y string) int {
876 xdir, xelem, _ := split(x)
877 ydir, yelem, _ := split(y)
878 if xdir != ydir {
879 return bytes.Compare(xdir, ydir)
880 }
881 return bytes.Compare(xelem, yelem)
882 }
883 884 // Open opens the named file in the ZIP archive,
885 // using the semantics of fs.FS.Open:
886 // paths are always slash separated, with no
887 // leading / or ../ elements.
888 func (r *Reader) Open(name string) (fs.File, error) {
889 r.initFileList()
890 891 if !fs.ValidPath(name) {
892 return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrInvalid}
893 }
894 e := r.openLookup(name)
895 if e == nil {
896 return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrNotExist}
897 }
898 if e.isDir {
899 return &openDir{e, r.openReadDir(name), 0}, nil
900 }
901 rc, err := e.file.Open()
902 if err != nil {
903 return nil, err
904 }
905 return rc.(fs.File), nil
906 }
// split splits a zip path into its parent directory and final element,
// reporting whether name carried a trailing slash (a directory entry).
// The parent of a bare element is ".".
func split(name string) (dir, elem string, isDir bool) {
	name, isDir = strings.CutSuffix(name, "/")
	i := strings.LastIndexByte(name, '/')
	if i < 0 {
		return ".", name, isDir
	}
	return name[:i], name[i+1:], isDir
}
// dotFile is the synthetic root directory entry returned for Open(".").
var dotFile = &fileListEntry{name: "./", isDir: true}
918 919 func (r *Reader) openLookup(name string) *fileListEntry {
920 if name == "." {
921 return dotFile
922 }
923 924 dir, elem, _ := split(name)
925 files := r.fileList
926 i, _ := slices.BinarySearchFunc(files, dir, func(a fileListEntry, dir string) (ret int) {
927 idir, ielem, _ := split(a.name)
928 if dir != idir {
929 return bytes.Compare(idir, dir)
930 }
931 return bytes.Compare(ielem, elem)
932 })
933 if i < len(files) {
934 fname := files[i].name
935 if fname == name || len(fname) == len(name)+1 && fname[len(name)] == '/' && fname[:len(name)] == name {
936 return &files[i]
937 }
938 }
939 return nil
940 }
941 942 func (r *Reader) openReadDir(dir string) []fileListEntry {
943 files := r.fileList
944 i, _ := slices.BinarySearchFunc(files, dir, func(a fileListEntry, dir string) int {
945 idir, _, _ := split(a.name)
946 if dir != idir {
947 return bytes.Compare(idir, dir)
948 }
949 // find the first entry with dir
950 return +1
951 })
952 j, _ := slices.BinarySearchFunc(files, dir, func(a fileListEntry, dir string) int {
953 jdir, _, _ := split(a.name)
954 if dir != jdir {
955 return bytes.Compare(jdir, dir)
956 }
957 // find the last entry with dir
958 return -1
959 })
960 return files[i:j]
961 }
// openDir is the fs.ReadDirFile served for directory entries.
type openDir struct {
	e      *fileListEntry
	files  []fileListEntry // children of e, sorted; see openReadDir
	offset int             // pagination cursor for ReadDir
}

// Close is a no-op; directories hold no resources.
func (d *openDir) Close() error { return nil }

// Stat describes the directory itself.
func (d *openDir) Stat() (fs.FileInfo, error) { return d.e.stat() }

// Read always fails: a directory has no byte stream.
func (d *openDir) Read([]byte) (int, error) {
	return 0, &fs.PathError{Op: "read", Path: d.e.name, Err: errors.New("is a directory")}
}
975 976 func (d *openDir) ReadDir(count int) ([]fs.DirEntry, error) {
977 n := len(d.files) - d.offset
978 if count > 0 && n > count {
979 n = count
980 }
981 if n == 0 {
982 if count <= 0 {
983 return nil, nil
984 }
985 return nil, io.EOF
986 }
987 list := []fs.DirEntry{:n}
988 for i := range list {
989 s, err := d.files[d.offset+i].stat()
990 if err != nil {
991 return nil, err
992 } else if s.Name() == "." || !fs.ValidPath(s.Name()) {
993 return nil, &fs.PathError{
994 Op: "readdir",
995 Path: d.e.name,
996 Err: fmt.Errorf("invalid file name: %v", d.files[d.offset+i].name),
997 }
998 }
999 list[i] = s
1000 }
1001 d.offset += n
1002 return list, nil
1003 }
1004