// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package tar

import (
	"bytes"
	"io"
	"io/ioutil"
	"strconv"
	"strings"
	"time"
)

// Reader provides sequential access to the contents of a tar archive.
// Reader.Next advances to the next file in the archive (including the first),
// and then Reader can be treated as an io.Reader to access the file's data.
type Reader struct {
	r    io.Reader
	pad  int64      // Amount of padding (ignored) after current file entry
	curr fileReader // Reader for current file entry
	blk  block      // Buffer to use as temporary local storage

	// err is a persistent error.
	// It is only the responsibility of every exported method of Reader to
	// ensure that this error is sticky.
	err error
}

type fileReader interface {
	io.Reader
	fileState

	WriteTo(io.Writer) (int64, error)
}

// NewReader creates a new Reader reading from r.
func NewReader(r io.Reader) *Reader {
	return &Reader{r: r, curr: &regFileReader{r, 0}}
}
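
// Minimal usage sketch (added for illustration, not part of the original
// file): advance with Next and read each entry's body through the Reader
// itself. The names f and dst are hypothetical placeholders for a
// caller-provided source and destination.
//
//	tr := NewReader(f)
//	for {
//		hdr, err := tr.Next()
//		if err == io.EOF {
//			break // End of archive
//		}
//		if err != nil {
//			return err
//		}
//		// hdr.Name and hdr.Size describe the entry; tr reads its data.
//		if _, err := io.Copy(dst, tr); err != nil {
//			return err
//		}
//	}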

// Next advances to the next entry in the tar archive.
// The Header.Size determines how many bytes can be read for the next file.
// Any remaining data in the current file is automatically discarded.
//
// io.EOF is returned at the end of the input.
func (tr *Reader) Next() (*Header, error) {
	if tr.err != nil {
		return nil, tr.err
	}
	hdr, err := tr.next()
	tr.err = err
	return hdr, err
}

func (tr *Reader) next() (*Header, error) {
	var paxHdrs map[string]string
	var gnuLongName, gnuLongLink string

	// Externally, Next iterates through the tar archive as if it is a series of
	// files. Internally, the tar format often uses fake "files" to add meta
	// data that describes the next file. These meta data "files" should not
	// normally be visible to the outside. As such, this loop iterates through
	// one or more "header files" until it finds a "normal file".
	format := FormatUSTAR | FormatPAX | FormatGNU
	for {
		// Discard the remainder of the file and any padding.
		if err := discard(tr.r, tr.curr.PhysicalRemaining()); err != nil {
			return nil, err
		}
		if _, err := tryReadFull(tr.r, tr.blk[:tr.pad]); err != nil {
			return nil, err
		}
		tr.pad = 0

		hdr, rawHdr, err := tr.readHeader()
		if err != nil {
			return nil, err
		}
		if err := tr.handleRegularFile(hdr); err != nil {
			return nil, err
		}
		format.mayOnlyBe(hdr.Format)

		// Check for PAX/GNU special headers and files.
		switch hdr.Typeflag {
		case TypeXHeader, TypeXGlobalHeader:
			format.mayOnlyBe(FormatPAX)
			paxHdrs, err = parsePAX(tr)
			if err != nil {
				return nil, err
			}
			if hdr.Typeflag == TypeXGlobalHeader {
				mergePAX(hdr, paxHdrs)
				return &Header{
					Name:       hdr.Name,
					Typeflag:   hdr.Typeflag,
					Xattrs:     hdr.Xattrs,
					PAXRecords: hdr.PAXRecords,
					Format:     format,
				}, nil
			}
			continue // This is a meta header affecting the next header
		case TypeGNULongName, TypeGNULongLink:
			format.mayOnlyBe(FormatGNU)
			realname, err := ioutil.ReadAll(tr)
			if err != nil {
				return nil, err
			}

			var p parser
			switch hdr.Typeflag {
			case TypeGNULongName:
				gnuLongName = p.parseString(realname)
			case TypeGNULongLink:
				gnuLongLink = p.parseString(realname)
			}
			continue // This is a meta header affecting the next header
		default:
			// The old GNU sparse format is handled here since it is technically
			// just a regular file with additional attributes.

			if err := mergePAX(hdr, paxHdrs); err != nil {
				return nil, err
			}
			if gnuLongName != "" {
				hdr.Name = gnuLongName
			}
			if gnuLongLink != "" {
				hdr.Linkname = gnuLongLink
			}
			if hdr.Typeflag == TypeRegA {
				if strings.HasSuffix(hdr.Name, "/") {
					hdr.Typeflag = TypeDir // Legacy archives use trailing slash for directories
				} else {
					hdr.Typeflag = TypeReg
				}
			}

			// The extended headers may have updated the size.
			// Thus, set up the regFileReader again after merging PAX headers.
			if err := tr.handleRegularFile(hdr); err != nil {
				return nil, err
			}

			// Sparse formats rely on being able to read from the logical data
			// section; there must be a preceding call to handleRegularFile.
			if err := tr.handleSparseFile(hdr, rawHdr); err != nil {
				return nil, err
			}

			// Set the final guess at the format.
			if format.has(FormatUSTAR) && format.has(FormatPAX) {
				format.mayOnlyBe(FormatUSTAR)
			}
			hdr.Format = format
			return hdr, nil // This is a file, so stop
		}
	}
}

// handleRegularFile sets up the current file reader and padding such that it
// can only read the following logical data section. It will properly handle
// special headers that contain no data section.
func (tr *Reader) handleRegularFile(hdr *Header) error {
	nb := hdr.Size
	if isHeaderOnlyType(hdr.Typeflag) {
		nb = 0
	}
	if nb < 0 {
		return ErrHeader
	}

	tr.pad = blockPadding(nb)
	tr.curr = &regFileReader{r: tr.r, nb: nb}
	return nil
}
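
// Padding arithmetic sketch (added for illustration, not part of the original
// file): tar data sections are stored in 512-byte blocks, so an entry whose
// Size is 600 occupies two blocks (1024 bytes) on disk and blockPadding(600)
// is expected to report the 424 trailing bytes that Next must later skip.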

// handleSparseFile checks if the current file is a sparse format of any type
// and sets the curr reader appropriately.
func (tr *Reader) handleSparseFile(hdr *Header, rawHdr *block) error {
	var spd sparseDatas
	var err error
	if hdr.Typeflag == TypeGNUSparse {
		spd, err = tr.readOldGNUSparseMap(hdr, rawHdr)
	} else {
		spd, err = tr.readGNUSparsePAXHeaders(hdr)
	}

	// If spd is non-nil, then this is a sparse file.
	// Note that it is possible for len(spd) == 0.
	if err == nil && spd != nil {
		if isHeaderOnlyType(hdr.Typeflag) || !validateSparseEntries(spd, hdr.Size) {
			return ErrHeader
		}
		sph := invertSparseEntries(spd, hdr.Size)
		tr.curr = &sparseFileReader{tr.curr, sph, 0}
	}
	return err
}
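
// Data/hole bookkeeping sketch (added for illustration, not part of the
// original file): the sparse map read from the archive lists data fragments,
// while sparseFileReader wants the complementary holes. For a 10-byte logical
// file with data fragments {offset 0, length 2} and {offset 6, length 2},
// invertSparseEntries is expected to yield the holes {2, 4} and {8, 2}, with
// the last fragment ending exactly at Header.Size.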

// readGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers.
// If they are found, then this function reads the sparse map and returns it.
// This assumes that 0.0 headers have already been converted to 0.1 headers
// by the PAX header parsing logic.
func (tr *Reader) readGNUSparsePAXHeaders(hdr *Header) (sparseDatas, error) {
	// Identify the version of GNU headers.
	var is1x0 bool
	major, minor := hdr.PAXRecords[paxGNUSparseMajor], hdr.PAXRecords[paxGNUSparseMinor]
	switch {
	case major == "0" && (minor == "0" || minor == "1"):
		is1x0 = false
	case major == "1" && minor == "0":
		is1x0 = true
	case major != "" || minor != "":
		return nil, nil // Unknown GNU sparse PAX version
	case hdr.PAXRecords[paxGNUSparseMap] != "":
		is1x0 = false // 0.0 and 0.1 did not have explicit version records, so guess
	default:
		return nil, nil // Not a PAX format GNU sparse file.
	}
	hdr.Format.mayOnlyBe(FormatPAX)

	// Update hdr from GNU sparse PAX headers.
	if name := hdr.PAXRecords[paxGNUSparseName]; name != "" {
		hdr.Name = name
	}
	size := hdr.PAXRecords[paxGNUSparseSize]
	if size == "" {
		size = hdr.PAXRecords[paxGNUSparseRealSize]
	}
	if size != "" {
		n, err := strconv.ParseInt(size, 10, 64)
		if err != nil {
			return nil, ErrHeader
		}
		hdr.Size = n
	}

	// Read the sparse map according to the appropriate format.
	if is1x0 {
		return readGNUSparseMap1x0(tr.curr)
	}
	return readGNUSparseMap0x1(hdr.PAXRecords)
}

// mergePAX merges paxHdrs into hdr for all relevant fields of Header.
func mergePAX(hdr *Header, paxHdrs map[string]string) (err error) {
	for k, v := range paxHdrs {
		if v == "" {
			continue // Keep the original USTAR value
		}
		var id64 int64
		switch k {
		case paxPath:
			hdr.Name = v
		case paxLinkpath:
			hdr.Linkname = v
		case paxUname:
			hdr.Uname = v
		case paxGname:
			hdr.Gname = v
		case paxUid:
			id64, err = strconv.ParseInt(v, 10, 64)
			hdr.Uid = int(id64) // Integer overflow possible
		case paxGid:
			id64, err = strconv.ParseInt(v, 10, 64)
			hdr.Gid = int(id64) // Integer overflow possible
		case paxAtime:
			hdr.AccessTime, err = parsePAXTime(v)
		case paxMtime:
			hdr.ModTime, err = parsePAXTime(v)
		case paxCtime:
			hdr.ChangeTime, err = parsePAXTime(v)
		case paxSize:
			hdr.Size, err = strconv.ParseInt(v, 10, 64)
		default:
			if strings.HasPrefix(k, paxSchilyXattr) {
				if hdr.Xattrs == nil {
					hdr.Xattrs = make(map[string]string)
				}
				hdr.Xattrs[k[len(paxSchilyXattr):]] = v
			}
		}
		if err != nil {
			return ErrHeader
		}
	}
	hdr.PAXRecords = paxHdrs
	return nil
}

// parsePAX parses PAX headers.
// If an extended header (type 'x') is invalid, ErrHeader is returned.
func parsePAX(r io.Reader) (map[string]string, error) {
	buf, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}
	sbuf := string(buf)

	// For GNU PAX sparse format 0.0 support.
	// This function transforms the sparse format 0.0 headers into format 0.1
	// headers since 0.0 headers were not PAX compliant.
	var sparseMap []string

	paxHdrs := make(map[string]string)
	for len(sbuf) > 0 {
		key, value, residual, err := parsePAXRecord(sbuf)
		if err != nil {
			return nil, ErrHeader
		}
		sbuf = residual

		switch key {
		case paxGNUSparseOffset, paxGNUSparseNumBytes:
			// Validate sparse header order and value.
			if (len(sparseMap)%2 == 0 && key != paxGNUSparseOffset) ||
				(len(sparseMap)%2 == 1 && key != paxGNUSparseNumBytes) ||
				strings.Contains(value, ",") {
				return nil, ErrHeader
			}
			sparseMap = append(sparseMap, value)
		default:
			paxHdrs[key] = value
		}
	}
	if len(sparseMap) > 0 {
		paxHdrs[paxGNUSparseMap] = strings.Join(sparseMap, ",")
	}
	return paxHdrs, nil
}
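
// PAX record sketch (added for illustration, not part of the original file):
// each record consumed by parsePAXRecord has the form "%d %s=%s\n", where the
// leading decimal length counts the entire record, including the length field
// itself, the space, and the trailing newline. For example, the 30-byte record
//
//	30 mtime=1350244992.023960108\n
//
// sets the mtime key, while GNU.sparse.offset/GNU.sparse.numbytes pairs are
// instead collected into the comma-separated GNU.sparse.map value seen above.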

// readHeader reads the next block header and assumes that the underlying reader
// is already aligned to a block boundary. It returns the raw block of the
// header in case further processing is required.
//
// The err will be set to io.EOF only when one of the following occurs:
// * Exactly 0 bytes are read and EOF is hit.
// * Exactly 1 block of zeros is read and EOF is hit.
// * At least 2 blocks of zeros are read.
func (tr *Reader) readHeader() (*Header, *block, error) {
	// Two blocks of zero bytes mark the end of the archive.
	if _, err := io.ReadFull(tr.r, tr.blk[:]); err != nil {
		return nil, nil, err // EOF is okay here; exactly 0 bytes read
	}
	if bytes.Equal(tr.blk[:], zeroBlock[:]) {
		if _, err := io.ReadFull(tr.r, tr.blk[:]); err != nil {
			return nil, nil, err // EOF is okay here; exactly 1 block of zeros read
		}
		if bytes.Equal(tr.blk[:], zeroBlock[:]) {
			return nil, nil, io.EOF // normal EOF; exactly 2 blocks of zeros read
		}
		return nil, nil, ErrHeader // Zero block and then non-zero block
	}

	// Verify the header matches a known format.
	format := tr.blk.GetFormat()
	if format == FormatUnknown {
		return nil, nil, ErrHeader
	}

	var p parser
	hdr := new(Header)

	// Unpack the V7 header.
	v7 := tr.blk.V7()
	hdr.Typeflag = v7.TypeFlag()[0]
	hdr.Name = p.parseString(v7.Name())
	hdr.Linkname = p.parseString(v7.LinkName())
	hdr.Size = p.parseNumeric(v7.Size())
	hdr.Mode = p.parseNumeric(v7.Mode())
	hdr.Uid = int(p.parseNumeric(v7.UID()))
	hdr.Gid = int(p.parseNumeric(v7.GID()))
	hdr.ModTime = time.Unix(p.parseNumeric(v7.ModTime()), 0)

	// Unpack format specific fields.
	if format > formatV7 {
		ustar := tr.blk.USTAR()
		hdr.Uname = p.parseString(ustar.UserName())
		hdr.Gname = p.parseString(ustar.GroupName())
		hdr.Devmajor = p.parseNumeric(ustar.DevMajor())
		hdr.Devminor = p.parseNumeric(ustar.DevMinor())

		var prefix string
		switch {
		case format.has(FormatUSTAR | FormatPAX):
			hdr.Format = format
			ustar := tr.blk.USTAR()
			prefix = p.parseString(ustar.Prefix())

			// For Format detection, check if block is properly formatted since
			// the parser is more liberal than what USTAR actually permits.
			notASCII := func(r rune) bool { return r >= 0x80 }
			if bytes.IndexFunc(tr.blk[:], notASCII) >= 0 {
				hdr.Format = FormatUnknown // Non-ASCII characters in block.
			}
			nul := func(b []byte) bool { return int(b[len(b)-1]) == 0 }
			if !(nul(v7.Size()) && nul(v7.Mode()) && nul(v7.UID()) && nul(v7.GID()) &&
				nul(v7.ModTime()) && nul(ustar.DevMajor()) && nul(ustar.DevMinor())) {
				hdr.Format = FormatUnknown // Numeric fields must end in NUL
			}
		case format.has(formatSTAR):
			star := tr.blk.STAR()
			prefix = p.parseString(star.Prefix())
			hdr.AccessTime = time.Unix(p.parseNumeric(star.AccessTime()), 0)
			hdr.ChangeTime = time.Unix(p.parseNumeric(star.ChangeTime()), 0)
		case format.has(FormatGNU):
			hdr.Format = format
			var p2 parser
			gnu := tr.blk.GNU()
			if b := gnu.AccessTime(); b[0] != 0 {
				hdr.AccessTime = time.Unix(p2.parseNumeric(b), 0)
			}
			if b := gnu.ChangeTime(); b[0] != 0 {
				hdr.ChangeTime = time.Unix(p2.parseNumeric(b), 0)
			}

			// Prior to Go1.8, the Writer had a bug where it would output
			// an invalid tar file in certain rare situations because the logic
			// incorrectly believed that the old GNU format had a prefix field.
			// This is wrong and leads to an output file that mangles the
			// atime and ctime fields, which are often left unused.
			//
			// In order to continue reading tar files created by former, buggy
			// versions of Go, we skeptically parse the atime and ctime fields.
			// If we are unable to parse them and the prefix field looks like
			// an ASCII string, then we fall back on the pre-Go1.8 behavior
			// of treating these fields as the USTAR prefix field.
			//
			// Note that this will not use the fallback logic for all possible
			// files generated by a pre-Go1.8 toolchain. If the generated file
			// happened to have a prefix field that parses as valid
			// atime and ctime fields (e.g., when they are valid octal strings),
			// then it is impossible to distinguish between a valid GNU file
			// and an invalid pre-Go1.8 file.
			//
			// See https://golang.org/issues/12594
			// See https://golang.org/issues/21005
			if p2.err != nil {
				hdr.AccessTime, hdr.ChangeTime = time.Time{}, time.Time{}
				ustar := tr.blk.USTAR()
				if s := p.parseString(ustar.Prefix()); isASCII(s) {
					prefix = s
				}
				hdr.Format = FormatUnknown // Buggy file is not GNU
			}
		}
		if len(prefix) > 0 {
			hdr.Name = prefix + "/" + hdr.Name
		}
	}
	return hdr, &tr.blk, p.err
}

// readOldGNUSparseMap reads the sparse map from the old GNU sparse format.
// The sparse map is stored in the tar header if it's small enough.
// If it's larger than four entries, then one or more extension headers are used
// to store the rest of the sparse map.
//
// The Header.Size does not reflect the size of any extended headers used.
// Thus, this function will read from the raw io.Reader to fetch extra headers.
// This method mutates blk in the process.
func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) (sparseDatas, error) {
	// Make sure that the input format is GNU.
	// Unfortunately, the STAR format also has a sparse header format that uses
	// the same type flag but has a completely different layout.
	if blk.GetFormat() != FormatGNU {
		return nil, ErrHeader
	}
	hdr.Format.mayOnlyBe(FormatGNU)

	var p parser
	hdr.Size = p.parseNumeric(blk.GNU().RealSize())
	if p.err != nil {
		return nil, p.err
	}
	s := blk.GNU().Sparse()
	spd := make(sparseDatas, 0, s.MaxEntries())
	for {
		for i := 0; i < s.MaxEntries(); i++ {
			// This termination condition is identical to GNU and BSD tar.
			if s.Entry(i).Offset()[0] == 0x00 {
				break // Don't return, need to process extended headers (even if empty)
			}
			offset := p.parseNumeric(s.Entry(i).Offset())
			length := p.parseNumeric(s.Entry(i).Length())
			if p.err != nil {
				return nil, p.err
			}
			spd = append(spd, sparseEntry{Offset: offset, Length: length})
		}

		if s.IsExtended()[0] > 0 {
			// There are more entries. Read an extension header and parse its entries.
			if _, err := mustReadFull(tr.r, blk[:]); err != nil {
				return nil, err
			}
			s = blk.Sparse()
			continue
		}
		return spd, nil // Done
	}
}
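
// Layout note (added for illustration, not part of the original file): in the
// old GNU sparse format the header block itself carries the first few sparse
// entries (GNU tar reserves room for four) plus an "isextended" flag. When the
// flag is set, additional 512-byte extension blocks follow, each holding more
// offset/length pairs and its own flag, which is why the loop above re-reads
// blk and continues until the flag is clear.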

// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format
// version 1.0. The format of the sparse map consists of a series of
// newline-terminated numeric fields. The first field is the number of entries
// and is always present. Following this are the entries, consisting of two
// fields (offset, length). This function must stop reading at the end
// boundary of the block containing the last newline.
//
// Note that the GNU manual says that numeric values should be encoded in octal
// format. However, the GNU tar utility itself outputs these values in decimal.
// As such, this library treats values as being encoded in decimal.
func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) {
	var (
		cntNewline int64
		buf        bytes.Buffer
		blk        block
	)

	// feedTokens copies data in blocks from r into buf until there are
	// at least n newlines in buf. It will not read more blocks than needed.
	feedTokens := func(n int64) error {
		for cntNewline < n {
			if _, err := mustReadFull(r, blk[:]); err != nil {
				return err
			}
			buf.Write(blk[:])
			for _, c := range blk {
				if c == '\n' {
					cntNewline++
				}
			}
		}
		return nil
	}

	// nextToken gets the next token delimited by a newline. This assumes that
	// at least one newline exists in the buffer.
	nextToken := func() string {
		cntNewline--
		tok, _ := buf.ReadString('\n')
		return strings.TrimRight(tok, "\n")
	}

	// Parse for the number of entries.
	// Use integer overflow resistant math to check this.
	if err := feedTokens(1); err != nil {
		return nil, err
	}
	numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int
	if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
		return nil, ErrHeader
	}

	// Parse for all member entries.
	// numEntries is trusted after this since a potential attacker must have
	// committed resources proportional to what this library used.
	if err := feedTokens(2 * numEntries); err != nil {
		return nil, err
	}
	spd := make(sparseDatas, 0, numEntries)
	for i := int64(0); i < numEntries; i++ {
		offset, err1 := strconv.ParseInt(nextToken(), 10, 64)
		length, err2 := strconv.ParseInt(nextToken(), 10, 64)
		if err1 != nil || err2 != nil {
			return nil, ErrHeader
		}
		spd = append(spd, sparseEntry{Offset: offset, Length: length})
	}
	return spd, nil
}
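
// Encoding sketch (added for illustration, not part of the original file): in
// PAX sparse format 1.0 the map precedes the file data and is padded out to a
// 512-byte block boundary. For instance, a map describing the two data
// fragments (offset 0, length 1024) and (offset 4096, length 512) would be
// stored as the newline-terminated decimal fields
//
//	2\n0\n1024\n4096\n512\n
//
// followed by NUL padding to the end of the block.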

// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format
// version 0.1. The sparse map is stored in the PAX headers.
func readGNUSparseMap0x1(paxHdrs map[string]string) (sparseDatas, error) {
	// Get number of entries.
	// Use integer overflow resistant math to check this.
	numEntriesStr := paxHdrs[paxGNUSparseNumBlocks]
	numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int
	if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
		return nil, ErrHeader
	}

	// There should be two numbers in sparseMap for each entry.
	sparseMap := strings.Split(paxHdrs[paxGNUSparseMap], ",")
	if len(sparseMap) == 1 && sparseMap[0] == "" {
		sparseMap = sparseMap[:0]
	}
	if int64(len(sparseMap)) != 2*numEntries {
		return nil, ErrHeader
	}

	// Loop through the entries in the sparse map.
	// numEntries is trusted now.
	spd := make(sparseDatas, 0, numEntries)
	for len(sparseMap) >= 2 {
		offset, err1 := strconv.ParseInt(sparseMap[0], 10, 64)
		length, err2 := strconv.ParseInt(sparseMap[1], 10, 64)
		if err1 != nil || err2 != nil {
			return nil, ErrHeader
		}
		spd = append(spd, sparseEntry{Offset: offset, Length: length})
		sparseMap = sparseMap[2:]
	}
	return spd, nil
}
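
// Encoding sketch (added for illustration, not part of the original file): in
// PAX sparse format 0.1 the same two-fragment map would arrive as the PAX
// records
//
//	GNU.sparse.numblocks=2
//	GNU.sparse.map=0,1024,4096,512
//
// which is also the shape that parsePAX synthesizes for 0.0 archives.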

// Read reads from the current file in the tar archive.
// It returns (0, io.EOF) when it reaches the end of that file,
// until Next is called to advance to the next file.
//
// If the current file is sparse, then the regions marked as a hole
// are read back as NUL-bytes.
//
// Calling Read on special types like TypeLink, TypeSymlink, TypeChar,
// TypeBlock, TypeDir, and TypeFifo returns (0, io.EOF) regardless of what
// the Header.Size claims.
func (tr *Reader) Read(b []byte) (int, error) {
	if tr.err != nil {
		return 0, tr.err
	}
	n, err := tr.curr.Read(b)
	if err != nil && err != io.EOF {
		tr.err = err
	}
	return n, err
}

// writeTo writes the content of the current file to w.
// The number of bytes written matches the number of remaining bytes in the current file.
//
// If the current file is sparse and w is an io.WriteSeeker,
// then writeTo uses Seek to skip past holes defined in Header.SparseHoles,
// assuming that skipped regions are filled with NULs.
// This always writes the last byte to ensure w is the right size.
//
// TODO(dsnet): Re-export this when adding sparse file support.
// See https://golang.org/issue/22735
func (tr *Reader) writeTo(w io.Writer) (int64, error) {
	if tr.err != nil {
		return 0, tr.err
	}
	n, err := tr.curr.WriteTo(w)
	if err != nil {
		tr.err = err
	}
	return n, err
}

// regFileReader is a fileReader for reading data from a regular file entry.
type regFileReader struct {
	r  io.Reader // Underlying Reader
	nb int64     // Number of remaining bytes to read
}

func (fr *regFileReader) Read(b []byte) (n int, err error) {
	if int64(len(b)) > fr.nb {
		b = b[:fr.nb]
	}
	if len(b) > 0 {
		n, err = fr.r.Read(b)
		fr.nb -= int64(n)
	}
	switch {
	case err == io.EOF && fr.nb > 0:
		return n, io.ErrUnexpectedEOF
	case err == nil && fr.nb == 0:
		return n, io.EOF
	default:
		return n, err
	}
}

func (fr *regFileReader) WriteTo(w io.Writer) (int64, error) {
	return io.Copy(w, struct{ io.Reader }{fr})
}

func (fr regFileReader) LogicalRemaining() int64 {
	return fr.nb
}

func (fr regFileReader) PhysicalRemaining() int64 {
	return fr.nb
}
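
// Accounting note (added for illustration, not part of the original file): for
// a regular entry LogicalRemaining and PhysicalRemaining return the same
// value. For a sparse entry they differ: an entry whose Header.Size is 10 but
// which stores only 4 physical data bytes starts with LogicalRemaining of 10
// and PhysicalRemaining of 4, and sparseFileReader below reconciles the two by
// synthesizing NULs for the holes.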

// sparseFileReader is a fileReader for reading data from a sparse file entry.
type sparseFileReader struct {
	fr  fileReader  // Underlying fileReader
	sp  sparseHoles // Normalized list of sparse holes
	pos int64       // Current position in sparse file
}

func (sr *sparseFileReader) Read(b []byte) (n int, err error) {
	finished := int64(len(b)) >= sr.LogicalRemaining()
	if finished {
		b = b[:sr.LogicalRemaining()]
	}

	b0 := b
	endPos := sr.pos + int64(len(b))
	for endPos > sr.pos && err == nil {
		var nf int // Bytes read in fragment
		holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset()
		if sr.pos < holeStart { // In a data fragment
			bf := b[:min(int64(len(b)), holeStart-sr.pos)]
			nf, err = tryReadFull(sr.fr, bf)
		} else { // In a hole fragment
			bf := b[:min(int64(len(b)), holeEnd-sr.pos)]
			nf, err = tryReadFull(zeroReader{}, bf)
		}
		b = b[nf:]
		sr.pos += int64(nf)
		if sr.pos >= holeEnd && len(sr.sp) > 1 {
			sr.sp = sr.sp[1:] // Ensure last fragment always remains
		}
	}

	n = len(b0) - len(b)
	switch {
	case err == io.EOF:
		return n, errMissData // Less data in dense file than sparse file
	case err != nil:
		return n, err
	case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0:
		return n, errUnrefData // More data in dense file than sparse file
	case finished:
		return n, io.EOF
	default:
		return n, nil
	}
}

func (sr *sparseFileReader) WriteTo(w io.Writer) (n int64, err error) {
	ws, ok := w.(io.WriteSeeker)
	if ok {
		if _, err := ws.Seek(0, io.SeekCurrent); err != nil {
			ok = false // Not all io.Seeker can really seek
		}
	}
	if !ok {
		return io.Copy(w, struct{ io.Reader }{sr})
	}

	var writeLastByte bool
	pos0 := sr.pos
	for sr.LogicalRemaining() > 0 && !writeLastByte && err == nil {
		var nf int64 // Size of fragment
		holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset()
		if sr.pos < holeStart { // In a data fragment
			nf = holeStart - sr.pos
			nf, err = io.CopyN(ws, sr.fr, nf)
		} else { // In a hole fragment
			nf = holeEnd - sr.pos
			if sr.PhysicalRemaining() == 0 {
				writeLastByte = true
				nf--
			}
			_, err = ws.Seek(nf, io.SeekCurrent)
		}
		sr.pos += nf
		if sr.pos >= holeEnd && len(sr.sp) > 1 {
			sr.sp = sr.sp[1:] // Ensure last fragment always remains
		}
	}

	// If the last fragment is a hole, then seek to 1-byte before EOF, and
	// write a single byte to ensure the file is the right size.
	if writeLastByte && err == nil {
		_, err = ws.Write([]byte{0})
		sr.pos++
	}

	n = sr.pos - pos0
	switch {
	case err == io.EOF:
		return n, errMissData // Less data in dense file than sparse file
	case err != nil:
		return n, err
	case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0:
		return n, errUnrefData // More data in dense file than sparse file
	default:
		return n, nil
	}
}

func (sr sparseFileReader) LogicalRemaining() int64 {
	return sr.sp[len(sr.sp)-1].endOffset() - sr.pos
}
func (sr sparseFileReader) PhysicalRemaining() int64 {
	return sr.fr.PhysicalRemaining()
}

type zeroReader struct{}

func (zeroReader) Read(b []byte) (int, error) {
	for i := range b {
		b[i] = 0
	}
	return len(b), nil
}

// mustReadFull is like io.ReadFull except it returns
// io.ErrUnexpectedEOF when io.EOF is hit before len(b) bytes are read.
func mustReadFull(r io.Reader, b []byte) (int, error) {
	n, err := tryReadFull(r, b)
	if err == io.EOF {
		err = io.ErrUnexpectedEOF
	}
	return n, err
}

// tryReadFull is like io.ReadFull except it returns
// io.EOF when it is hit before len(b) bytes are read.
func tryReadFull(r io.Reader, b []byte) (n int, err error) {
	for len(b) > n && err == nil {
		var nn int
		nn, err = r.Read(b[n:])
		n += nn
	}
	if len(b) == n && err == io.EOF {
		err = nil
	}
	return n, err
}

// discard skips n bytes in r, reporting an error if unable to do so.
func discard(r io.Reader, n int64) error {
	// If possible, Seek to the last byte before the end of the data section.
	// Do this because Seek is often lazy about reporting errors; seeking past
	// the end of a truncated stream would not report an error and would mask
	// the truncation. We can rely on the io.CopyN done shortly afterwards to
	// trigger any IO errors.
	var seekSkipped int64 // Number of bytes skipped via Seek
	if sr, ok := r.(io.Seeker); ok && n > 1 {
		// Not all io.Seeker can actually Seek. For example, os.Stdin implements
		// io.Seeker, but calling Seek always returns an error and performs
		// no action. Thus, we try an innocent seek to the current position
		// to see if Seek is really supported.
		pos1, err := sr.Seek(0, io.SeekCurrent)
		if pos1 >= 0 && err == nil {
			// Seek seems supported, so perform the real Seek.
			pos2, err := sr.Seek(n-1, io.SeekCurrent)
			if pos2 < 0 || err != nil {
				return err
			}
			seekSkipped = pos2 - pos1
		}
	}

	copySkipped, err := io.CopyN(ioutil.Discard, r, n-seekSkipped)
	if err == io.EOF && seekSkipped+copySkipped < n {
		err = io.ErrUnexpectedEOF
	}
	return err
}