reader.mx raw

   1  // Copyright 2011 The Go Authors. All rights reserved.
   2  // Use of this source code is governed by a BSD-style
   3  // license that can be found in the LICENSE file.
   4  
   5  // Package csv reads and writes comma-separated values (CSV) files.
   6  // There are many kinds of CSV files; this package supports the format
   7  // described in RFC 4180, except that [Writer] uses LF
   8  // instead of CRLF as newline character by default.
   9  //
  10  // A csv file contains zero or more records of one or more fields per record.
  11  // Each record is separated by the newline character. The final record may
  12  // optionally be followed by a newline character.
  13  //
  14  //	field1,field2,field3
  15  //
  16  // White space is considered part of a field.
  17  //
  18  // Carriage returns before newline characters are silently removed.
  19  //
  20  // Blank lines are ignored. A line with only whitespace characters (excluding
  21  // the ending newline character) is not considered a blank line.
  22  //
  23  // Fields which start and stop with the quote character " are called
  24  // quoted-fields. The beginning and ending quote are not part of the
  25  // field.
  26  //
  27  // The source:
  28  //
  29  //	normal string,"quoted-field"
  30  //
  31  // results in the fields
  32  //
  33  //	{`normal string`, `quoted-field`}
  34  //
  35  // Within a quoted-field a quote character followed by a second quote
  36  // character is considered a single quote.
  37  //
  38  //	"the ""word"" is true","a ""quoted-field"""
  39  //
  40  // results in
  41  //
  42  //	{`the "word" is true`, `a "quoted-field"`}
  43  //
  44  // Newlines and commas may be included in a quoted-field
  45  //
  46  //	"Multi-line
  47  //	field","comma is ,"
  48  //
  49  // results in
  50  //
  51  //	{`Multi-line
  52  //	field`, `comma is ,`}
  53  package csv
  54  
  55  import (
  56  	"bufio"
  57  	"bytes"
  58  	"errors"
  59  	"fmt"
  60  	"io"
  61  	"unicode"
  62  	"unicode/utf8"
  63  )
  64  
  65  // A ParseError is returned for parsing errors.
  66  // Line and column numbers are 1-indexed.
  67  type ParseError struct {
  68  	StartLine int   // Line where the record starts
  69  	Line      int   // Line where the error occurred
  70  	Column    int   // Column (1-based byte index) where the error occurred
  71  	Err       error // The actual error
  72  }
  73  
  74  func (e *ParseError) Error() string {
  75  	if e.Err == ErrFieldCount {
  76  		return fmt.Sprintf("record on line %d: %v", e.Line, e.Err)
  77  	}
  78  	if e.StartLine != e.Line {
  79  		return fmt.Sprintf("record on line %d; parse error on line %d, column %d: %v", e.StartLine, e.Line, e.Column, e.Err)
  80  	}
  81  	return fmt.Sprintf("parse error on line %d, column %d: %v", e.Line, e.Column, e.Err)
  82  }
  83  
  84  func (e *ParseError) Unwrap() error { return e.Err }
  85  
  86  // These are the errors that can be returned in [ParseError.Err].
  87  var (
  88  	ErrBareQuote  = errors.New("bare \" in non-quoted-field")
  89  	ErrQuote      = errors.New("extraneous or missing \" in quoted-field")
  90  	ErrFieldCount = errors.New("wrong number of fields")
  91  
  92  	// Deprecated: ErrTrailingComma is no longer used.
  93  	ErrTrailingComma = errors.New("extra delimiter at end of line")
  94  )
  95  
  96  var errInvalidDelim = errors.New("csv: invalid field or comment delimiter")
  97  
  98  func validDelim(r rune) bool {
  99  	return r != 0 && r != '"' && r != '\r' && r != '\n' && utf8.ValidRune(r) && r != utf8.RuneError
 100  }
 101  
// A Reader reads records from a CSV-encoded file.
//
// As returned by [NewReader], a Reader expects input conforming to RFC 4180.
// The exported fields can be changed to customize the details before the
// first call to [Reader.Read] or [Reader.ReadAll].
//
// The Reader converts all \r\n sequences in its input to plain \n,
// including in multiline field values, so that the returned data does
// not depend on which line-ending convention an input file uses.
type Reader struct {
	// Comma is the field delimiter.
	// It is set to comma (',') by NewReader.
	// Comma must be a valid rune and must not be \r, \n,
	// or the Unicode replacement character (0xFFFD).
	Comma rune

	// Comment, if not 0, is the comment character. Lines beginning with the
	// Comment character without preceding whitespace are ignored.
	// With leading whitespace the Comment character becomes part of the
	// field, even if TrimLeadingSpace is true.
	// Comment must be a valid rune and must not be \r, \n,
	// or the Unicode replacement character (0xFFFD).
	// It must also not be equal to Comma.
	Comment rune

	// FieldsPerRecord is the number of expected fields per record.
	// If FieldsPerRecord is positive, Read requires each record to
	// have the given number of fields. If FieldsPerRecord is 0, Read sets it to
	// the number of fields in the first record, so that future records must
	// have the same field count. If FieldsPerRecord is negative, no check is
	// made and records may have a variable number of fields.
	FieldsPerRecord int

	// If LazyQuotes is true, a quote may appear in an unquoted field and a
	// non-doubled quote may appear in a quoted field.
	LazyQuotes bool

	// If TrimLeadingSpace is true, leading white space in a field is ignored.
	// This is done even if the field delimiter, Comma, is white space.
	TrimLeadingSpace bool

	// ReuseRecord controls whether calls to Read may return a slice sharing
	// the backing array of the previous call's returned slice for performance.
	// By default, each call to Read returns newly allocated memory owned by the caller.
	ReuseRecord bool

	// Deprecated: TrailingComma is no longer used.
	TrailingComma bool

	// r is the buffered source all input is read from.
	r *bufio.Reader

	// numLine is the current line being read in the CSV file.
	// It is incremented by readLine and used for error positions.
	numLine int

	// offset is the input stream byte offset of the current reader position.
	// It counts raw input bytes (before \r\n normalization); see InputOffset.
	offset int64

	// rawBuffer is a line buffer only used by the readLine method,
	// and only when a line overflows the bufio.Reader's buffer.
	rawBuffer []byte

	// recordBuffer holds the unescaped fields, one after another.
	// The fields can be accessed by using the indexes in fieldIndexes.
	// E.g., For the row `a,"b","c""d",e`, recordBuffer will contain `abc"de`
	// and fieldIndexes will contain the indexes [1, 2, 5, 6].
	recordBuffer []byte

	// fieldIndexes is an index of fields inside recordBuffer.
	// The i'th field ends at offset fieldIndexes[i] in recordBuffer.
	fieldIndexes []int

	// fieldPositions is an index of field positions for the
	// last record returned by Read; queried via FieldPos.
	fieldPositions []position

	// lastRecord is a record cache and only used when ReuseRecord == true.
	lastRecord [][]byte
}
 179  
 180  // NewReader returns a new Reader that reads from r.
 181  func NewReader(r io.Reader) *Reader {
 182  	return &Reader{
 183  		Comma: ',',
 184  		r:     bufio.NewReader(r),
 185  	}
 186  }
 187  
 188  // Read reads one record (a slice of fields) from r.
 189  // If the record has an unexpected number of fields,
 190  // Read returns the record along with the error [ErrFieldCount].
 191  // If the record contains a field that cannot be parsed,
 192  // Read returns a partial record along with the parse error.
 193  // The partial record contains all fields read before the error.
 194  // If there is no data left to be read, Read returns nil, [io.EOF].
 195  // If [Reader.ReuseRecord] is true, the returned slice may be shared
 196  // between multiple calls to Read.
 197  func (r *Reader) Read() (record [][]byte, err error) {
 198  	if r.ReuseRecord {
 199  		record, err = r.readRecord(r.lastRecord)
 200  		r.lastRecord = record
 201  	} else {
 202  		record, err = r.readRecord(nil)
 203  	}
 204  	return record, err
 205  }
 206  
 207  // FieldPos returns the line and column corresponding to
 208  // the start of the field with the given index in the slice most recently
 209  // returned by [Reader.Read]. Numbering of lines and columns starts at 1;
 210  // columns are counted in bytes, not runes.
 211  //
 212  // If this is called with an out-of-bounds index, it panics.
 213  func (r *Reader) FieldPos(field int) (line, column int) {
 214  	if field < 0 || field >= len(r.fieldPositions) {
 215  		panic("out of range index passed to FieldPos")
 216  	}
 217  	p := &r.fieldPositions[field]
 218  	return p.line, p.col
 219  }
 220  
// InputOffset returns the input stream byte offset of the current reader
// position. The offset gives the location of the end of the most recently
// read row and the beginning of the next row.
func (r *Reader) InputOffset() int64 {
	return r.offset
}

// pos holds the position of a field in the current line.
// Both line and col are 1-based; col counts bytes, not runes.
type position struct {
	line, col int
}
 232  
 233  // ReadAll reads all the remaining records from r.
 234  // Each record is a slice of fields.
 235  // A successful call returns err == nil, not err == [io.EOF]. Because ReadAll is
 236  // defined to read until EOF, it does not treat end of file as an error to be
 237  // reported.
 238  func (r *Reader) ReadAll() (records [][][]byte, err error) {
 239  	for {
 240  		record, err := r.readRecord(nil)
 241  		if err == io.EOF {
 242  			return records, nil
 243  		}
 244  		if err != nil {
 245  			return nil, err
 246  		}
 247  		records = append(records, record)
 248  	}
 249  }
 250  
// readLine reads the next line (with the trailing endline).
// If EOF is hit without a trailing endline, it will be omitted.
// If some bytes were read, then the error is never [io.EOF].
// The result is only valid until the next call to readLine.
func (r *Reader) readLine() ([]byte, error) {
	line, err := r.r.ReadSlice('\n')
	if err == bufio.ErrBufferFull {
		// The line is longer than the bufio buffer: accumulate the
		// pieces into rawBuffer until the '\n' (or EOF) is found.
		r.rawBuffer = append(r.rawBuffer[:0], line...)
		for err == bufio.ErrBufferFull {
			line, err = r.r.ReadSlice('\n')
			r.rawBuffer = append(r.rawBuffer, line...)
		}
		line = r.rawBuffer
	}
	readSize := len(line)
	if readSize > 0 && err == io.EOF {
		// Data with no trailing '\n' still counts as a line.
		err = nil
		// For backwards compatibility, drop trailing \r before EOF.
		if line[readSize-1] == '\r' {
			line = line[:readSize-1]
		}
	}
	r.numLine++
	// offset counts raw input bytes, including any \r dropped above
	// and the byte removed by the \r\n normalization below.
	r.offset += int64(readSize)
	// Normalize \r\n to \n on all input lines.
	// Rewritten in place: line aliases the bufio buffer (or rawBuffer),
	// which is why the result is only valid until the next call.
	if n := len(line); n >= 2 && line[n-2] == '\r' && line[n-1] == '\n' {
		line[n-2] = '\n'
		line = line[:n-1]
	}
	return line, err
}
 282  
 283  // lengthNL reports the number of bytes for the trailing \n.
 284  func lengthNL(b []byte) int {
 285  	if len(b) > 0 && b[len(b)-1] == '\n' {
 286  		return 1
 287  	}
 288  	return 0
 289  }
 290  
 291  // nextRune returns the next rune in b or utf8.RuneError.
 292  func nextRune(b []byte) rune {
 293  	r, _ := utf8.DecodeRune(b)
 294  	return r
 295  }
 296  
 297  func (r *Reader) readRecord(dst [][]byte) ([][]byte, error) {
 298  	if r.Comma == r.Comment || !validDelim(r.Comma) || (r.Comment != 0 && !validDelim(r.Comment)) {
 299  		return nil, errInvalidDelim
 300  	}
 301  
 302  	// Read line (automatically skipping past empty lines and any comments).
 303  	var line []byte
 304  	var errRead error
 305  	for errRead == nil {
 306  		line, errRead = r.readLine()
 307  		if r.Comment != 0 && nextRune(line) == r.Comment {
 308  			line = nil
 309  			continue // Skip comment lines
 310  		}
 311  		if errRead == nil && len(line) == lengthNL(line) {
 312  			line = nil
 313  			continue // Skip empty lines
 314  		}
 315  		break
 316  	}
 317  	if errRead == io.EOF {
 318  		return nil, errRead
 319  	}
 320  
 321  	// Parse each field in the record.
 322  	var err error
 323  	const quoteLen = len(`"`)
 324  	commaLen := utf8.RuneLen(r.Comma)
 325  	recLine := r.numLine // Starting line for record
 326  	r.recordBuffer = r.recordBuffer[:0]
 327  	r.fieldIndexes = r.fieldIndexes[:0]
 328  	r.fieldPositions = r.fieldPositions[:0]
 329  	pos := position{line: r.numLine, col: 1}
 330  parseField:
 331  	for {
 332  		if r.TrimLeadingSpace {
 333  			i := bytes.IndexFunc(line, func(r rune) bool {
 334  				return !unicode.IsSpace(r)
 335  			})
 336  			if i < 0 {
 337  				i = len(line)
 338  				pos.col -= lengthNL(line)
 339  			}
 340  			line = line[i:]
 341  			pos.col += i
 342  		}
 343  		if len(line) == 0 || line[0] != '"' {
 344  			// Non-quoted string field
 345  			i := bytes.IndexRune(line, r.Comma)
 346  			field := line
 347  			if i >= 0 {
 348  				field = field[:i]
 349  			} else {
 350  				field = field[:len(field)-lengthNL(field)]
 351  			}
 352  			// Check to make sure a quote does not appear in field.
 353  			if !r.LazyQuotes {
 354  				if j := bytes.IndexByte(field, '"'); j >= 0 {
 355  					col := pos.col + j
 356  					err = &ParseError{StartLine: recLine, Line: r.numLine, Column: col, Err: ErrBareQuote}
 357  					break parseField
 358  				}
 359  			}
 360  			r.recordBuffer = append(r.recordBuffer, field...)
 361  			r.fieldIndexes = append(r.fieldIndexes, len(r.recordBuffer))
 362  			r.fieldPositions = append(r.fieldPositions, pos)
 363  			if i >= 0 {
 364  				line = line[i+commaLen:]
 365  				pos.col += i + commaLen
 366  				continue parseField
 367  			}
 368  			break parseField
 369  		} else {
 370  			// Quoted string field
 371  			fieldPos := pos
 372  			line = line[quoteLen:]
 373  			pos.col += quoteLen
 374  			for {
 375  				i := bytes.IndexByte(line, '"')
 376  				if i >= 0 {
 377  					// Hit next quote.
 378  					r.recordBuffer = append(r.recordBuffer, line[:i]...)
 379  					line = line[i+quoteLen:]
 380  					pos.col += i + quoteLen
 381  					switch rn := nextRune(line); {
 382  					case rn == '"':
 383  						// `""` sequence (append quote).
 384  						r.recordBuffer = append(r.recordBuffer, '"')
 385  						line = line[quoteLen:]
 386  						pos.col += quoteLen
 387  					case rn == r.Comma:
 388  						// `",` sequence (end of field).
 389  						line = line[commaLen:]
 390  						pos.col += commaLen
 391  						r.fieldIndexes = append(r.fieldIndexes, len(r.recordBuffer))
 392  						r.fieldPositions = append(r.fieldPositions, fieldPos)
 393  						continue parseField
 394  					case lengthNL(line) == len(line):
 395  						// `"\n` sequence (end of line).
 396  						r.fieldIndexes = append(r.fieldIndexes, len(r.recordBuffer))
 397  						r.fieldPositions = append(r.fieldPositions, fieldPos)
 398  						break parseField
 399  					case r.LazyQuotes:
 400  						// `"` sequence (bare quote).
 401  						r.recordBuffer = append(r.recordBuffer, '"')
 402  					default:
 403  						// `"*` sequence (invalid non-escaped quote).
 404  						err = &ParseError{StartLine: recLine, Line: r.numLine, Column: pos.col - quoteLen, Err: ErrQuote}
 405  						break parseField
 406  					}
 407  				} else if len(line) > 0 {
 408  					// Hit end of line (copy all data so far).
 409  					r.recordBuffer = append(r.recordBuffer, line...)
 410  					if errRead != nil {
 411  						break parseField
 412  					}
 413  					pos.col += len(line)
 414  					line, errRead = r.readLine()
 415  					if len(line) > 0 {
 416  						pos.line++
 417  						pos.col = 1
 418  					}
 419  					if errRead == io.EOF {
 420  						errRead = nil
 421  					}
 422  				} else {
 423  					// Abrupt end of file (EOF or error).
 424  					if !r.LazyQuotes && errRead == nil {
 425  						err = &ParseError{StartLine: recLine, Line: pos.line, Column: pos.col, Err: ErrQuote}
 426  						break parseField
 427  					}
 428  					r.fieldIndexes = append(r.fieldIndexes, len(r.recordBuffer))
 429  					r.fieldPositions = append(r.fieldPositions, fieldPos)
 430  					break parseField
 431  				}
 432  			}
 433  		}
 434  	}
 435  	if err == nil {
 436  		err = errRead
 437  	}
 438  
 439  	// Create a single string and create slices out of it.
 440  	// This pins the memory of the fields together, but allocates once.
 441  	str := []byte(r.recordBuffer) // Convert to string once to batch allocations
 442  	dst = dst[:0]
 443  	if cap(dst) < len(r.fieldIndexes) {
 444  		dst = [][]byte{:len(r.fieldIndexes)}
 445  	}
 446  	dst = dst[:len(r.fieldIndexes)]
 447  	var preIdx int
 448  	for i, idx := range r.fieldIndexes {
 449  		dst[i] = str[preIdx:idx]
 450  		preIdx = idx
 451  	}
 452  
 453  	// Check or update the expected fields per record.
 454  	if r.FieldsPerRecord > 0 {
 455  		if len(dst) != r.FieldsPerRecord && err == nil {
 456  			err = &ParseError{
 457  				StartLine: recLine,
 458  				Line:      recLine,
 459  				Column:    1,
 460  				Err:       ErrFieldCount,
 461  			}
 462  		}
 463  	} else if r.FieldsPerRecord == 0 {
 464  		r.FieldsPerRecord = len(dst)
 465  	}
 466  	return dst, err
 467  }
 468