aboutsummaryrefslogtreecommitdiff
path: root/src/encoding/csv
diff options
context:
space:
mode:
author: Joe Tsai <joetsai@digital-static.net> 2023-09-01 01:54:25 -0700
committer: Joseph Tsai <joetsai@digital-static.net> 2023-09-08 19:04:28 +0000
commit: dac9b9ddbd5160c5f4552410f5f8281bd5eed38c (patch)
tree: 63c2331085cdd08681cc2a8d1a30e5b513989ab5 /src/encoding/csv
parent: 45d3d10071830052b45a3299c26a1849a0c0c856 (diff)
download: go-dac9b9ddbd5160c5f4552410f5f8281bd5eed38c.tar.xz
encoding: modernize Go documentation
Across all encoding packages, linkify declarations if possible. In some
cases, we convert a code block into a bulleted list, which then further
allows for more linkification.

Change-Id: I68fedf362615b34228bab5d4859b7d87d831c570
Reviewed-on: https://go-review.googlesource.com/c/go/+/524977
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Daniel Martí <mvdan@mvdan.cc>
Reviewed-by: Ian Lance Taylor <iant@google.com>
Reviewed-by: qiulaidongfeng <2645477756@qq.com>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
Diffstat (limited to 'src/encoding/csv')
-rw-r--r--  src/encoding/csv/reader.go | 18
-rw-r--r--  src/encoding/csv/writer.go | 31
2 files changed, 26 insertions, 23 deletions
diff --git a/src/encoding/csv/reader.go b/src/encoding/csv/reader.go
index a93de9822d..d9cab86572 100644
--- a/src/encoding/csv/reader.go
+++ b/src/encoding/csv/reader.go
@@ -82,7 +82,7 @@ func (e *ParseError) Error() string {
func (e *ParseError) Unwrap() error { return e.Err }
-// These are the errors that can be returned in ParseError.Err.
+// These are the errors that can be returned in [ParseError.Err].
var (
ErrBareQuote = errors.New("bare \" in non-quoted-field")
ErrQuote = errors.New("extraneous or missing \" in quoted-field")
@@ -100,9 +100,9 @@ func validDelim(r rune) bool {
// A Reader reads records from a CSV-encoded file.
//
-// As returned by NewReader, a Reader expects input conforming to RFC 4180.
+// As returned by [NewReader], a Reader expects input conforming to RFC 4180.
// The exported fields can be changed to customize the details before the
-// first call to Read or ReadAll.
+// first call to [Reader.Read] or [Reader.ReadAll].
//
// The Reader converts all \r\n sequences in its input to plain \n,
// including in multiline field values, so that the returned data does
@@ -186,12 +186,12 @@ func NewReader(r io.Reader) *Reader {
// Read reads one record (a slice of fields) from r.
// If the record has an unexpected number of fields,
-// Read returns the record along with the error ErrFieldCount.
+// Read returns the record along with the error [ErrFieldCount].
// If the record contains a field that cannot be parsed,
// Read returns a partial record along with the parse error.
// The partial record contains all fields read before the error.
-// If there is no data left to be read, Read returns nil, io.EOF.
-// If ReuseRecord is true, the returned slice may be shared
+// If there is no data left to be read, Read returns nil, [io.EOF].
+// If [Reader.ReuseRecord] is true, the returned slice may be shared
// between multiple calls to Read.
func (r *Reader) Read() (record []string, err error) {
if r.ReuseRecord {
@@ -205,7 +205,7 @@ func (r *Reader) Read() (record []string, err error) {
// FieldPos returns the line and column corresponding to
// the start of the field with the given index in the slice most recently
-// returned by Read. Numbering of lines and columns starts at 1;
+// returned by [Reader.Read]. Numbering of lines and columns starts at 1;
// columns are counted in bytes, not runes.
//
// If this is called with an out-of-bounds index, it panics.
@@ -231,7 +231,7 @@ type position struct {
// ReadAll reads all the remaining records from r.
// Each record is a slice of fields.
-// A successful call returns err == nil, not err == io.EOF. Because ReadAll is
+// A successful call returns err == nil, not err == [io.EOF]. Because ReadAll is
// defined to read until EOF, it does not treat end of file as an error to be
// reported.
func (r *Reader) ReadAll() (records [][]string, err error) {
@@ -249,7 +249,7 @@ func (r *Reader) ReadAll() (records [][]string, err error) {
// readLine reads the next line (with the trailing endline).
// If EOF is hit without a trailing endline, it will be omitted.
-// If some bytes were read, then the error is never io.EOF.
+// If some bytes were read, then the error is never [io.EOF].
// The result is only valid until the next call to readLine.
func (r *Reader) readLine() ([]byte, error) {
line, err := r.r.ReadSlice('\n')
diff --git a/src/encoding/csv/writer.go b/src/encoding/csv/writer.go
index ac64b4d54c..ff3142f0bb 100644
--- a/src/encoding/csv/writer.go
+++ b/src/encoding/csv/writer.go
@@ -14,19 +14,21 @@ import (
// A Writer writes records using CSV encoding.
//
-// As returned by NewWriter, a Writer writes records terminated by a
+// As returned by [NewWriter], a Writer writes records terminated by a
// newline and uses ',' as the field delimiter. The exported fields can be
-// changed to customize the details before the first call to Write or WriteAll.
+// changed to customize the details before
+// the first call to [Writer.Write] or [Writer.WriteAll].
//
-// Comma is the field delimiter.
+// [Writer.Comma] is the field delimiter.
//
-// If UseCRLF is true, the Writer ends each output line with \r\n instead of \n.
+// If [Writer.UseCRLF] is true,
+// the Writer ends each output line with \r\n instead of \n.
//
// The writes of individual records are buffered.
// After all data has been written, the client should call the
-// Flush method to guarantee all data has been forwarded to
-// the underlying io.Writer. Any errors that occurred should
-// be checked by calling the Error method.
+// [Writer.Flush] method to guarantee all data has been forwarded to
+// the underlying [io.Writer]. Any errors that occurred should
+// be checked by calling the [Writer.Error] method.
type Writer struct {
Comma rune // Field delimiter (set to ',' by NewWriter)
UseCRLF bool // True to use \r\n as the line terminator
@@ -43,8 +45,8 @@ func NewWriter(w io.Writer) *Writer {
// Write writes a single CSV record to w along with any necessary quoting.
// A record is a slice of strings with each string being one field.
-// Writes are buffered, so Flush must eventually be called to ensure
-// that the record is written to the underlying io.Writer.
+// Writes are buffered, so [Writer.Flush] must eventually be called to ensure
+// that the record is written to the underlying [io.Writer].
func (w *Writer) Write(record []string) error {
if !validDelim(w.Comma) {
return errInvalidDelim
@@ -118,20 +120,21 @@ func (w *Writer) Write(record []string) error {
return err
}
-// Flush writes any buffered data to the underlying io.Writer.
-// To check if an error occurred during the Flush, call Error.
+// Flush writes any buffered data to the underlying [io.Writer].
+// To check if an error occurred during Flush, call [Writer.Error].
func (w *Writer) Flush() {
w.w.Flush()
}
-// Error reports any error that has occurred during a previous Write or Flush.
+// Error reports any error that has occurred during
+// a previous [Writer.Write] or [Writer.Flush].
func (w *Writer) Error() error {
_, err := w.w.Write(nil)
return err
}
-// WriteAll writes multiple CSV records to w using Write and then calls Flush,
-// returning any error from the Flush.
+// WriteAll writes multiple CSV records to w using [Writer.Write] and
+// then calls [Writer.Flush], returning any error from the Flush.
func (w *Writer) WriteAll(records [][]string) error {
for _, record := range records {
err := w.Write(record)