// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package build

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"go/ast"
	"go/parser"
	"go/scanner"
	"go/token"
	"io"
	"strconv"
	"strings"
	"unicode"
	"unicode/utf8"

	_ "unsafe" // for linkname
)

type importReader struct {
	b    *bufio.Reader
	buf  []byte // all bytes read so far
	peek byte   // lookahead byte left by peekByte; 0 means none
	err  error  // first I/O or syntax error encountered
	eof  bool   // reached end of input
	nerr int    // number of peekByte calls after err was set (loop guard)
	pos  token.Position
}

var bom = []byte{0xef, 0xbb, 0xbf}

func newImportReader(name string, r io.Reader) *importReader {
	b := bufio.NewReader(r)
	// Remove leading UTF-8 BOM.
	// Per https://golang.org/ref/spec#Source_code_representation:
	// a compiler may ignore a UTF-8-encoded byte order mark (U+FEFF)
	// if it is the first Unicode code point in the source text.
	if leadingBytes, err := b.Peek(3); err == nil && bytes.Equal(leadingBytes, bom) {
		b.Discard(3)
	}
	return &importReader{
		b: b,
		pos: token.Position{
			Filename: name,
			Line:     1,
			Column:   1,
		},
	}
}

// isIdent reports whether c can appear in an identifier.
// Bytes >= utf8.RuneSelf (non-ASCII) are accepted; precise rune checks are left to go/parser.
func isIdent(c byte) bool {
	return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '_' || c >= utf8.RuneSelf
}

var (
	errSyntax = errors.New("syntax error")
	errNUL    = errors.New("unexpected NUL in input")
)

// syntaxError records a syntax error, but only if an I/O error has not already been recorded.
func (r *importReader) syntaxError() {
	if r.err == nil {
		r.err = errSyntax
	}
}

// readByte reads the next byte from the input, saves it in buf, and returns it.
// If an error occurs, readByte records the error in r.err and returns 0.
func (r *importReader) readByte() byte {
	c, err := r.b.ReadByte()
	if err == nil {
		r.buf = append(r.buf, c)
		if c == 0 {
			err = errNUL
		}
	}
	if err != nil {
		if err == io.EOF {
			r.eof = true
		} else if r.err == nil {
			r.err = err
		}
		c = 0
	}
	return c
}

// readRest reads the entire rest of the file into r.buf.
func (r *importReader) readRest() {
	for {
		if len(r.buf) == cap(r.buf) {
			// Grow the buffer: append one byte beyond the current capacity,
			// then reslice back to the original length.
			r.buf = append(r.buf, 0)[:len(r.buf)]
		}
		n, err := r.b.Read(r.buf[len(r.buf):cap(r.buf)])
		r.buf = r.buf[:len(r.buf)+n]
		if err != nil {
			if err == io.EOF {
				r.eof = true
			} else if r.err == nil {
				r.err = err
			}
			break
		}
	}
}

// peekByte returns the next byte from the input reader but does not advance beyond it.
// If skipSpace is set, peekByte skips leading spaces and comments.
func (r *importReader) peekByte(skipSpace bool) byte {
	if r.err != nil {
		if r.nerr++; r.nerr > 10000 {
			panic("go/build: import reader looping")
		}
		return 0
	}

	// Use r.peek as first input byte.
	// Don't just return r.peek here: it might have been left by peekByte(false)
	// and this might be peekByte(true).
	c := r.peek
	if c == 0 {
		c = r.readByte()
	}
	for r.err == nil && !r.eof {
		if skipSpace {
			// For the purposes of this reader, semicolons are never necessary to
			// understand the input and are treated as spaces.
			switch c {
			case ' ', '\f', '\t', '\r', '\n', ';':
				c = r.readByte()
				continue

			case '/':
				c = r.readByte()
				if c == '/' {
					// Line comment: skip to end of line.
					for c != '\n' && r.err == nil && !r.eof {
						c = r.readByte()
					}
				} else if c == '*' {
					// Block comment: skip to the closing */.
					var c1 byte
					for (c != '*' || c1 != '/') && r.err == nil {
						if r.eof {
							r.syntaxError()
						}
						c, c1 = c1, r.readByte()
					}
				} else {
					r.syntaxError()
				}
				c = r.readByte()
				continue
			}
		}
		break
	}
	r.peek = c
	return r.peek
}

// nextByte is like peekByte but advances beyond the returned byte.
func (r *importReader) nextByte(skipSpace bool) byte {
	c := r.peekByte(skipSpace)
	r.peek = 0
	return c
}

// readKeyword reads the given keyword from the input.
// If the keyword is not present, readKeyword records a syntax error.
func (r *importReader) readKeyword(kw string) {
	r.peekByte(true)
	for i := 0; i < len(kw); i++ {
		if r.nextByte(false) != kw[i] {
			r.syntaxError()
			return
		}
	}
	if isIdent(r.peekByte(false)) {
		r.syntaxError()
	}
}

// readIdent reads an identifier from the input.
// If an identifier is not present, readIdent records a syntax error.
func (r *importReader) readIdent() {
	c := r.peekByte(true)
	if !isIdent(c) {
		r.syntaxError()
		return
	}
	for isIdent(r.peekByte(false)) {
		r.peek = 0
	}
}

// readString reads a quoted string literal from the input.
// If a quoted string is not present, readString records a syntax error.
func (r *importReader) readString() {
	switch r.nextByte(true) {
	case '`':
		// Raw string literal: read until the closing backquote.
		for r.err == nil {
			if r.nextByte(false) == '`' {
				break
			}
			if r.eof {
				r.syntaxError()
			}
		}
	case '"':
		// Interpreted string literal: read until the closing quote,
		// skipping the byte after any backslash escape.
		for r.err == nil {
			c := r.nextByte(false)
			if c == '"' {
				break
			}
			if r.eof || c == '\n' {
				r.syntaxError()
			}
			if c == '\\' {
				r.nextByte(false)
			}
		}
	default:
		r.syntaxError()
	}
}

// readImport reads an import clause - optional identifier followed by quoted string -
// from the input.
func (r *importReader) readImport() {
	c := r.peekByte(true)
	if c == '.' {
		r.peek = 0
	} else if isIdent(c) {
		r.readIdent()
	}
	r.readString()
}

// readComments is like io.ReadAll, except that it only reads the leading
// block of comments in the file.
//
// readComments should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bazelbuild/bazel-gazelle
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname readComments
func readComments(f io.Reader) ([]byte, error) {
	r := newImportReader("", f)
	r.peekByte(true)
	if r.err == nil && !r.eof {
		// Didn't reach EOF, so must have found a non-space byte. Remove it.
		r.buf = r.buf[:len(r.buf)-1]
	}
	return r.buf, r.err
}
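
// A minimal usage sketch (hypothetical caller; readComments is unexported and
// reachable from other packages only via the linkname above):
//
//	f, err := os.Open("main.go")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	header, err := readComments(f)
//	// header now holds the leading comment block, e.g. the license header
//	// and any //go:build constraint lines, up to (but not including) the
//	// first non-space, non-comment byte.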

// readGoInfo expects a Go file as input and reads the file up to and including the import section.
// It records what it learned in *info.
// If info.fset is non-nil, readGoInfo parses the file and sets info.parsed, info.parseErr,
// info.imports and info.embeds.
//
// It only returns an error if there are problems reading the file,
// not for syntax errors in the file itself.
func readGoInfo(f io.Reader, info *fileInfo) error {
	r := newImportReader(info.name, f)

	r.readKeyword("package")
	r.readIdent()
	for r.peekByte(true) == 'i' {
		r.readKeyword("import")
		if r.peekByte(true) == '(' {
			r.nextByte(false)
			for r.peekByte(true) != ')' && r.err == nil {
				r.readImport()
			}
			r.nextByte(false)
		} else {
			r.readImport()
		}
	}

	info.header = r.buf

	// If we stopped successfully before EOF, we read a byte that told us we were done.
	// Return all but that last byte, which would cause a syntax error if we let it through.
	if r.err == nil && !r.eof {
		info.header = r.buf[:len(r.buf)-1]
	}

	// If we stopped for a syntax error, consume the whole file so that
	// we are sure we don't change the errors that go/parser returns.
	if r.err == errSyntax {
		r.err = nil
		r.readRest()
		info.header = r.buf
	}
	if r.err != nil {
		return r.err
	}

	if info.fset == nil {
		return nil
	}

	// Parse file header & record imports.
	info.parsed, info.parseErr = parser.ParseFile(info.fset, info.name, info.header, parser.ImportsOnly|parser.ParseComments)
	if info.parseErr != nil {
		return nil
	}

	hasEmbed := false
	for _, decl := range info.parsed.Decls {
		d, ok := decl.(*ast.GenDecl)
		if !ok {
			continue
		}
		for _, dspec := range d.Specs {
			spec, ok := dspec.(*ast.ImportSpec)
			if !ok {
				continue
			}
			quoted := spec.Path.Value
			path, err := strconv.Unquote(quoted)
			if err != nil {
				return fmt.Errorf("parser returned invalid quoted string: <%s>", quoted)
			}
			if !isValidImport(path) {
				// The parser used to return a parse error for invalid import paths, but
				// no longer does, so check for and create the error here instead.
				info.parseErr = scanner.Error{Pos: info.fset.Position(spec.Pos()), Msg: "invalid import path: " + path}
				info.imports = nil
				return nil
			}
			if path == "embed" {
				hasEmbed = true
			}

			doc := spec.Doc
			if doc == nil && len(d.Specs) == 1 {
				doc = d.Doc
			}
			info.imports = append(info.imports, fileImport{path, spec.Pos(), doc})
		}
	}

	// Extract directives.
	for _, group := range info.parsed.Comments {
		if group.Pos() >= info.parsed.Package {
			break
		}
		for _, c := range group.List {
			if strings.HasPrefix(c.Text, "//go:") {
				info.directives = append(info.directives, Directive{c.Text, info.fset.Position(c.Slash)})
			}
		}
	}

	// If the file imports "embed",
	// we have to look for //go:embed comments
	// in the remainder of the file.
	// The compiler will enforce the mapping of comments to
	// declared variables. We just need to know the patterns.
	// If there were //go:embed comments earlier in the file
	// (near the package statement or imports), the compiler
	// will reject them. They can be (and have already been) ignored.
	if hasEmbed {
		r.readRest()
		// Scan the entire file (now in r.buf) for comments,
		// looking for //go:embed lines beyond the import section.
		fset := token.NewFileSet()
		file := fset.AddFile(r.pos.Filename, -1, len(r.buf))
		var sc scanner.Scanner
		sc.Init(file, r.buf, nil, scanner.ScanComments)
		for {
			pos, tok, lit := sc.Scan()
			if tok == token.EOF {
				break
			}
			if tok == token.COMMENT && strings.HasPrefix(lit, "//go:embed") {
				// Ignore badly-formed lines - the compiler will report them when it finds them,
				// and we can pretend they are not there to help go list succeed with what it knows.
				embs, err := parseGoEmbed(fset, pos, lit)
				if err == nil {
					info.embeds = append(info.embeds, embs...)
				}
			}
		}
	}
	}

	return nil
}
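
// A minimal usage sketch (hypothetical caller; filename is a placeholder, and
// the fileInfo fields shown are the ones this function reads and fills in):
//
//	info := fileInfo{name: filename, fset: token.NewFileSet()}
//	f, err := os.Open(filename)
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	if err := readGoInfo(f, &info); err != nil {
//		return err
//	}
//	// info.header holds the source through the import section;
//	// info.imports, info.directives, and (if "embed" is imported)
//	// info.embeds describe what was found.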

// isValidImport reports whether s is a valid import path according to the
// stricter checks allowed by the implementation restriction in https://go.dev/ref/spec#Import_declarations.
// It was ported from the function of the same name that was removed from the
// parser in CL 424855, when the parser stopped doing these checks.
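//
// For example, "fmt" and "golang.org/x/mod/modfile" are accepted, while the
// empty string, "a b" (contains a space), and "what?" (disallowed punctuation)
// are rejected.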
func isValidImport(s string) bool {
	const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
	for _, r := range s {
		if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) {
			return false
		}
	}
	return s != ""
}
}

// parseGoEmbed parses a "//go:embed" comment to extract the glob patterns.
// It accepts unquoted space-separated patterns as well as double-quoted and back-quoted Go strings.
// This must match the behavior of cmd/compile/internal/noder.go.
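//
// For example, the directive
//
//	//go:embed image/* "file with space.txt" `a.txt`
//
// yields the patterns image/*, "file with space.txt", and a.txt.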
func parseGoEmbed(fset *token.FileSet, pos token.Pos, comment string) ([]fileEmbed, error) {
	d, ok := ast.ParseDirective(pos, comment)
	if !ok || d.Tool != "go" || d.Name != "embed" {
		return nil, nil
	}
	args, err := d.ParseArgs()
	if err != nil {
		return nil, err
	}
	var list []fileEmbed
	for _, arg := range args {
		list = append(list, fileEmbed{arg.Arg, fset.Position(arg.Pos)})
	}
	return list, nil
}
}