// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package parse

import (
	"fmt"
	"strings"
	"unicode"
	"unicode/utf8"
)

// item represents a token or text string returned from the scanner.
type item struct {
	typ  itemType // The type of this item.
	pos  Pos      // The starting position, in bytes, of this item in the input string.
	val  string   // The value of this item.
	line int      // The line number at the start of this item.
}

func (i item) String() string {
	switch {
	case i.typ == itemEOF:
		return "EOF"
	case i.typ == itemError:
		return i.val
	case i.typ > itemKeyword:
		return fmt.Sprintf("<%s>", i.val)
	case len(i.val) > 10:
		return fmt.Sprintf("%.10q...", i.val)
	}
	return fmt.Sprintf("%q", i.val)
}

// itemType identifies the type of lex items.
type itemType int

const (
	itemError        itemType = iota // error occurred; value is text of error
	itemBool                         // boolean constant
	itemChar                         // printable ASCII character; grab bag for comma etc.
	itemCharConstant                 // character constant
	itemComment                      // comment text
	itemComplex                      // complex constant (1+2i); imaginary is just a number
	itemAssign                       // equals ('=') introducing an assignment
	itemDeclare                      // colon-equals (':=') introducing a declaration
	itemEOF
	itemField      // alphanumeric identifier starting with '.'
	itemIdentifier // alphanumeric identifier not starting with '.'
	itemLeftDelim  // left action delimiter
	itemLeftParen  // '(' inside action
	itemNumber     // simple number, including imaginary
	itemPipe       // pipe symbol
	itemRawString  // raw quoted string (includes quotes)
	itemRightDelim // right action delimiter
	itemRightParen // ')' inside action
	itemSpace      // run of spaces separating arguments
	itemString     // quoted string (includes quotes)
	itemText       // plain text
	itemVariable   // variable starting with '$', such as '$' or '$1' or '$hello'
	// Keywords appear after all the rest.
	itemKeyword  // used only to delimit the keywords
	itemBlock    // block keyword
	itemBreak    // break keyword
	itemContinue // continue keyword
	itemDot      // the cursor, spelled '.'
	itemDefine   // define keyword
	itemElse     // else keyword
	itemEnd      // end keyword
	itemIf       // if keyword
	itemNil      // the untyped nil constant, easiest to treat as a keyword
	itemRange    // range keyword
	itemTemplate // template keyword
	itemWith     // with keyword
)

var key = map[string]itemType{
	".":        itemDot,
	"block":    itemBlock,
	"break":    itemBreak,
	"continue": itemContinue,
	"define":   itemDefine,
	"else":     itemElse,
	"end":      itemEnd,
	"if":       itemIf,
	"range":    itemRange,
	"nil":      itemNil,
	"template": itemTemplate,
	"with":     itemWith,
}

const eof = -1
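// exampleItemStrings is a hypothetical illustration, not part of the original
// file: it shows how item.String renders a few token kinds, based on the
// switch above. Keywords print in angle brackets, values longer than ten
// characters are truncated, and everything else is simply quoted.
func exampleItemStrings() []string {
	return []string{
		item{typ: itemIdentifier, val: "printf"}.String(),   // `"printf"`
		item{typ: itemRange, val: "range"}.String(),          // `<range>`
		item{typ: itemText, val: "some long text"}.String(),  // first ten characters, quoted, then "..."
		item{typ: itemEOF}.String(),                          // `EOF`
	}
}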
// Trimming spaces.
// If the action begins "{{- " rather than "{{", then all space/tab/newlines
// preceding the action are trimmed; conversely if it ends " -}}" the
// leading spaces are trimmed. This is done entirely in the lexer; the
// parser never sees it happen. We require an ASCII space (' ', \t, \r, \n)
// to be present to avoid ambiguity with things like "{{-3}}". It reads
// better with the space present anyway. For simplicity, only ASCII
// does the job.
const (
	spaceChars    = " \t\r\n"  // These are the space characters defined by Go itself.
	trimMarker    = '-'        // Attached to left/right delimiter, trims trailing spaces from preceding/following text.
	trimMarkerLen = Pos(1 + 1) // marker plus space before or after
)

// stateFn represents the state of the scanner as a function that returns the next state.
type stateFn func(*lexer) stateFn

// lexer holds the state of the scanner.
type lexer struct {
	name         string // the name of the input; used only for error reports
	input        string // the string being scanned
	leftDelim    string // start of action marker
	rightDelim   string // end of action marker
	pos          Pos    // current position in the input
	start        Pos    // start position of this item
	atEOF        bool   // we have hit the end of input and returned eof
	parenDepth   int    // nesting depth of ( ) exprs
	line         int    // 1+number of newlines seen
	startLine    int    // start line of this item
	item         item   // item to return to parser
	insideAction bool   // are we inside an action?
	options      lexOptions
}

// lexOptions control behavior of the lexer. All default to false.
type lexOptions struct {
	emitComment bool // emit itemComment tokens.
	breakOK     bool // break keyword allowed
	continueOK  bool // continue keyword allowed
}

// next returns the next rune in the input.
func (l *lexer) next() rune {
	if int(l.pos) >= len(l.input) {
		l.atEOF = true
		return eof
	}
	r, w := utf8.DecodeRuneInString(l.input[l.pos:])
	l.pos += Pos(w)
	if r == '\n' {
		l.line++
	}
	return r
}

// peek returns but does not consume the next rune in the input.
func (l *lexer) peek() rune {
	r := l.next()
	l.backup()
	return r
}

// backup steps back one rune.
func (l *lexer) backup() {
	if !l.atEOF && l.pos > 0 {
		r, w := utf8.DecodeLastRuneInString(l.input[:l.pos])
		l.pos -= Pos(w)
		// Correct newline count.
		if r == '\n' {
			l.line--
		}
	}
}

// thisItem returns the item at the current input point with the specified type
// and advances the input.
func (l *lexer) thisItem(t itemType) item {
	i := item{t, l.start, l.input[l.start:l.pos], l.startLine}
	l.start = l.pos
	l.startLine = l.line
	return i
}

// emit passes the trailing text as an item back to the parser.
func (l *lexer) emit(t itemType) stateFn {
	return l.emitItem(l.thisItem(t))
}

// emitItem passes the specified item to the parser.
func (l *lexer) emitItem(i item) stateFn {
	l.item = i
	return nil
}

// ignore skips over the pending input before this point.
// It tracks newlines in the ignored text, so use it only
// for text that is skipped without calling l.next.
func (l *lexer) ignore() {
	l.line += strings.Count(l.input[l.start:l.pos], "\n")
	l.start = l.pos
	l.startLine = l.line
}

// accept consumes the next rune if it's from the valid set.
func (l *lexer) accept(valid string) bool {
	if strings.ContainsRune(valid, l.next()) {
		return true
	}
	l.backup()
	return false
}

// acceptRun consumes a run of runes from the valid set.
func (l *lexer) acceptRun(valid string) {
	for strings.ContainsRune(valid, l.next()) {
	}
	l.backup()
}

// errorf returns an error token and terminates the scan by passing
// back a nil pointer that will be the next state, terminating l.nextItem.
func (l *lexer) errorf(format string, args ...any) stateFn {
	l.item = item{itemError, l.start, fmt.Sprintf(format, args...), l.startLine}
	l.start = 0
	l.pos = 0
	l.input = l.input[:0]
	return nil
}

// nextItem returns the next item from the input.
// Called by the parser, not in the lexing goroutine.
func (l *lexer) nextItem() item {
	l.item = item{itemEOF, l.pos, "EOF", l.startLine}
	state := lexText
	if l.insideAction {
		state = lexInsideAction
	}
	for {
		state = state(l)
		if state == nil {
			return l.item
		}
	}
}

// lex creates a new scanner for the input string.
func lex(name, input, left, right string) *lexer {
	if left == "" {
		left = leftDelim
	}
	if right == "" {
		right = rightDelim
	}
	l := &lexer{
		name:         name,
		input:        input,
		leftDelim:    left,
		rightDelim:   right,
		line:         1,
		startLine:    1,
		insideAction: false,
	}
	return l
}
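// exampleDrain is a hypothetical package-internal sketch, not part of the
// original file: it uses the unexported lex and nextItem shown above to drain
// a lexer, printing one token per line. The function name and the choice to
// surface comment tokens are assumptions made for illustration only.
func exampleDrain(input string) {
	l := lex("example", input, "", "")        // "" selects the default "{{" / "}}" delimiters
	l.options = lexOptions{emitComment: true} // also emit itemComment tokens
	for {
		it := l.nextItem()
		fmt.Printf("%d:%d %v\n", it.line, it.pos, it)
		if it.typ == itemEOF || it.typ == itemError {
			break
		}
	}
}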
// state functions

const (
	leftDelim    = "{{"
	rightDelim   = "}}"
	leftComment  = "/*"
	rightComment = "*/"
)

// lexText scans until an opening action delimiter, "{{".
func lexText(l *lexer) stateFn {
	if x := strings.Index(l.input[l.pos:], l.leftDelim); x >= 0 {
		if x > 0 {
			l.pos += Pos(x)
			// Do we trim any trailing space?
			trimLength := Pos(0)
			delimEnd := l.pos + Pos(len(l.leftDelim))
			if hasLeftTrimMarker(l.input[delimEnd:]) {
				trimLength = rightTrimLength(l.input[l.start:l.pos])
			}
			l.pos -= trimLength
			l.line += strings.Count(l.input[l.start:l.pos], "\n")
			i := l.thisItem(itemText)
			l.pos += trimLength
			l.ignore()
			if len(i.val) > 0 {
				return l.emitItem(i)
			}
		}
		return lexLeftDelim
	}
	l.pos = Pos(len(l.input))
	// Correctly reached EOF.
	if l.pos > l.start {
		l.line += strings.Count(l.input[l.start:l.pos], "\n")
		return l.emit(itemText)
	}
	return l.emit(itemEOF)
}

// rightTrimLength returns the length of the spaces at the end of the string.
func rightTrimLength(s string) Pos {
	return Pos(len(s) - len(strings.TrimRight(s, spaceChars)))
}

// atRightDelim reports whether the lexer is at a right delimiter, possibly preceded by a trim marker.
func (l *lexer) atRightDelim() (delim, trimSpaces bool) {
	if hasRightTrimMarker(l.input[l.pos:]) && strings.HasPrefix(l.input[l.pos+trimMarkerLen:], l.rightDelim) { // With trim marker.
		return true, true
	}
	if strings.HasPrefix(l.input[l.pos:], l.rightDelim) { // Without trim marker.
		return true, false
	}
	return false, false
}

// leftTrimLength returns the length of the spaces at the beginning of the string.
func leftTrimLength(s string) Pos {
	return Pos(len(s) - len(strings.TrimLeft(s, spaceChars)))
}

// lexLeftDelim scans the left delimiter, which is known to be present, possibly with a trim marker.
// (The text to be trimmed has already been emitted.)
func lexLeftDelim(l *lexer) stateFn {
	l.pos += Pos(len(l.leftDelim))
	trimSpace := hasLeftTrimMarker(l.input[l.pos:])
	afterMarker := Pos(0)
	if trimSpace {
		afterMarker = trimMarkerLen
	}
	if strings.HasPrefix(l.input[l.pos+afterMarker:], leftComment) {
		l.pos += afterMarker
		l.ignore()
		return lexComment
	}
	i := l.thisItem(itemLeftDelim)
	l.insideAction = true
	l.pos += afterMarker
	l.ignore()
	l.parenDepth = 0
	return l.emitItem(i)
}

// lexComment scans a comment. The left comment marker is known to be present.
func lexComment(l *lexer) stateFn {
	l.pos += Pos(len(leftComment))
	x := strings.Index(l.input[l.pos:], rightComment)
	if x < 0 {
		return l.errorf("unclosed comment")
	}
	l.pos += Pos(x + len(rightComment))
	delim, trimSpace := l.atRightDelim()
	if !delim {
		return l.errorf("comment ends before closing delimiter")
	}
	i := l.thisItem(itemComment)
	if trimSpace {
		l.pos += trimMarkerLen
	}
	l.pos += Pos(len(l.rightDelim))
	if trimSpace {
		l.pos += leftTrimLength(l.input[l.pos:])
	}
	l.ignore()
	if l.options.emitComment {
		return l.emitItem(i)
	}
	return lexText
}

// lexRightDelim scans the right delimiter, which is known to be present, possibly with a trim marker.
func lexRightDelim(l *lexer) stateFn {
	_, trimSpace := l.atRightDelim()
	if trimSpace {
		l.pos += trimMarkerLen
		l.ignore()
	}
	l.pos += Pos(len(l.rightDelim))
	i := l.thisItem(itemRightDelim)
	if trimSpace {
		l.pos += leftTrimLength(l.input[l.pos:])
		l.ignore()
	}
	l.insideAction = false
	return l.emitItem(i)
}
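// Hypothetical illustration, not part of the original file: the effect of the
// trim markers handled by lexText, lexLeftDelim, and lexRightDelim, observed
// through the public text/template API. "Space" here means the ASCII
// characters listed in spaceChars.
//
//	"{{23}} < {{45}}"      executes to   "23 < 45"
//	"{{23 -}} < {{- 45}}"  executes to   "23<45"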
// lexInsideAction scans the elements inside action delimiters.
func lexInsideAction(l *lexer) stateFn {
	// Either number, quoted string, or identifier.
	// Spaces separate arguments; runs of spaces turn into itemSpace.
	// Pipe symbols separate and are emitted.
	delim, _ := l.atRightDelim()
	if delim {
		if l.parenDepth == 0 {
			return lexRightDelim
		}
		return l.errorf("unclosed left paren")
	}
	switch r := l.next(); {
	case r == eof:
		return l.errorf("unclosed action")
	case isSpace(r):
		l.backup() // Put space back in case we have " -}}".
		return lexSpace
	case r == '=':
		return l.emit(itemAssign)
	case r == ':':
		if l.next() != '=' {
			return l.errorf("expected :=")
		}
		return l.emit(itemDeclare)
	case r == '|':
		return l.emit(itemPipe)
	case r == '"':
		return lexQuote
	case r == '`':
		return lexRawQuote
	case r == '$':
		return lexVariable
	case r == '\'':
		return lexChar
	case r == '.':
		// special look-ahead for ".field" so we don't break l.backup().
		if l.pos < Pos(len(l.input)) {
			r := l.input[l.pos]
			if r < '0' || '9' < r {
				return lexField
			}
		}
		fallthrough // '.' can start a number.
	case r == '+' || r == '-' || ('0' <= r && r <= '9'):
		l.backup()
		return lexNumber
	case isAlphaNumeric(r):
		l.backup()
		return lexIdentifier
	case r == '(':
		l.parenDepth++
		return l.emit(itemLeftParen)
	case r == ')':
		l.parenDepth--
		if l.parenDepth < 0 {
			return l.errorf("unexpected right paren")
		}
		return l.emit(itemRightParen)
	case r <= unicode.MaxASCII && unicode.IsPrint(r):
		return l.emit(itemChar)
	default:
		return l.errorf("unrecognized character in action: %#U", r)
	}
}

// lexSpace scans a run of space characters.
// We have not consumed the first space, which is known to be present.
// Take care if there is a trim-marked right delimiter, which starts with a space.
func lexSpace(l *lexer) stateFn {
	var r rune
	var numSpaces int
	for {
		r = l.peek()
		if !isSpace(r) {
			break
		}
		l.next()
		numSpaces++
	}
	// Be careful about a trim-marked closing delimiter, which has a minus
	// after a space. We know there is a space, so check for the '-' that might follow.
	if hasRightTrimMarker(l.input[l.pos-1:]) && strings.HasPrefix(l.input[l.pos-1+trimMarkerLen:], l.rightDelim) {
		l.backup() // Before the space.
		if numSpaces == 1 {
			return lexRightDelim // On the delim, so go right to that.
		}
	}
	return l.emit(itemSpace)
}

// lexIdentifier scans an alphanumeric.
func lexIdentifier(l *lexer) stateFn {
	for {
		switch r := l.next(); {
		case isAlphaNumeric(r):
			// absorb.
		default:
			l.backup()
			word := l.input[l.start:l.pos]
			if !l.atTerminator() {
				return l.errorf("bad character %#U", r)
			}
			switch {
			case key[word] > itemKeyword:
				item := key[word]
				if item == itemBreak && !l.options.breakOK || item == itemContinue && !l.options.continueOK {
					return l.emit(itemIdentifier)
				}
				return l.emit(item)
			case word[0] == '.':
				return l.emit(itemField)
			case word == "true", word == "false":
				return l.emit(itemBool)
			default:
				return l.emit(itemIdentifier)
			}
		}
	}
}

// lexField scans a field: .Alphanumeric.
// The . has been scanned.
func lexField(l *lexer) stateFn {
	return lexFieldOrVariable(l, itemField)
}

// lexVariable scans a Variable: $Alphanumeric.
// The $ has been scanned.
func lexVariable(l *lexer) stateFn {
	if l.atTerminator() { // Nothing interesting follows -> "$".
		return l.emit(itemVariable)
	}
	return lexFieldOrVariable(l, itemVariable)
}

// lexFieldOrVariable scans a field or variable: [.$]Alphanumeric.
// The . or $ has been scanned.
func lexFieldOrVariable(l *lexer, typ itemType) stateFn {
	if l.atTerminator() { // Nothing interesting follows -> "." or "$".
		if typ == itemVariable {
			return l.emit(itemVariable)
		}
		return l.emit(itemDot)
	}
	var r rune
	for {
		r = l.next()
		if !isAlphaNumeric(r) {
			l.backup()
			break
		}
	}
	if !l.atTerminator() {
		return l.errorf("bad character %#U", r)
	}
	return l.emit(typ)
}

// atTerminator reports whether the input is at valid termination character to
// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases
// like "$x+2" not being acceptable without a space, in case we decide one
// day to implement arithmetic.
func (l *lexer) atTerminator() bool {
	r := l.peek()
	if isSpace(r) {
		return true
	}
	switch r {
	case eof, '.', ',', '|', ':', ')', '(':
		return true
	}
	return strings.HasPrefix(l.input[l.pos:], l.rightDelim)
}

// lexChar scans a character constant. The initial quote is already
// scanned. Syntax checking is done by the parser.
func lexChar(l *lexer) stateFn {
Loop:
	for {
		switch l.next() {
		case '\\':
			if r := l.next(); r != eof && r != '\n' {
				break
			}
			fallthrough
		case eof, '\n':
			return l.errorf("unterminated character constant")
		case '\'':
			break Loop
		}
	}
	return l.emit(itemCharConstant)
}
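// Hypothetical illustration, not part of the original file: the item sequence
// lexInsideAction and its helpers emit for one action, assuming the default
// "{{" / "}}" delimiters. Spaces inside the action become itemSpace tokens,
// which the parser discards.
//
//	{{ $x := .Field | printf "%d" }}
//
//	itemLeftDelim  "{{"
//	itemSpace      " "
//	itemVariable   "$x"
//	itemSpace      " "
//	itemDeclare    ":="
//	itemSpace      " "
//	itemField      ".Field"
//	itemSpace      " "
//	itemPipe       "|"
//	itemSpace      " "
//	itemIdentifier "printf"
//	itemSpace      " "
//	itemString     "\"%d\""
//	itemSpace      " "
//	itemRightDelim "}}"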
and "0x0.2"// and "089" - but when it's wrong the input is invalid and the parser (via// strconv) will notice.func lexNumber( *lexer) stateFn {if !.scanNumber() {return .errorf("bad number syntax: %q", .input[.start:.pos]) }if := .peek(); == '+' || == '-' {// Complex: 1+2i. No spaces, must end in 'i'.if !.scanNumber() || .input[.pos-1] != 'i' {return .errorf("bad number syntax: %q", .input[.start:.pos]) }return .emit(itemComplex) }return .emit(itemNumber)}func ( *lexer) () bool {// Optional leading sign. .accept("+-")// Is it hex? := "0123456789_"if .accept("0") {// Note: Leading 0 does not mean octal in floats.if .accept("xX") { = "0123456789abcdefABCDEF_" } elseif .accept("oO") { = "01234567_" } elseif .accept("bB") { = "01_" } } .acceptRun()if .accept(".") { .acceptRun() }iflen() == 10+1 && .accept("eE") { .accept("+-") .acceptRun("0123456789_") }iflen() == 16+6+1 && .accept("pP") { .accept("+-") .acceptRun("0123456789_") }// Is it imaginary? .accept("i")// Next thing mustn't be alphanumeric.ifisAlphaNumeric(.peek()) { .next()returnfalse }returntrue}// lexQuote scans a quoted string.func lexQuote( *lexer) stateFn {:for {switch .next() {case'\\':if := .next(); != eof && != '\n' {break }fallthroughcaseeof, '\n':return .errorf("unterminated quoted string")case'"':break } }return .emit(itemString)}// lexRawQuote scans a raw quoted string.func lexRawQuote( *lexer) stateFn {:for {switch .next() {caseeof:return .errorf("unterminated raw quoted string")case'`':break } }return .emit(itemRawString)}// isSpace reports whether r is a space character.func isSpace( rune) bool {return == ' ' || == '\t' || == '\r' || == '\n'}// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.func isAlphaNumeric( rune) bool {return == '_' || unicode.IsLetter() || unicode.IsDigit()}func hasLeftTrimMarker( string) bool {returnlen() >= 2 && [0] == trimMarker && isSpace(rune([1]))}func hasRightTrimMarker( string) bool {returnlen() >= 2 && isSpace(rune([0])) && [1] == trimMarker}