	Use vendored go-swagger (#8087)
* Use vendored go-swagger
* vendor go-swagger
* revert unwanted change
* remove un-needed GO111MODULE
* Update Makefile

Co-Authored-By: techknowlogick <matti@mdranta.net>
Committed by: Lauris BH
Parent: 4cb1bdddc8
Commit: 9fe4437bda

vendor/github.com/hashicorp/hcl/json/parser/flatten.go (117 lines, generated, vendored, new file)
							| @@ -0,0 +1,117 @@ | ||||
| package parser | ||||
|  | ||||
| import "github.com/hashicorp/hcl/hcl/ast" | ||||
|  | ||||
| // flattenObjects takes an AST node, walks it, and flattens nested objects | ||||
| func flattenObjects(node ast.Node) { | ||||
| 	ast.Walk(node, func(n ast.Node) (ast.Node, bool) { | ||||
| 		// We only care about lists, because this is what we modify | ||||
| 		list, ok := n.(*ast.ObjectList) | ||||
| 		if !ok { | ||||
| 			return n, true | ||||
| 		} | ||||
|  | ||||
| 		// Rebuild the item list | ||||
| 		items := make([]*ast.ObjectItem, 0, len(list.Items)) | ||||
| 		frontier := make([]*ast.ObjectItem, len(list.Items)) | ||||
| 		copy(frontier, list.Items) | ||||
| 		for len(frontier) > 0 { | ||||
| 			// Pop the current item | ||||
| 			n := len(frontier) | ||||
| 			item := frontier[n-1] | ||||
| 			frontier = frontier[:n-1] | ||||
|  | ||||
| 			switch v := item.Val.(type) { | ||||
| 			case *ast.ObjectType: | ||||
| 				items, frontier = flattenObjectType(v, item, items, frontier) | ||||
| 			case *ast.ListType: | ||||
| 				items, frontier = flattenListType(v, item, items, frontier) | ||||
| 			default: | ||||
| 				items = append(items, item) | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		// Reverse the list since the frontier model runs things backwards | ||||
| 		for i := len(items)/2 - 1; i >= 0; i-- { | ||||
| 			opp := len(items) - 1 - i | ||||
| 			items[i], items[opp] = items[opp], items[i] | ||||
| 		} | ||||
|  | ||||
| 		// Done! Set the original items | ||||
| 		list.Items = items | ||||
| 		return n, true | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func flattenListType( | ||||
| 	ot *ast.ListType, | ||||
| 	item *ast.ObjectItem, | ||||
| 	items []*ast.ObjectItem, | ||||
| 	frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { | ||||
| 	// If the list is empty, keep the original list | ||||
| 	if len(ot.List) == 0 { | ||||
| 		items = append(items, item) | ||||
| 		return items, frontier | ||||
| 	} | ||||
|  | ||||
| 	// All the elements of this object must also be objects! | ||||
| 	for _, subitem := range ot.List { | ||||
| 		if _, ok := subitem.(*ast.ObjectType); !ok { | ||||
| 			items = append(items, item) | ||||
| 			return items, frontier | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	// Great! We have a match; go through all the items and flatten | ||||
| 	for _, elem := range ot.List { | ||||
| 		// Add it to the frontier so that we can recurse | ||||
| 		frontier = append(frontier, &ast.ObjectItem{ | ||||
| 			Keys:        item.Keys, | ||||
| 			Assign:      item.Assign, | ||||
| 			Val:         elem, | ||||
| 			LeadComment: item.LeadComment, | ||||
| 			LineComment: item.LineComment, | ||||
| 		}) | ||||
| 	} | ||||
|  | ||||
| 	return items, frontier | ||||
| } | ||||
|  | ||||
| func flattenObjectType( | ||||
| 	ot *ast.ObjectType, | ||||
| 	item *ast.ObjectItem, | ||||
| 	items []*ast.ObjectItem, | ||||
| 	frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { | ||||
| 	// If the list has no items we do not have to flatten anything | ||||
| 	if ot.List.Items == nil { | ||||
| 		items = append(items, item) | ||||
| 		return items, frontier | ||||
| 	} | ||||
|  | ||||
| 	// All the elements of this object must also be objects! | ||||
| 	for _, subitem := range ot.List.Items { | ||||
| 		if _, ok := subitem.Val.(*ast.ObjectType); !ok { | ||||
| 			items = append(items, item) | ||||
| 			return items, frontier | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	// Great! We have a match; go through all the items and flatten | ||||
| 	for _, subitem := range ot.List.Items { | ||||
| 		// Copy the new key | ||||
| 		keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys)) | ||||
| 		copy(keys, item.Keys) | ||||
| 		copy(keys[len(item.Keys):], subitem.Keys) | ||||
|  | ||||
| 		// Add it to the frontier so that we can recurse | ||||
| 		frontier = append(frontier, &ast.ObjectItem{ | ||||
| 			Keys:        keys, | ||||
| 			Assign:      item.Assign, | ||||
| 			Val:         subitem.Val, | ||||
| 			LeadComment: item.LeadComment, | ||||
| 			LineComment: item.LineComment, | ||||
| 		}) | ||||
| 	} | ||||
|  | ||||
| 	return items, frontier | ||||
| } | ||||
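The practical effect of this flattening is that nested JSON objects whose values are themselves objects are rewritten as single items with a combined key path, mirroring HCL's block syntax. A minimal sketch of that behaviour follows; the example program and its sample keys are illustrative, not part of the vendored code, and the expected output is inferred from the logic above.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/json/parser"
)

func main() {
	// Objects-of-objects are flattened into one item whose key path is
	// ["foo", "bar"], much like an HCL block `"foo" "bar" { ... }`.
	src := []byte(`{"foo": {"bar": {"baz": "qux"}}}`)

	f, err := parser.Parse(src)
	if err != nil {
		panic(err)
	}

	list, ok := f.Node.(*ast.ObjectList)
	if !ok {
		panic("root node is not an *ast.ObjectList")
	}
	for _, item := range list.Items {
		for _, k := range item.Keys {
			fmt.Print(k.Token.Text, " ") // expected: "foo" "bar"
		}
		fmt.Println()
	}
}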
							
								
								
									
vendor/github.com/hashicorp/hcl/json/parser/parser.go (313 lines, generated, vendored, new file)
							| @@ -0,0 +1,313 @@ | ||||
| package parser | ||||
|  | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
|  | ||||
| 	"github.com/hashicorp/hcl/hcl/ast" | ||||
| 	hcltoken "github.com/hashicorp/hcl/hcl/token" | ||||
| 	"github.com/hashicorp/hcl/json/scanner" | ||||
| 	"github.com/hashicorp/hcl/json/token" | ||||
| ) | ||||
|  | ||||
| type Parser struct { | ||||
| 	sc *scanner.Scanner | ||||
|  | ||||
| 	// Last read token | ||||
| 	tok       token.Token | ||||
| 	commaPrev token.Token | ||||
|  | ||||
| 	enableTrace bool | ||||
| 	indent      int | ||||
| 	n           int // buffer size (max = 1) | ||||
| } | ||||
|  | ||||
| func newParser(src []byte) *Parser { | ||||
| 	return &Parser{ | ||||
| 		sc: scanner.New(src), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Parse parses the given source and returns its abstract syntax tree. | ||||
| func Parse(src []byte) (*ast.File, error) { | ||||
| 	p := newParser(src) | ||||
| 	return p.Parse() | ||||
| } | ||||
|  | ||||
| var errEofToken = errors.New("EOF token found") | ||||
|  | ||||
| // Parse parses the full source and returns its abstract syntax tree. | ||||
| func (p *Parser) Parse() (*ast.File, error) { | ||||
| 	f := &ast.File{} | ||||
| 	var err, scerr error | ||||
| 	p.sc.Error = func(pos token.Pos, msg string) { | ||||
| 		scerr = fmt.Errorf("%s: %s", pos, msg) | ||||
| 	} | ||||
|  | ||||
| 	// The root must be an object in JSON | ||||
| 	object, err := p.object() | ||||
| 	if scerr != nil { | ||||
| 		return nil, scerr | ||||
| 	} | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	// We make our final node an object list so it is more HCL compatible | ||||
| 	f.Node = object.List | ||||
|  | ||||
| 	// Flatten it, which finds patterns and turns them into more HCL-like | ||||
| 	// AST trees. | ||||
| 	flattenObjects(f.Node) | ||||
|  | ||||
| 	return f, nil | ||||
| } | ||||
|  | ||||
| func (p *Parser) objectList() (*ast.ObjectList, error) { | ||||
| 	defer un(trace(p, "ParseObjectList")) | ||||
| 	node := &ast.ObjectList{} | ||||
|  | ||||
| 	for { | ||||
| 		n, err := p.objectItem() | ||||
| 		if err == errEofToken { | ||||
| 			break // we are finished | ||||
| 		} | ||||
|  | ||||
| 		// we don't return a nil node, because callers might want to use the | ||||
| 		// already collected items. | ||||
| 		if err != nil { | ||||
| 			return node, err | ||||
| 		} | ||||
|  | ||||
| 		node.Add(n) | ||||
|  | ||||
| 		// Check for a followup comma. If it isn't a comma, then we're done | ||||
| 		if tok := p.scan(); tok.Type != token.COMMA { | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return node, nil | ||||
| } | ||||
|  | ||||
| // objectItem parses a single object item | ||||
| func (p *Parser) objectItem() (*ast.ObjectItem, error) { | ||||
| 	defer un(trace(p, "ParseObjectItem")) | ||||
|  | ||||
| 	keys, err := p.objectKey() | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	o := &ast.ObjectItem{ | ||||
| 		Keys: keys, | ||||
| 	} | ||||
|  | ||||
| 	switch p.tok.Type { | ||||
| 	case token.COLON: | ||||
| 		pos := p.tok.Pos | ||||
| 		o.Assign = hcltoken.Pos{ | ||||
| 			Filename: pos.Filename, | ||||
| 			Offset:   pos.Offset, | ||||
| 			Line:     pos.Line, | ||||
| 			Column:   pos.Column, | ||||
| 		} | ||||
|  | ||||
| 		o.Val, err = p.objectValue() | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return o, nil | ||||
| } | ||||
|  | ||||
| // objectKey parses an object key and returns an ObjectKey AST | ||||
| func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { | ||||
| 	keyCount := 0 | ||||
| 	keys := make([]*ast.ObjectKey, 0) | ||||
|  | ||||
| 	for { | ||||
| 		tok := p.scan() | ||||
| 		switch tok.Type { | ||||
| 		case token.EOF: | ||||
| 			return nil, errEofToken | ||||
| 		case token.STRING: | ||||
| 			keyCount++ | ||||
| 			keys = append(keys, &ast.ObjectKey{ | ||||
| 				Token: p.tok.HCLToken(), | ||||
| 			}) | ||||
| 		case token.COLON: | ||||
| 			// If we have a zero keycount it means that we never got | ||||
| 			// an object key, i.e. `{ :`. This is a syntax error. | ||||
| 			if keyCount == 0 { | ||||
| 				return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) | ||||
| 			} | ||||
|  | ||||
| 			// Done | ||||
| 			return keys, nil | ||||
| 		case token.ILLEGAL: | ||||
| 			return nil, errors.New("illegal") | ||||
| 		default: | ||||
| 			return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // objectValue parses any type of value, such as a number, bool, string, | ||||
| // object or list. | ||||
| func (p *Parser) objectValue() (ast.Node, error) { | ||||
| 	defer un(trace(p, "ParseObjectValue")) | ||||
| 	tok := p.scan() | ||||
|  | ||||
| 	switch tok.Type { | ||||
| 	case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING: | ||||
| 		return p.literalType() | ||||
| 	case token.LBRACE: | ||||
| 		return p.objectType() | ||||
| 	case token.LBRACK: | ||||
| 		return p.listType() | ||||
| 	case token.EOF: | ||||
| 		return nil, errEofToken | ||||
| 	} | ||||
|  | ||||
| 	return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok) | ||||
| } | ||||
|  | ||||
| // object parses the top-level value, which in JSON must be an object. | ||||
| func (p *Parser) object() (*ast.ObjectType, error) { | ||||
| 	defer un(trace(p, "ParseType")) | ||||
| 	tok := p.scan() | ||||
|  | ||||
| 	switch tok.Type { | ||||
| 	case token.LBRACE: | ||||
| 		return p.objectType() | ||||
| 	case token.EOF: | ||||
| 		return nil, errEofToken | ||||
| 	} | ||||
|  | ||||
| 	return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok) | ||||
| } | ||||
|  | ||||
| // objectType parses an object type and returns an ObjectType AST | ||||
| func (p *Parser) objectType() (*ast.ObjectType, error) { | ||||
| 	defer un(trace(p, "ParseObjectType")) | ||||
|  | ||||
| 	// we assume that the currently scanned token is a LBRACE | ||||
| 	o := &ast.ObjectType{} | ||||
|  | ||||
| 	l, err := p.objectList() | ||||
|  | ||||
| 	// if we hit RBRACE, we are good to go (it means we parsed all items); if it's | ||||
| 	// not a RBRACE, it's a syntax error and we just return it. | ||||
| 	if err != nil && p.tok.Type != token.RBRACE { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	o.List = l | ||||
| 	return o, nil | ||||
| } | ||||
|  | ||||
| // listType parses a list type and returns a ListType AST | ||||
| func (p *Parser) listType() (*ast.ListType, error) { | ||||
| 	defer un(trace(p, "ParseListType")) | ||||
|  | ||||
| 	// we assume that the currently scanned token is a LBRACK | ||||
| 	l := &ast.ListType{} | ||||
|  | ||||
| 	for { | ||||
| 		tok := p.scan() | ||||
| 		switch tok.Type { | ||||
| 		case token.NUMBER, token.FLOAT, token.STRING: | ||||
| 			node, err := p.literalType() | ||||
| 			if err != nil { | ||||
| 				return nil, err | ||||
| 			} | ||||
|  | ||||
| 			l.Add(node) | ||||
| 		case token.COMMA: | ||||
| 			continue | ||||
| 		case token.LBRACE: | ||||
| 			node, err := p.objectType() | ||||
| 			if err != nil { | ||||
| 				return nil, err | ||||
| 			} | ||||
|  | ||||
| 			l.Add(node) | ||||
| 		case token.BOOL: | ||||
| 			// TODO(arslan) should we support? not supported by HCL yet | ||||
| 		case token.LBRACK: | ||||
| 			// TODO(arslan) should we support nested lists? Even though it's | ||||
| 			// written in README of HCL, it's not a part of the grammar | ||||
| 			// (not defined in parse.y) | ||||
| 		case token.RBRACK: | ||||
| 			// finished | ||||
| 			return l, nil | ||||
| 		default: | ||||
| 			return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type) | ||||
| 		} | ||||
|  | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // literalType parses a literal type and returns a LiteralType AST | ||||
| func (p *Parser) literalType() (*ast.LiteralType, error) { | ||||
| 	defer un(trace(p, "ParseLiteral")) | ||||
|  | ||||
| 	return &ast.LiteralType{ | ||||
| 		Token: p.tok.HCLToken(), | ||||
| 	}, nil | ||||
| } | ||||
|  | ||||
| // scan returns the next token from the underlying scanner. If a token has | ||||
| // been unscanned then read that instead. | ||||
| func (p *Parser) scan() token.Token { | ||||
| 	// If we have a token on the buffer, then return it. | ||||
| 	if p.n != 0 { | ||||
| 		p.n = 0 | ||||
| 		return p.tok | ||||
| 	} | ||||
|  | ||||
| 	p.tok = p.sc.Scan() | ||||
| 	return p.tok | ||||
| } | ||||
|  | ||||
| // unscan pushes the previously read token back onto the buffer. | ||||
| func (p *Parser) unscan() { | ||||
| 	p.n = 1 | ||||
| } | ||||
|  | ||||
| // ---------------------------------------------------------------------------- | ||||
| // Parsing support | ||||
|  | ||||
| func (p *Parser) printTrace(a ...interface{}) { | ||||
| 	if !p.enableTrace { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " | ||||
| 	const n = len(dots) | ||||
| 	fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) | ||||
|  | ||||
| 	i := 2 * p.indent | ||||
| 	for i > n { | ||||
| 		fmt.Print(dots) | ||||
| 		i -= n | ||||
| 	} | ||||
| 	// i <= n | ||||
| 	fmt.Print(dots[0:i]) | ||||
| 	fmt.Println(a...) | ||||
| } | ||||
|  | ||||
| func trace(p *Parser, msg string) *Parser { | ||||
| 	p.printTrace(msg, "(") | ||||
| 	p.indent++ | ||||
| 	return p | ||||
| } | ||||
|  | ||||
| // Usage pattern: defer un(trace(p, "...")) | ||||
| func un(p *Parser) { | ||||
| 	p.indent-- | ||||
| 	p.printTrace(")") | ||||
| } | ||||
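As a quick orientation for how this entry point is driven, here is a small assumed example (not part of the diff) showing that Parse accepts a JSON object as the root and rejects anything else:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/json/parser"
)

func main() {
	// Valid input: the root must be a JSON object.
	if _, err := parser.Parse([]byte(`{"port": 8080, "debug": true}`)); err != nil {
		fmt.Println("unexpected error:", err)
	}

	// Invalid input: a bare array is rejected because object() only
	// accepts LBRACE (or EOF) as the first token.
	if _, err := parser.Parse([]byte(`[1, 2, 3]`)); err != nil {
		fmt.Println("parse error:", err)
	}
}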
							
								
								
									
vendor/github.com/hashicorp/hcl/json/scanner/scanner.go (451 lines, generated, vendored, new file)
							| @@ -0,0 +1,451 @@ | ||||
| package scanner | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"fmt" | ||||
| 	"os" | ||||
| 	"unicode" | ||||
| 	"unicode/utf8" | ||||
|  | ||||
| 	"github.com/hashicorp/hcl/json/token" | ||||
| ) | ||||
|  | ||||
| // eof represents a marker rune for the end of the reader. | ||||
| const eof = rune(0) | ||||
|  | ||||
| // Scanner defines a lexical scanner | ||||
| type Scanner struct { | ||||
| 	buf *bytes.Buffer // Source buffer for advancing and scanning | ||||
| 	src []byte        // Source buffer for immutable access | ||||
|  | ||||
| 	// Source Position | ||||
| 	srcPos  token.Pos // current position | ||||
| 	prevPos token.Pos // previous position, used for peek() method | ||||
|  | ||||
| 	lastCharLen int // length of last character in bytes | ||||
| 	lastLineLen int // length of last line in characters (for correct column reporting) | ||||
|  | ||||
| 	tokStart int // token text start position | ||||
| 	tokEnd   int // token text end  position | ||||
|  | ||||
| 	// Error is called for each error encountered. If no Error | ||||
| 	// function is set, the error is reported to os.Stderr. | ||||
| 	Error func(pos token.Pos, msg string) | ||||
|  | ||||
| 	// ErrorCount is incremented by one for each error encountered. | ||||
| 	ErrorCount int | ||||
|  | ||||
| 	// tokPos is the start position of most recently scanned token; set by | ||||
| 	// Scan. The Filename field is always left untouched by the Scanner.  If | ||||
| 	// an error is reported (via Error) and Position is invalid, the scanner is | ||||
| 	// not inside a token. | ||||
| 	tokPos token.Pos | ||||
| } | ||||
|  | ||||
| // New creates and initializes a new instance of Scanner using src as | ||||
| // its source content. | ||||
| func New(src []byte) *Scanner { | ||||
| 	// even though we accept a src, we read from an io.Reader compatible type | ||||
| 	// (*bytes.Buffer). So in the future we might easily change it to streaming | ||||
| 	// read. | ||||
| 	b := bytes.NewBuffer(src) | ||||
| 	s := &Scanner{ | ||||
| 		buf: b, | ||||
| 		src: src, | ||||
| 	} | ||||
|  | ||||
| 	// srcPosition always starts with 1 | ||||
| 	s.srcPos.Line = 1 | ||||
| 	return s | ||||
| } | ||||
|  | ||||
| // next reads the next rune from the buffered reader. Returns the rune(0) if | ||||
| // an error occurs (or io.EOF is returned). | ||||
| func (s *Scanner) next() rune { | ||||
| 	ch, size, err := s.buf.ReadRune() | ||||
| 	if err != nil { | ||||
| 		// advance for error reporting | ||||
| 		s.srcPos.Column++ | ||||
| 		s.srcPos.Offset += size | ||||
| 		s.lastCharLen = size | ||||
| 		return eof | ||||
| 	} | ||||
|  | ||||
| 	if ch == utf8.RuneError && size == 1 { | ||||
| 		s.srcPos.Column++ | ||||
| 		s.srcPos.Offset += size | ||||
| 		s.lastCharLen = size | ||||
| 		s.err("illegal UTF-8 encoding") | ||||
| 		return ch | ||||
| 	} | ||||
|  | ||||
| 	// remember last position | ||||
| 	s.prevPos = s.srcPos | ||||
|  | ||||
| 	s.srcPos.Column++ | ||||
| 	s.lastCharLen = size | ||||
| 	s.srcPos.Offset += size | ||||
|  | ||||
| 	if ch == '\n' { | ||||
| 		s.srcPos.Line++ | ||||
| 		s.lastLineLen = s.srcPos.Column | ||||
| 		s.srcPos.Column = 0 | ||||
| 	} | ||||
|  | ||||
| 	// debug | ||||
| 	// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) | ||||
| 	return ch | ||||
| } | ||||
|  | ||||
| // unread unreads the previously read rune and updates the source position | ||||
| func (s *Scanner) unread() { | ||||
| 	if err := s.buf.UnreadRune(); err != nil { | ||||
| 		panic(err) // this is user fault, we should catch it | ||||
| 	} | ||||
| 	s.srcPos = s.prevPos // put back last position | ||||
| } | ||||
|  | ||||
| // peek returns the next rune without advancing the reader. | ||||
| func (s *Scanner) peek() rune { | ||||
| 	peek, _, err := s.buf.ReadRune() | ||||
| 	if err != nil { | ||||
| 		return eof | ||||
| 	} | ||||
|  | ||||
| 	s.buf.UnreadRune() | ||||
| 	return peek | ||||
| } | ||||
|  | ||||
| // Scan scans the next token and returns the token. | ||||
| func (s *Scanner) Scan() token.Token { | ||||
| 	ch := s.next() | ||||
|  | ||||
| 	// skip white space | ||||
| 	for isWhitespace(ch) { | ||||
| 		ch = s.next() | ||||
| 	} | ||||
|  | ||||
| 	var tok token.Type | ||||
|  | ||||
| 	// token text markings | ||||
| 	s.tokStart = s.srcPos.Offset - s.lastCharLen | ||||
|  | ||||
| 	// token position: the initial next() moves the offset by one (the size of | ||||
| 	// the rune, actually), but we are interested in the starting point | ||||
| 	s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen | ||||
| 	if s.srcPos.Column > 0 { | ||||
| 		// common case: last character was not a '\n' | ||||
| 		s.tokPos.Line = s.srcPos.Line | ||||
| 		s.tokPos.Column = s.srcPos.Column | ||||
| 	} else { | ||||
| 		// last character was a '\n' | ||||
| 		// (we cannot be at the beginning of the source | ||||
| 		// since we have called next() at least once) | ||||
| 		s.tokPos.Line = s.srcPos.Line - 1 | ||||
| 		s.tokPos.Column = s.lastLineLen | ||||
| 	} | ||||
|  | ||||
| 	switch { | ||||
| 	case isLetter(ch): | ||||
| 		lit := s.scanIdentifier() | ||||
| 		if lit == "true" || lit == "false" { | ||||
| 			tok = token.BOOL | ||||
| 		} else if lit == "null" { | ||||
| 			tok = token.NULL | ||||
| 		} else { | ||||
| 			s.err("illegal char") | ||||
| 		} | ||||
| 	case isDecimal(ch): | ||||
| 		tok = s.scanNumber(ch) | ||||
| 	default: | ||||
| 		switch ch { | ||||
| 		case eof: | ||||
| 			tok = token.EOF | ||||
| 		case '"': | ||||
| 			tok = token.STRING | ||||
| 			s.scanString() | ||||
| 		case '.': | ||||
| 			tok = token.PERIOD | ||||
| 			ch = s.peek() | ||||
| 			if isDecimal(ch) { | ||||
| 				tok = token.FLOAT | ||||
| 				ch = s.scanMantissa(ch) | ||||
| 				ch = s.scanExponent(ch) | ||||
| 			} | ||||
| 		case '[': | ||||
| 			tok = token.LBRACK | ||||
| 		case ']': | ||||
| 			tok = token.RBRACK | ||||
| 		case '{': | ||||
| 			tok = token.LBRACE | ||||
| 		case '}': | ||||
| 			tok = token.RBRACE | ||||
| 		case ',': | ||||
| 			tok = token.COMMA | ||||
| 		case ':': | ||||
| 			tok = token.COLON | ||||
| 		case '-': | ||||
| 			if isDecimal(s.peek()) { | ||||
| 				ch := s.next() | ||||
| 				tok = s.scanNumber(ch) | ||||
| 			} else { | ||||
| 				s.err("illegal char") | ||||
| 			} | ||||
| 		default: | ||||
| 			s.err("illegal char: " + string(ch)) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	// finish token ending | ||||
| 	s.tokEnd = s.srcPos.Offset | ||||
|  | ||||
| 	// create token literal | ||||
| 	var tokenText string | ||||
| 	if s.tokStart >= 0 { | ||||
| 		tokenText = string(s.src[s.tokStart:s.tokEnd]) | ||||
| 	} | ||||
| 	s.tokStart = s.tokEnd // ensure idempotency of tokenText() call | ||||
|  | ||||
| 	return token.Token{ | ||||
| 		Type: tok, | ||||
| 		Pos:  s.tokPos, | ||||
| 		Text: tokenText, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // scanNumber scans a HCL number definition starting with the given rune | ||||
| func (s *Scanner) scanNumber(ch rune) token.Type { | ||||
| 	zero := ch == '0' | ||||
| 	pos := s.srcPos | ||||
|  | ||||
| 	s.scanMantissa(ch) | ||||
| 	ch = s.next() // seek forward | ||||
| 	if ch == 'e' || ch == 'E' { | ||||
| 		ch = s.scanExponent(ch) | ||||
| 		return token.FLOAT | ||||
| 	} | ||||
|  | ||||
| 	if ch == '.' { | ||||
| 		ch = s.scanFraction(ch) | ||||
| 		if ch == 'e' || ch == 'E' { | ||||
| 			ch = s.next() | ||||
| 			ch = s.scanExponent(ch) | ||||
| 		} | ||||
| 		return token.FLOAT | ||||
| 	} | ||||
|  | ||||
| 	if ch != eof { | ||||
| 		s.unread() | ||||
| 	} | ||||
|  | ||||
| 	// If the number is longer than one digit and started with a zero, error | ||||
| 	if zero && pos != s.srcPos { | ||||
| 		s.err("numbers cannot start with 0") | ||||
| 	} | ||||
|  | ||||
| 	return token.NUMBER | ||||
| } | ||||
|  | ||||
| // scanMantissa scans the mantissa beginning from the rune. It returns the next | ||||
| // non-decimal rune. It's used to determine whether it's a fraction or exponent. | ||||
| func (s *Scanner) scanMantissa(ch rune) rune { | ||||
| 	scanned := false | ||||
| 	for isDecimal(ch) { | ||||
| 		ch = s.next() | ||||
| 		scanned = true | ||||
| 	} | ||||
|  | ||||
| 	if scanned && ch != eof { | ||||
| 		s.unread() | ||||
| 	} | ||||
| 	return ch | ||||
| } | ||||
|  | ||||
| // scanFraction scans the fraction after the '.' rune | ||||
| func (s *Scanner) scanFraction(ch rune) rune { | ||||
| 	if ch == '.' { | ||||
| 		ch = s.peek() // we peek just to see if we can move forward | ||||
| 		ch = s.scanMantissa(ch) | ||||
| 	} | ||||
| 	return ch | ||||
| } | ||||
|  | ||||
| // scanExponent scans the remaining parts of an exponent after the 'e' or 'E' | ||||
| // rune. | ||||
| func (s *Scanner) scanExponent(ch rune) rune { | ||||
| 	if ch == 'e' || ch == 'E' { | ||||
| 		ch = s.next() | ||||
| 		if ch == '-' || ch == '+' { | ||||
| 			ch = s.next() | ||||
| 		} | ||||
| 		ch = s.scanMantissa(ch) | ||||
| 	} | ||||
| 	return ch | ||||
| } | ||||
|  | ||||
| // scanString scans a quoted string | ||||
| func (s *Scanner) scanString() { | ||||
| 	braces := 0 | ||||
| 	for { | ||||
| 		// '"' opening already consumed | ||||
| 		// read character after quote | ||||
| 		ch := s.next() | ||||
|  | ||||
| 		if ch == '\n' || ch < 0 || ch == eof { | ||||
| 			s.err("literal not terminated") | ||||
| 			return | ||||
| 		} | ||||
|  | ||||
| 		if ch == '"' { | ||||
| 			break | ||||
| 		} | ||||
|  | ||||
| 		// If we're going into a ${} then we can ignore quotes for awhile | ||||
| 		if braces == 0 && ch == '$' && s.peek() == '{' { | ||||
| 			braces++ | ||||
| 			s.next() | ||||
| 		} else if braces > 0 && ch == '{' { | ||||
| 			braces++ | ||||
| 		} | ||||
| 		if braces > 0 && ch == '}' { | ||||
| 			braces-- | ||||
| 		} | ||||
|  | ||||
| 		if ch == '\\' { | ||||
| 			s.scanEscape() | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // scanEscape scans an escape sequence | ||||
| func (s *Scanner) scanEscape() rune { | ||||
| 	// http://en.cppreference.com/w/cpp/language/escape | ||||
| 	ch := s.next() // read character after '/' | ||||
| 	switch ch { | ||||
| 	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': | ||||
| 		// nothing to do | ||||
| 	case '0', '1', '2', '3', '4', '5', '6', '7': | ||||
| 		// octal notation | ||||
| 		ch = s.scanDigits(ch, 8, 3) | ||||
| 	case 'x': | ||||
| 		// hexadecimal notation | ||||
| 		ch = s.scanDigits(s.next(), 16, 2) | ||||
| 	case 'u': | ||||
| 		// universal character name | ||||
| 		ch = s.scanDigits(s.next(), 16, 4) | ||||
| 	case 'U': | ||||
| 		// universal character name | ||||
| 		ch = s.scanDigits(s.next(), 16, 8) | ||||
| 	default: | ||||
| 		s.err("illegal char escape") | ||||
| 	} | ||||
| 	return ch | ||||
| } | ||||
|  | ||||
| // scanDigits scans a rune with the given base up to n times. For example, an | ||||
| // octal escape such as \123 corresponds to scanDigits(ch, 8, 3). | ||||
| func (s *Scanner) scanDigits(ch rune, base, n int) rune { | ||||
| 	for n > 0 && digitVal(ch) < base { | ||||
| 		ch = s.next() | ||||
| 		n-- | ||||
| 	} | ||||
| 	if n > 0 { | ||||
| 		s.err("illegal char escape") | ||||
| 	} | ||||
|  | ||||
| 	// we scanned all digits, put the last non digit char back | ||||
| 	s.unread() | ||||
| 	return ch | ||||
| } | ||||
|  | ||||
| // scanIdentifier scans an identifier and returns the literal string | ||||
| func (s *Scanner) scanIdentifier() string { | ||||
| 	offs := s.srcPos.Offset - s.lastCharLen | ||||
| 	ch := s.next() | ||||
| 	for isLetter(ch) || isDigit(ch) || ch == '-' { | ||||
| 		ch = s.next() | ||||
| 	} | ||||
|  | ||||
| 	if ch != eof { | ||||
| 		s.unread() // we got identifier, put back latest char | ||||
| 	} | ||||
|  | ||||
| 	return string(s.src[offs:s.srcPos.Offset]) | ||||
| } | ||||
|  | ||||
| // recentPosition returns the position of the character immediately after the | ||||
| // character or token returned by the last call to Scan. | ||||
| func (s *Scanner) recentPosition() (pos token.Pos) { | ||||
| 	pos.Offset = s.srcPos.Offset - s.lastCharLen | ||||
| 	switch { | ||||
| 	case s.srcPos.Column > 0: | ||||
| 		// common case: last character was not a '\n' | ||||
| 		pos.Line = s.srcPos.Line | ||||
| 		pos.Column = s.srcPos.Column | ||||
| 	case s.lastLineLen > 0: | ||||
| 		// last character was a '\n' | ||||
| 		// (we cannot be at the beginning of the source | ||||
| 		// since we have called next() at least once) | ||||
| 		pos.Line = s.srcPos.Line - 1 | ||||
| 		pos.Column = s.lastLineLen | ||||
| 	default: | ||||
| 		// at the beginning of the source | ||||
| 		pos.Line = 1 | ||||
| 		pos.Column = 1 | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // err reports a scanning error to the s.Error function. If the function is | ||||
| // not defined, the error is printed to os.Stderr by default | ||||
| func (s *Scanner) err(msg string) { | ||||
| 	s.ErrorCount++ | ||||
| 	pos := s.recentPosition() | ||||
|  | ||||
| 	if s.Error != nil { | ||||
| 		s.Error(pos, msg) | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) | ||||
| } | ||||
|  | ||||
| // isLetter returns true if the given rune is a letter | ||||
| func isLetter(ch rune) bool { | ||||
| 	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) | ||||
| } | ||||
|  | ||||
| // isDigit returns true if the given rune is a decimal digit | ||||
| func isDigit(ch rune) bool { | ||||
| 	return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) | ||||
| } | ||||
|  | ||||
| // isDecimal returns true if the given rune is a decimal digit | ||||
| func isDecimal(ch rune) bool { | ||||
| 	return '0' <= ch && ch <= '9' | ||||
| } | ||||
|  | ||||
| // isHexadecimal returns true if the given rune is a hexadecimal digit | ||||
| func isHexadecimal(ch rune) bool { | ||||
| 	return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' | ||||
| } | ||||
|  | ||||
| // isWhitespace returns true if the rune is a space, tab, newline or carriage return | ||||
| func isWhitespace(ch rune) bool { | ||||
| 	return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' | ||||
| } | ||||
|  | ||||
| // digitVal returns the integer value of a given octal, decimal or hexadecimal rune | ||||
| func digitVal(ch rune) int { | ||||
| 	switch { | ||||
| 	case '0' <= ch && ch <= '9': | ||||
| 		return int(ch - '0') | ||||
| 	case 'a' <= ch && ch <= 'f': | ||||
| 		return int(ch - 'a' + 10) | ||||
| 	case 'A' <= ch && ch <= 'F': | ||||
| 		return int(ch - 'A' + 10) | ||||
| 	} | ||||
| 	return 16 // larger than any legal digit val | ||||
| } | ||||
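The scanner can also be exercised on its own. A small assumed sketch (not part of the diff): create it with New and call Scan until EOF, checking ErrorCount afterwards.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/json/scanner"
	"github.com/hashicorp/hcl/json/token"
)

func main() {
	src := []byte(`{"name": "gitea", "stars": 42}`)

	s := scanner.New(src)
	for {
		tok := s.Scan()
		if tok.Type == token.EOF {
			break
		}
		// Each token carries its type, position and raw text,
		// e.g. roughly `1:2 STRING "name"`.
		fmt.Println(tok)
	}
	if s.ErrorCount > 0 {
		fmt.Println("scanning produced errors")
	}
}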
							
								
								
									
vendor/github.com/hashicorp/hcl/json/token/position.go (46 lines, generated, vendored, new file)
							| @@ -0,0 +1,46 @@ | ||||
| package token | ||||
|  | ||||
| import "fmt" | ||||
|  | ||||
| // Pos describes an arbitrary source position | ||||
| // including the file, line, and column location. | ||||
| // A Pos is valid if the line number is > 0. | ||||
| type Pos struct { | ||||
| 	Filename string // filename, if any | ||||
| 	Offset   int    // offset, starting at 0 | ||||
| 	Line     int    // line number, starting at 1 | ||||
| 	Column   int    // column number, starting at 1 (character count) | ||||
| } | ||||
|  | ||||
| // IsValid returns true if the position is valid. | ||||
| func (p *Pos) IsValid() bool { return p.Line > 0 } | ||||
|  | ||||
| // String returns a string in one of several forms: | ||||
| // | ||||
| //	file:line:column    valid position with file name | ||||
| //	line:column         valid position without file name | ||||
| //	file                invalid position with file name | ||||
| //	-                   invalid position without file name | ||||
| func (p Pos) String() string { | ||||
| 	s := p.Filename | ||||
| 	if p.IsValid() { | ||||
| 		if s != "" { | ||||
| 			s += ":" | ||||
| 		} | ||||
| 		s += fmt.Sprintf("%d:%d", p.Line, p.Column) | ||||
| 	} | ||||
| 	if s == "" { | ||||
| 		s = "-" | ||||
| 	} | ||||
| 	return s | ||||
| } | ||||
|  | ||||
| // Before reports whether the position p is before u. | ||||
| func (p Pos) Before(u Pos) bool { | ||||
| 	return u.Offset > p.Offset || u.Line > p.Line | ||||
| } | ||||
|  | ||||
| // After reports whether the position p is after u. | ||||
| func (p Pos) After(u Pos) bool { | ||||
| 	return u.Offset < p.Offset || u.Line < p.Line | ||||
| } | ||||
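Pos values are normally produced by the scanner, but a quick assumed sketch of the formatting and ordering helpers (sample positions are made up for illustration):

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/json/token"
)

func main() {
	a := token.Pos{Filename: "config.json", Line: 3, Column: 7, Offset: 42}
	b := token.Pos{Filename: "config.json", Line: 5, Column: 1, Offset: 80}

	fmt.Println(a)           // config.json:3:7
	fmt.Println(a.Before(b)) // true
	fmt.Println(b.After(a))  // true

	var invalid token.Pos
	fmt.Println(invalid) // "-" (line 0 means the position is not valid)
}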
							
								
								
									
vendor/github.com/hashicorp/hcl/json/token/token.go (118 lines, generated, vendored, new file)
							| @@ -0,0 +1,118 @@ | ||||
| package token | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"strconv" | ||||
|  | ||||
| 	hcltoken "github.com/hashicorp/hcl/hcl/token" | ||||
| ) | ||||
|  | ||||
| // Token defines a single HCL token which can be obtained via the Scanner | ||||
| type Token struct { | ||||
| 	Type Type | ||||
| 	Pos  Pos | ||||
| 	Text string | ||||
| } | ||||
|  | ||||
| // Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) | ||||
| type Type int | ||||
|  | ||||
| const ( | ||||
| 	// Special tokens | ||||
| 	ILLEGAL Type = iota | ||||
| 	EOF | ||||
|  | ||||
| 	identifier_beg | ||||
| 	literal_beg | ||||
| 	NUMBER // 12345 | ||||
| 	FLOAT  // 123.45 | ||||
| 	BOOL   // true,false | ||||
| 	STRING // "abc" | ||||
| 	NULL   // null | ||||
| 	literal_end | ||||
| 	identifier_end | ||||
|  | ||||
| 	operator_beg | ||||
| 	LBRACK // [ | ||||
| 	LBRACE // { | ||||
| 	COMMA  // , | ||||
| 	PERIOD // . | ||||
| 	COLON  // : | ||||
|  | ||||
| 	RBRACK // ] | ||||
| 	RBRACE // } | ||||
|  | ||||
| 	operator_end | ||||
| ) | ||||
|  | ||||
| var tokens = [...]string{ | ||||
| 	ILLEGAL: "ILLEGAL", | ||||
|  | ||||
| 	EOF: "EOF", | ||||
|  | ||||
| 	NUMBER: "NUMBER", | ||||
| 	FLOAT:  "FLOAT", | ||||
| 	BOOL:   "BOOL", | ||||
| 	STRING: "STRING", | ||||
| 	NULL:   "NULL", | ||||
|  | ||||
| 	LBRACK: "LBRACK", | ||||
| 	LBRACE: "LBRACE", | ||||
| 	COMMA:  "COMMA", | ||||
| 	PERIOD: "PERIOD", | ||||
| 	COLON:  "COLON", | ||||
|  | ||||
| 	RBRACK: "RBRACK", | ||||
| 	RBRACE: "RBRACE", | ||||
| } | ||||
|  | ||||
| // String returns the string corresponding to the token tok. | ||||
| func (t Type) String() string { | ||||
| 	s := "" | ||||
| 	if 0 <= t && t < Type(len(tokens)) { | ||||
| 		s = tokens[t] | ||||
| 	} | ||||
| 	if s == "" { | ||||
| 		s = "token(" + strconv.Itoa(int(t)) + ")" | ||||
| 	} | ||||
| 	return s | ||||
| } | ||||
|  | ||||
| // IsIdentifier returns true for tokens corresponding to identifiers and basic | ||||
| // type literals; it returns false otherwise. | ||||
| func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end } | ||||
|  | ||||
| // IsLiteral returns true for tokens corresponding to basic type literals; it | ||||
| // returns false otherwise. | ||||
| func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end } | ||||
|  | ||||
| // IsOperator returns true for tokens corresponding to operators and | ||||
| // delimiters; it returns false otherwise. | ||||
| func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end } | ||||
|  | ||||
| // String returns the token's literal text. Note that this is only | ||||
| // applicable for certain token types, such as token.IDENT, | ||||
| // token.STRING, etc.. | ||||
| func (t Token) String() string { | ||||
| 	return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text) | ||||
| } | ||||
|  | ||||
| // HCLToken converts this token to an HCL token. | ||||
| // | ||||
| // The token type must be a literal type or this will panic. | ||||
| func (t Token) HCLToken() hcltoken.Token { | ||||
| 	switch t.Type { | ||||
| 	case BOOL: | ||||
| 		return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text} | ||||
| 	case FLOAT: | ||||
| 		return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text} | ||||
| 	case NULL: | ||||
| 		return hcltoken.Token{Type: hcltoken.STRING, Text: ""} | ||||
| 	case NUMBER: | ||||
| 		return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text} | ||||
| 	case STRING: | ||||
| 		return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true} | ||||
| 	default: | ||||
| 		panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type)) | ||||
| 	} | ||||
| } | ||||
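To show how these JSON tokens bridge into the main HCL token package, a small assumed example of HCLToken (the sample token text is illustrative):

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/json/token"
)

func main() {
	// A STRING token as the scanner would produce it (the text keeps the quotes).
	jsonTok := token.Token{Type: token.STRING, Text: `"hello"`}

	hclTok := jsonTok.HCLToken()
	// The HCL token is marked as JSON so later unquoting is JSON-aware.
	fmt.Println(hclTok.Type, hclTok.Text, hclTok.JSON) // roughly: STRING "hello" true

	// NULL becomes an empty HCL string; unhandled types would panic.
	fmt.Println(token.Token{Type: token.NULL}.HCLToken().Type)
}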