add vendor
This commit is contained in:
9
vendor/github.com/hashicorp/hcl/.gitignore
generated
vendored
Normal file
9
vendor/github.com/hashicorp/hcl/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
y.output
|
||||
|
||||
# ignore intellij files
|
||||
.idea
|
||||
*.iml
|
||||
*.ipr
|
||||
*.iws
|
||||
|
||||
*.test
|
||||
12
vendor/github.com/hashicorp/hcl/.travis.yml
generated
vendored
Normal file
12
vendor/github.com/hashicorp/hcl/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
sudo: false
|
||||
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.8
|
||||
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
|
||||
script: make test
|
||||
354
vendor/github.com/hashicorp/hcl/LICENSE
generated
vendored
Normal file
354
vendor/github.com/hashicorp/hcl/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,354 @@
|
||||
Mozilla Public License, version 2.0
|
||||
|
||||
1. Definitions
|
||||
|
||||
1.1. “Contributor”
|
||||
|
||||
means each individual or legal entity that creates, contributes to the
|
||||
creation of, or owns Covered Software.
|
||||
|
||||
1.2. “Contributor Version”
|
||||
|
||||
means the combination of the Contributions of others (if any) used by a
|
||||
Contributor and that particular Contributor’s Contribution.
|
||||
|
||||
1.3. “Contribution”
|
||||
|
||||
means Covered Software of a particular Contributor.
|
||||
|
||||
1.4. “Covered Software”
|
||||
|
||||
means Source Code Form to which the initial Contributor has attached the
|
||||
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||
Modifications of such Source Code Form, in each case including portions
|
||||
thereof.
|
||||
|
||||
1.5. “Incompatible With Secondary Licenses”
|
||||
means
|
||||
|
||||
a. that the initial Contributor has attached the notice described in
|
||||
Exhibit B to the Covered Software; or
|
||||
|
||||
b. that the Covered Software was made available under the terms of version
|
||||
1.1 or earlier of the License, but not also under the terms of a
|
||||
Secondary License.
|
||||
|
||||
1.6. “Executable Form”
|
||||
|
||||
means any form of the work other than Source Code Form.
|
||||
|
||||
1.7. “Larger Work”
|
||||
|
||||
means a work that combines Covered Software with other material, in a separate
|
||||
file or files, that is not Covered Software.
|
||||
|
||||
1.8. “License”
|
||||
|
||||
means this document.
|
||||
|
||||
1.9. “Licensable”
|
||||
|
||||
means having the right to grant, to the maximum extent possible, whether at the
|
||||
time of the initial grant or subsequently, any and all of the rights conveyed by
|
||||
this License.
|
||||
|
||||
1.10. “Modifications”
|
||||
|
||||
means any of the following:
|
||||
|
||||
a. any file in Source Code Form that results from an addition to, deletion
|
||||
from, or modification of the contents of Covered Software; or
|
||||
|
||||
b. any new file in Source Code Form that contains any Covered Software.
|
||||
|
||||
1.11. “Patent Claims” of a Contributor
|
||||
|
||||
means any patent claim(s), including without limitation, method, process,
|
||||
and apparatus claims, in any patent Licensable by such Contributor that
|
||||
would be infringed, but for the grant of the License, by the making,
|
||||
using, selling, offering for sale, having made, import, or transfer of
|
||||
either its Contributions or its Contributor Version.
|
||||
|
||||
1.12. “Secondary License”
|
||||
|
||||
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||
General Public License, Version 2.1, the GNU Affero General Public
|
||||
License, Version 3.0, or any later versions of those licenses.
|
||||
|
||||
1.13. “Source Code Form”
|
||||
|
||||
means the form of the work preferred for making modifications.
|
||||
|
||||
1.14. “You” (or “Your”)
|
||||
|
||||
means an individual or a legal entity exercising rights under this
|
||||
License. For legal entities, “You” includes any entity that controls, is
|
||||
controlled by, or is under common control with You. For purposes of this
|
||||
definition, “control” means (a) the power, direct or indirect, to cause
|
||||
the direction or management of such entity, whether by contract or
|
||||
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||
outstanding shares or beneficial ownership of such entity.
|
||||
|
||||
|
||||
2. License Grants and Conditions
|
||||
|
||||
2.1. Grants
|
||||
|
||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||
non-exclusive license:
|
||||
|
||||
a. under intellectual property rights (other than patent or trademark)
|
||||
Licensable by such Contributor to use, reproduce, make available,
|
||||
modify, display, perform, distribute, and otherwise exploit its
|
||||
Contributions, either on an unmodified basis, with Modifications, or as
|
||||
part of a Larger Work; and
|
||||
|
||||
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||
sale, have made, import, and otherwise transfer either its Contributions
|
||||
or its Contributor Version.
|
||||
|
||||
2.2. Effective Date
|
||||
|
||||
The licenses granted in Section 2.1 with respect to any Contribution become
|
||||
effective for each Contribution on the date the Contributor first distributes
|
||||
such Contribution.
|
||||
|
||||
2.3. Limitations on Grant Scope
|
||||
|
||||
The licenses granted in this Section 2 are the only rights granted under this
|
||||
License. No additional rights or licenses will be implied from the distribution
|
||||
or licensing of Covered Software under this License. Notwithstanding Section
|
||||
2.1(b) above, no patent license is granted by a Contributor:
|
||||
|
||||
a. for any code that a Contributor has removed from Covered Software; or
|
||||
|
||||
b. for infringements caused by: (i) Your and any other third party’s
|
||||
modifications of Covered Software, or (ii) the combination of its
|
||||
Contributions with other software (except as part of its Contributor
|
||||
Version); or
|
||||
|
||||
c. under Patent Claims infringed by Covered Software in the absence of its
|
||||
Contributions.
|
||||
|
||||
This License does not grant any rights in the trademarks, service marks, or
|
||||
logos of any Contributor (except as may be necessary to comply with the
|
||||
notice requirements in Section 3.4).
|
||||
|
||||
2.4. Subsequent Licenses
|
||||
|
||||
No Contributor makes additional grants as a result of Your choice to
|
||||
distribute the Covered Software under a subsequent version of this License
|
||||
(see Section 10.2) or under the terms of a Secondary License (if permitted
|
||||
under the terms of Section 3.3).
|
||||
|
||||
2.5. Representation
|
||||
|
||||
Each Contributor represents that the Contributor believes its Contributions
|
||||
are its original creation(s) or it has sufficient rights to grant the
|
||||
rights to its Contributions conveyed by this License.
|
||||
|
||||
2.6. Fair Use
|
||||
|
||||
This License is not intended to limit any rights You have under applicable
|
||||
copyright doctrines of fair use, fair dealing, or other equivalents.
|
||||
|
||||
2.7. Conditions
|
||||
|
||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||
Section 2.1.
|
||||
|
||||
|
||||
3. Responsibilities
|
||||
|
||||
3.1. Distribution of Source Form
|
||||
|
||||
All distribution of Covered Software in Source Code Form, including any
|
||||
Modifications that You create or to which You contribute, must be under the
|
||||
terms of this License. You must inform recipients that the Source Code Form
|
||||
of the Covered Software is governed by the terms of this License, and how
|
||||
they can obtain a copy of this License. You may not attempt to alter or
|
||||
restrict the recipients’ rights in the Source Code Form.
|
||||
|
||||
3.2. Distribution of Executable Form
|
||||
|
||||
If You distribute Covered Software in Executable Form then:
|
||||
|
||||
a. such Covered Software must also be made available in Source Code Form,
|
||||
as described in Section 3.1, and You must inform recipients of the
|
||||
Executable Form how they can obtain a copy of such Source Code Form by
|
||||
reasonable means in a timely manner, at a charge no more than the cost
|
||||
of distribution to the recipient; and
|
||||
|
||||
b. You may distribute such Executable Form under the terms of this License,
|
||||
or sublicense it under different terms, provided that the license for
|
||||
the Executable Form does not attempt to limit or alter the recipients’
|
||||
rights in the Source Code Form under this License.
|
||||
|
||||
3.3. Distribution of a Larger Work
|
||||
|
||||
You may create and distribute a Larger Work under terms of Your choice,
|
||||
provided that You also comply with the requirements of this License for the
|
||||
Covered Software. If the Larger Work is a combination of Covered Software
|
||||
with a work governed by one or more Secondary Licenses, and the Covered
|
||||
Software is not Incompatible With Secondary Licenses, this License permits
|
||||
You to additionally distribute such Covered Software under the terms of
|
||||
such Secondary License(s), so that the recipient of the Larger Work may, at
|
||||
their option, further distribute the Covered Software under the terms of
|
||||
either this License or such Secondary License(s).
|
||||
|
||||
3.4. Notices
|
||||
|
||||
You may not remove or alter the substance of any license notices (including
|
||||
copyright notices, patent notices, disclaimers of warranty, or limitations
|
||||
of liability) contained within the Source Code Form of the Covered
|
||||
Software, except that You may alter any license notices to the extent
|
||||
required to remedy known factual inaccuracies.
|
||||
|
||||
3.5. Application of Additional Terms
|
||||
|
||||
You may choose to offer, and to charge a fee for, warranty, support,
|
||||
indemnity or liability obligations to one or more recipients of Covered
|
||||
Software. However, You may do so only on Your own behalf, and not on behalf
|
||||
of any Contributor. You must make it absolutely clear that any such
|
||||
warranty, support, indemnity, or liability obligation is offered by You
|
||||
alone, and You hereby agree to indemnify every Contributor for any
|
||||
liability incurred by such Contributor as a result of warranty, support,
|
||||
indemnity or liability terms You offer. You may include additional
|
||||
disclaimers of warranty and limitations of liability specific to any
|
||||
jurisdiction.
|
||||
|
||||
4. Inability to Comply Due to Statute or Regulation
|
||||
|
||||
If it is impossible for You to comply with any of the terms of this License
|
||||
with respect to some or all of the Covered Software due to statute, judicial
|
||||
order, or regulation then You must: (a) comply with the terms of this License
|
||||
to the maximum extent possible; and (b) describe the limitations and the code
|
||||
they affect. Such description must be placed in a text file included with all
|
||||
distributions of the Covered Software under this License. Except to the
|
||||
extent prohibited by statute or regulation, such description must be
|
||||
sufficiently detailed for a recipient of ordinary skill to be able to
|
||||
understand it.
|
||||
|
||||
5. Termination
|
||||
|
||||
5.1. The rights granted under this License will terminate automatically if You
|
||||
fail to comply with any of its terms. However, if You become compliant,
|
||||
then the rights granted under this License from a particular Contributor
|
||||
are reinstated (a) provisionally, unless and until such Contributor
|
||||
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
|
||||
if such Contributor fails to notify You of the non-compliance by some
|
||||
reasonable means prior to 60 days after You have come back into compliance.
|
||||
Moreover, Your grants from a particular Contributor are reinstated on an
|
||||
ongoing basis if such Contributor notifies You of the non-compliance by
|
||||
some reasonable means, this is the first time You have received notice of
|
||||
non-compliance with this License from such Contributor, and You become
|
||||
compliant prior to 30 days after Your receipt of the notice.
|
||||
|
||||
5.2. If You initiate litigation against any entity by asserting a patent
|
||||
infringement claim (excluding declaratory judgment actions, counter-claims,
|
||||
and cross-claims) alleging that a Contributor Version directly or
|
||||
indirectly infringes any patent, then the rights granted to You by any and
|
||||
all Contributors for the Covered Software under Section 2.1 of this License
|
||||
shall terminate.
|
||||
|
||||
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
||||
license agreements (excluding distributors and resellers) which have been
|
||||
validly granted by You or Your distributors under this License prior to
|
||||
termination shall survive termination.
|
||||
|
||||
6. Disclaimer of Warranty
|
||||
|
||||
Covered Software is provided under this License on an “as is” basis, without
|
||||
warranty of any kind, either expressed, implied, or statutory, including,
|
||||
without limitation, warranties that the Covered Software is free of defects,
|
||||
merchantable, fit for a particular purpose or non-infringing. The entire
|
||||
risk as to the quality and performance of the Covered Software is with You.
|
||||
Should any Covered Software prove defective in any respect, You (not any
|
||||
Contributor) assume the cost of any necessary servicing, repair, or
|
||||
correction. This disclaimer of warranty constitutes an essential part of this
|
||||
License. No use of any Covered Software is authorized under this License
|
||||
except under this disclaimer.
|
||||
|
||||
7. Limitation of Liability
|
||||
|
||||
Under no circumstances and under no legal theory, whether tort (including
|
||||
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
||||
distributes Covered Software as permitted above, be liable to You for any
|
||||
direct, indirect, special, incidental, or consequential damages of any
|
||||
character including, without limitation, damages for lost profits, loss of
|
||||
goodwill, work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses, even if such party shall have been
|
||||
informed of the possibility of such damages. This limitation of liability
|
||||
shall not apply to liability for death or personal injury resulting from such
|
||||
party’s negligence to the extent applicable law prohibits such limitation.
|
||||
Some jurisdictions do not allow the exclusion or limitation of incidental or
|
||||
consequential damages, so this exclusion and limitation may not apply to You.
|
||||
|
||||
8. Litigation
|
||||
|
||||
Any litigation relating to this License may be brought only in the courts of
|
||||
a jurisdiction where the defendant maintains its principal place of business
|
||||
and such litigation shall be governed by laws of that jurisdiction, without
|
||||
reference to its conflict-of-law provisions. Nothing in this Section shall
|
||||
prevent a party’s ability to bring cross-claims or counter-claims.
|
||||
|
||||
9. Miscellaneous
|
||||
|
||||
This License represents the complete agreement concerning the subject matter
|
||||
hereof. If any provision of this License is held to be unenforceable, such
|
||||
provision shall be reformed only to the extent necessary to make it
|
||||
enforceable. Any law or regulation which provides that the language of a
|
||||
contract shall be construed against the drafter shall not be used to construe
|
||||
this License against a Contributor.
|
||||
|
||||
|
||||
10. Versions of the License
|
||||
|
||||
10.1. New Versions
|
||||
|
||||
Mozilla Foundation is the license steward. Except as provided in Section
|
||||
10.3, no one other than the license steward has the right to modify or
|
||||
publish new versions of this License. Each version will be given a
|
||||
distinguishing version number.
|
||||
|
||||
10.2. Effect of New Versions
|
||||
|
||||
You may distribute the Covered Software under the terms of the version of
|
||||
the License under which You originally received the Covered Software, or
|
||||
under the terms of any subsequent version published by the license
|
||||
steward.
|
||||
|
||||
10.3. Modified Versions
|
||||
|
||||
If you create software not governed by this License, and you want to
|
||||
create a new license for such software, you may create and use a modified
|
||||
version of this License if you rename the license and remove any
|
||||
references to the name of the license steward (except to note that such
|
||||
modified license differs from this License).
|
||||
|
||||
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
|
||||
If You choose to distribute Source Code Form that is Incompatible With
|
||||
Secondary Licenses under the terms of this version of the License, the
|
||||
notice described in Exhibit B of this License must be attached.
|
||||
|
||||
Exhibit A - Source Code Form License Notice
|
||||
|
||||
This Source Code Form is subject to the
|
||||
terms of the Mozilla Public License, v.
|
||||
2.0. If a copy of the MPL was not
|
||||
distributed with this file, You can
|
||||
obtain one at
|
||||
http://mozilla.org/MPL/2.0/.
|
||||
|
||||
If it is not possible or desirable to put the notice in a particular file, then
|
||||
You may include the notice in a location (such as a LICENSE file in a relevant
|
||||
directory) where a recipient would be likely to look for such a notice.
|
||||
|
||||
You may add additional accurate notices of copyright ownership.
|
||||
|
||||
Exhibit B - “Incompatible With Secondary Licenses” Notice
|
||||
|
||||
This Source Code Form is “Incompatible
|
||||
With Secondary Licenses”, as defined by
|
||||
the Mozilla Public License, v. 2.0.
|
||||
|
||||
18
vendor/github.com/hashicorp/hcl/Makefile
generated
vendored
Normal file
18
vendor/github.com/hashicorp/hcl/Makefile
generated
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
TEST?=./...
|
||||
|
||||
default: test
|
||||
|
||||
fmt: generate
|
||||
go fmt ./...
|
||||
|
||||
test: generate
|
||||
go get -t ./...
|
||||
go test $(TEST) $(TESTARGS)
|
||||
|
||||
generate:
|
||||
go generate ./...
|
||||
|
||||
updatedeps:
|
||||
go get -u golang.org/x/tools/cmd/stringer
|
||||
|
||||
.PHONY: default generate test updatedeps
|
||||
125
vendor/github.com/hashicorp/hcl/README.md
generated
vendored
Normal file
125
vendor/github.com/hashicorp/hcl/README.md
generated
vendored
Normal file
@@ -0,0 +1,125 @@
|
||||
# HCL
|
||||
|
||||
[](https://godoc.org/github.com/hashicorp/hcl) [](https://travis-ci.org/hashicorp/hcl)
|
||||
|
||||
HCL (HashiCorp Configuration Language) is a configuration language built
|
||||
by HashiCorp. The goal of HCL is to build a structured configuration language
|
||||
that is both human and machine friendly for use with command-line tools, but
|
||||
specifically targeted towards DevOps tools, servers, etc.
|
||||
|
||||
HCL is also fully JSON compatible. That is, JSON can be used as completely
|
||||
valid input to a system expecting HCL. This helps makes systems
|
||||
interoperable with other systems.
|
||||
|
||||
HCL is heavily inspired by
|
||||
[libucl](https://github.com/vstakhov/libucl),
|
||||
nginx configuration, and others similar.
|
||||
|
||||
## Why?
|
||||
|
||||
A common question when viewing HCL is to ask the question: why not
|
||||
JSON, YAML, etc.?
|
||||
|
||||
Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com)
|
||||
used a variety of configuration languages from full programming languages
|
||||
such as Ruby to complete data structure languages such as JSON. What we
|
||||
learned is that some people wanted human-friendly configuration languages
|
||||
and some people wanted machine-friendly languages.
|
||||
|
||||
JSON fits a nice balance in this, but is fairly verbose and most
|
||||
importantly doesn't support comments. With YAML, we found that beginners
|
||||
had a really hard time determining what the actual structure was, and
|
||||
ended up guessing more often than not whether to use a hyphen, colon, etc.
|
||||
in order to represent some configuration key.
|
||||
|
||||
Full programming languages such as Ruby enable complex behavior
|
||||
a configuration language shouldn't usually allow, and also forces
|
||||
people to learn some set of Ruby.
|
||||
|
||||
Because of this, we decided to create our own configuration language
|
||||
that is JSON-compatible. Our configuration language (HCL) is designed
|
||||
to be written and modified by humans. The API for HCL allows JSON
|
||||
as an input so that it is also machine-friendly (machines can generate
|
||||
JSON instead of trying to generate HCL).
|
||||
|
||||
Our goal with HCL is not to alienate other configuration languages.
|
||||
It is instead to provide HCL as a specialized language for our tools,
|
||||
and JSON as the interoperability layer.
|
||||
|
||||
## Syntax
|
||||
|
||||
For a complete grammar, please see the parser itself. A high-level overview
|
||||
of the syntax and grammar is listed here.
|
||||
|
||||
* Single line comments start with `#` or `//`
|
||||
|
||||
* Multi-line comments are wrapped in `/*` and `*/`. Nested block comments
|
||||
are not allowed. A multi-line comment (also known as a block comment)
|
||||
terminates at the first `*/` found.
|
||||
|
||||
* Values are assigned with the syntax `key = value` (whitespace doesn't
|
||||
matter). The value can be any primitive: a string, number, boolean,
|
||||
object, or list.
|
||||
|
||||
* Strings are double-quoted and can contain any UTF-8 characters.
|
||||
Example: `"Hello, World"`
|
||||
|
||||
* Multi-line strings start with `<<EOF` at the end of a line, and end
|
||||
with `EOF` on its own line ([here documents](https://en.wikipedia.org/wiki/Here_document)).
|
||||
Any text may be used in place of `EOF`. Example:
|
||||
```
|
||||
<<FOO
|
||||
hello
|
||||
world
|
||||
FOO
|
||||
```
|
||||
|
||||
* Numbers are assumed to be base 10. If you prefix a number with 0x,
|
||||
it is treated as a hexadecimal. If it is prefixed with 0, it is
|
||||
treated as an octal. Numbers can be in scientific notation: "1e10".
|
||||
|
||||
* Boolean values: `true`, `false`
|
||||
|
||||
* Arrays can be made by wrapping it in `[]`. Example:
|
||||
`["foo", "bar", 42]`. Arrays can contain primitives,
|
||||
other arrays, and objects. As an alternative, lists
|
||||
of objects can be created with repeated blocks, using
|
||||
this structure:
|
||||
|
||||
```hcl
|
||||
service {
|
||||
key = "value"
|
||||
}
|
||||
|
||||
service {
|
||||
key = "value"
|
||||
}
|
||||
```
|
||||
|
||||
Objects and nested objects are created using the structure shown below:
|
||||
|
||||
```
|
||||
variable "ami" {
|
||||
description = "the AMI to use"
|
||||
}
|
||||
```
|
||||
This would be equivalent to the following json:
|
||||
``` json
|
||||
{
|
||||
"variable": {
|
||||
"ami": {
|
||||
"description": "the AMI to use"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Thanks
|
||||
|
||||
Thanks to:
|
||||
|
||||
* [@vstakhov](https://github.com/vstakhov) - The original libucl parser
|
||||
and syntax that HCL was based off of.
|
||||
|
||||
* [@fatih](https://github.com/fatih) - The rewritten HCL parser
|
||||
in pure Go (no goyacc) and support for a printer.
|
||||
19
vendor/github.com/hashicorp/hcl/appveyor.yml
generated
vendored
Normal file
19
vendor/github.com/hashicorp/hcl/appveyor.yml
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
version: "build-{branch}-{build}"
|
||||
image: Visual Studio 2015
|
||||
clone_folder: c:\gopath\src\github.com\hashicorp\hcl
|
||||
environment:
|
||||
GOPATH: c:\gopath
|
||||
init:
|
||||
- git config --global core.autocrlf false
|
||||
install:
|
||||
- cmd: >-
|
||||
echo %Path%
|
||||
|
||||
go version
|
||||
|
||||
go env
|
||||
|
||||
go get -t ./...
|
||||
|
||||
build_script:
|
||||
- cmd: go test -v ./...
|
||||
724
vendor/github.com/hashicorp/hcl/decoder.go
generated
vendored
Normal file
724
vendor/github.com/hashicorp/hcl/decoder.go
generated
vendored
Normal file
@@ -0,0 +1,724 @@
|
||||
package hcl
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/hcl/hcl/ast"
|
||||
"github.com/hashicorp/hcl/hcl/parser"
|
||||
"github.com/hashicorp/hcl/hcl/token"
|
||||
)
|
||||
|
||||
// This is the tag to use with structures to have settings for HCL
|
||||
const tagName = "hcl"
|
||||
|
||||
var (
|
||||
// nodeType holds a reference to the type of ast.Node
|
||||
nodeType reflect.Type = findNodeType()
|
||||
)
|
||||
|
||||
// Unmarshal accepts a byte slice as input and writes the
|
||||
// data to the value pointed to by v.
|
||||
func Unmarshal(bs []byte, v interface{}) error {
|
||||
root, err := parse(bs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return DecodeObject(v, root)
|
||||
}
|
||||
|
||||
// Decode reads the given input and decodes it into the structure
|
||||
// given by `out`.
|
||||
func Decode(out interface{}, in string) error {
|
||||
obj, err := Parse(in)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return DecodeObject(out, obj)
|
||||
}
|
||||
|
||||
// DecodeObject is a lower-level version of Decode. It decodes a
|
||||
// raw Object into the given output.
|
||||
func DecodeObject(out interface{}, n ast.Node) error {
|
||||
val := reflect.ValueOf(out)
|
||||
if val.Kind() != reflect.Ptr {
|
||||
return errors.New("result must be a pointer")
|
||||
}
|
||||
|
||||
// If we have the file, we really decode the root node
|
||||
if f, ok := n.(*ast.File); ok {
|
||||
n = f.Node
|
||||
}
|
||||
|
||||
var d decoder
|
||||
return d.decode("root", n, val.Elem())
|
||||
}
|
||||
|
||||
type decoder struct {
|
||||
stack []reflect.Kind
|
||||
}
|
||||
|
||||
func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error {
|
||||
k := result
|
||||
|
||||
// If we have an interface with a valid value, we use that
|
||||
// for the check.
|
||||
if result.Kind() == reflect.Interface {
|
||||
elem := result.Elem()
|
||||
if elem.IsValid() {
|
||||
k = elem
|
||||
}
|
||||
}
|
||||
|
||||
// Push current onto stack unless it is an interface.
|
||||
if k.Kind() != reflect.Interface {
|
||||
d.stack = append(d.stack, k.Kind())
|
||||
|
||||
// Schedule a pop
|
||||
defer func() {
|
||||
d.stack = d.stack[:len(d.stack)-1]
|
||||
}()
|
||||
}
|
||||
|
||||
switch k.Kind() {
|
||||
case reflect.Bool:
|
||||
return d.decodeBool(name, node, result)
|
||||
case reflect.Float64:
|
||||
return d.decodeFloat(name, node, result)
|
||||
case reflect.Int, reflect.Int32, reflect.Int64:
|
||||
return d.decodeInt(name, node, result)
|
||||
case reflect.Interface:
|
||||
// When we see an interface, we make our own thing
|
||||
return d.decodeInterface(name, node, result)
|
||||
case reflect.Map:
|
||||
return d.decodeMap(name, node, result)
|
||||
case reflect.Ptr:
|
||||
return d.decodePtr(name, node, result)
|
||||
case reflect.Slice:
|
||||
return d.decodeSlice(name, node, result)
|
||||
case reflect.String:
|
||||
return d.decodeString(name, node, result)
|
||||
case reflect.Struct:
|
||||
return d.decodeStruct(name, node, result)
|
||||
default:
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error {
|
||||
switch n := node.(type) {
|
||||
case *ast.LiteralType:
|
||||
if n.Token.Type == token.BOOL {
|
||||
v, err := strconv.ParseBool(n.Token.Text)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
result.Set(reflect.ValueOf(v))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: unknown type %T", name, node),
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error {
|
||||
switch n := node.(type) {
|
||||
case *ast.LiteralType:
|
||||
if n.Token.Type == token.FLOAT {
|
||||
v, err := strconv.ParseFloat(n.Token.Text, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
result.Set(reflect.ValueOf(v))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: unknown type %T", name, node),
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error {
|
||||
switch n := node.(type) {
|
||||
case *ast.LiteralType:
|
||||
switch n.Token.Type {
|
||||
case token.NUMBER:
|
||||
v, err := strconv.ParseInt(n.Token.Text, 0, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if result.Kind() == reflect.Interface {
|
||||
result.Set(reflect.ValueOf(int(v)))
|
||||
} else {
|
||||
result.SetInt(v)
|
||||
}
|
||||
return nil
|
||||
case token.STRING:
|
||||
v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if result.Kind() == reflect.Interface {
|
||||
result.Set(reflect.ValueOf(int(v)))
|
||||
} else {
|
||||
result.SetInt(v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: unknown type %T", name, node),
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error {
|
||||
// When we see an ast.Node, we retain the value to enable deferred decoding.
|
||||
// Very useful in situations where we want to preserve ast.Node information
|
||||
// like Pos
|
||||
if result.Type() == nodeType && result.CanSet() {
|
||||
result.Set(reflect.ValueOf(node))
|
||||
return nil
|
||||
}
|
||||
|
||||
var set reflect.Value
|
||||
redecode := true
|
||||
|
||||
// For testing types, ObjectType should just be treated as a list. We
|
||||
// set this to a temporary var because we want to pass in the real node.
|
||||
testNode := node
|
||||
if ot, ok := node.(*ast.ObjectType); ok {
|
||||
testNode = ot.List
|
||||
}
|
||||
|
||||
switch n := testNode.(type) {
|
||||
case *ast.ObjectList:
|
||||
// If we're at the root or we're directly within a slice, then we
|
||||
// decode objects into map[string]interface{}, otherwise we decode
|
||||
// them into lists.
|
||||
if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
|
||||
var temp map[string]interface{}
|
||||
tempVal := reflect.ValueOf(temp)
|
||||
result := reflect.MakeMap(
|
||||
reflect.MapOf(
|
||||
reflect.TypeOf(""),
|
||||
tempVal.Type().Elem()))
|
||||
|
||||
set = result
|
||||
} else {
|
||||
var temp []map[string]interface{}
|
||||
tempVal := reflect.ValueOf(temp)
|
||||
result := reflect.MakeSlice(
|
||||
reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items))
|
||||
set = result
|
||||
}
|
||||
case *ast.ObjectType:
|
||||
// If we're at the root or we're directly within a slice, then we
|
||||
// decode objects into map[string]interface{}, otherwise we decode
|
||||
// them into lists.
|
||||
if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
|
||||
var temp map[string]interface{}
|
||||
tempVal := reflect.ValueOf(temp)
|
||||
result := reflect.MakeMap(
|
||||
reflect.MapOf(
|
||||
reflect.TypeOf(""),
|
||||
tempVal.Type().Elem()))
|
||||
|
||||
set = result
|
||||
} else {
|
||||
var temp []map[string]interface{}
|
||||
tempVal := reflect.ValueOf(temp)
|
||||
result := reflect.MakeSlice(
|
||||
reflect.SliceOf(tempVal.Type().Elem()), 0, 1)
|
||||
set = result
|
||||
}
|
||||
case *ast.ListType:
|
||||
var temp []interface{}
|
||||
tempVal := reflect.ValueOf(temp)
|
||||
result := reflect.MakeSlice(
|
||||
reflect.SliceOf(tempVal.Type().Elem()), 0, 0)
|
||||
set = result
|
||||
case *ast.LiteralType:
|
||||
switch n.Token.Type {
|
||||
case token.BOOL:
|
||||
var result bool
|
||||
set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
|
||||
case token.FLOAT:
|
||||
var result float64
|
||||
set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
|
||||
case token.NUMBER:
|
||||
var result int
|
||||
set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
|
||||
case token.STRING, token.HEREDOC:
|
||||
set = reflect.Indirect(reflect.New(reflect.TypeOf("")))
|
||||
default:
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node),
|
||||
}
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf(
|
||||
"%s: cannot decode into interface: %T",
|
||||
name, node)
|
||||
}
|
||||
|
||||
// Set the result to what its supposed to be, then reset
|
||||
// result so we don't reflect into this method anymore.
|
||||
result.Set(set)
|
||||
|
||||
if redecode {
|
||||
// Revisit the node so that we can use the newly instantiated
|
||||
// thing and populate it.
|
||||
if err := d.decode(name, node, result); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// decodeMap decodes an HCL object node into a map value with string keys.
// A single *ast.ObjectItem is promoted to a one-item object list, and an
// *ast.ObjectType is unwrapped to its item list, so the body only deals
// with *ast.ObjectList. Existing entries in a non-nil map are used as the
// starting value for keys that reappear.
func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error {
	// Promote a lone object item to a one-element list.
	if item, ok := node.(*ast.ObjectItem); ok {
		node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
	}

	// An object type is just a wrapper around its item list.
	if ot, ok := node.(*ast.ObjectType); ok {
		node = ot.List
	}

	n, ok := node.(*ast.ObjectList)
	if !ok {
		return &parser.PosError{
			Pos: node.Pos(),
			Err: fmt.Errorf("%s: not an object type for map (%T)", name, node),
		}
	}

	// If we have an interface, then we can address the interface,
	// but not the slice itself, so get the element but set the interface
	set := result
	if result.Kind() == reflect.Interface {
		result = result.Elem()
	}

	resultType := result.Type()
	resultElemType := resultType.Elem()
	resultKeyType := resultType.Key()
	if resultKeyType.Kind() != reflect.String {
		return &parser.PosError{
			Pos: node.Pos(),
			Err: fmt.Errorf("%s: map must have string keys", name),
		}
	}

	// Make a map if it is nil
	resultMap := result
	if result.IsNil() {
		resultMap = reflect.MakeMap(
			reflect.MapOf(resultKeyType, resultElemType))
	}

	// Go through each element and decode it.
	done := make(map[string]struct{})
	for _, item := range n.Items {
		// Items without a value contribute nothing to the map.
		if item.Val == nil {
			continue
		}

		// github.com/hashicorp/terraform/issue/5740
		if len(item.Keys) == 0 {
			return &parser.PosError{
				Pos: node.Pos(),
				Err: fmt.Errorf("%s: map must have string keys", name),
			}
		}

		// Get the key we're dealing with, which is the first item
		keyStr := item.Keys[0].Token.Value().(string)

		// If we've already processed this key, then ignore it
		if _, ok := done[keyStr]; ok {
			continue
		}

		// Determine the value. If we have more than one key, then we
		// get the objectlist of only these keys.
		itemVal := item.Val
		if len(item.Keys) > 1 {
			// Multi-key item: decode from the filtered list of all items
			// sharing this first key, and mark the key as fully handled.
			itemVal = n.Filter(keyStr)
			done[keyStr] = struct{}{}
		}

		// Make the field name
		fieldName := fmt.Sprintf("%s.%s", name, keyStr)

		// Get the key/value as reflection values
		key := reflect.ValueOf(keyStr)
		val := reflect.Indirect(reflect.New(resultElemType))

		// If we have a pre-existing value in the map, use that
		oldVal := resultMap.MapIndex(key)
		if oldVal.IsValid() {
			val.Set(oldVal)
		}

		// Decode!
		if err := d.decode(fieldName, itemVal, val); err != nil {
			return err
		}

		// Set the value on the map
		resultMap.SetMapIndex(key, val)
	}

	// Set the final map if we can
	set.Set(resultMap)
	return nil
}
|
||||
|
||||
func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error {
|
||||
// Create an element of the concrete (non pointer) type and decode
|
||||
// into that. Then set the value of the pointer to this type.
|
||||
resultType := result.Type()
|
||||
resultElemType := resultType.Elem()
|
||||
val := reflect.New(resultElemType)
|
||||
if err := d.decode(name, node, reflect.Indirect(val)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
result.Set(val)
|
||||
return nil
|
||||
}
|
||||
|
||||
// decodeSlice decodes node into a slice-typed result, appending one decoded
// element per AST item: an object list contributes one element per item, an
// object type contributes a single element, and a list type contributes its
// elements in order.
func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error {
	// If we have an interface, then we can address the interface,
	// but not the slice itself, so get the element but set the interface
	set := result
	if result.Kind() == reflect.Interface {
		result = result.Elem()
	}
	// Create the slice if it isn't nil
	resultType := result.Type()
	resultElemType := resultType.Elem()
	if result.IsNil() {
		resultSliceType := reflect.SliceOf(resultElemType)
		result = reflect.MakeSlice(
			resultSliceType, 0, 0)
	}

	// Figure out the items we'll be copying into the slice
	var items []ast.Node
	switch n := node.(type) {
	case *ast.ObjectList:
		items = make([]ast.Node, len(n.Items))
		for i, item := range n.Items {
			items[i] = item
		}
	case *ast.ObjectType:
		items = []ast.Node{n}
	case *ast.ListType:
		items = n.List
	default:
		return &parser.PosError{
			Pos: node.Pos(),
			Err: fmt.Errorf("unknown slice type: %T", node),
		}
	}

	for i, item := range items {
		fieldName := fmt.Sprintf("%s[%d]", name, i)

		// Decode
		val := reflect.Indirect(reflect.New(resultElemType))

		// if item is an object that was decoded from ambiguous JSON and
		// flattened, make sure it's expanded if it needs to decode into a
		// defined structure.
		item := expandObject(item, val)

		if err := d.decode(fieldName, item, val); err != nil {
			return err
		}

		// Append it onto the slice
		result = reflect.Append(result, val)
	}

	set.Set(result)
	return nil
}
|
||||
|
||||
// expandObject detects if an ambiguous JSON object was flattened to a List which
// should be decoded into a struct, and expands the ast to properly decode.
//
// Only applies when node is an *ast.ObjectItem with exactly two keys and the
// decode target is a struct (or pointer to struct); otherwise node is
// returned unchanged. NOTE: the passed item's Keys slice is mutated in place
// (the first key is stripped off) before re-wrapping.
func expandObject(node ast.Node, result reflect.Value) ast.Node {
	item, ok := node.(*ast.ObjectItem)
	if !ok {
		return node
	}

	elemType := result.Type()

	// our target type must be a struct
	switch elemType.Kind() {
	case reflect.Ptr:
		switch elemType.Elem().Kind() {
		case reflect.Struct:
			//OK
		default:
			return node
		}
	case reflect.Struct:
		//OK
	default:
		return node
	}

	// A list value will have a key and field name. If it had more fields,
	// it wouldn't have been flattened.
	if len(item.Keys) != 2 {
		return node
	}

	// Save the outer key, then strip it from the item so the inner item
	// carries only the field name.
	keyToken := item.Keys[0].Token
	item.Keys = item.Keys[1:]

	// we need to un-flatten the ast enough to decode
	newNode := &ast.ObjectItem{
		Keys: []*ast.ObjectKey{
			&ast.ObjectKey{
				Token: keyToken,
			},
		},
		Val: &ast.ObjectType{
			List: &ast.ObjectList{
				Items: []*ast.ObjectItem{item},
			},
		},
	}

	return newNode
}
|
||||
|
||||
func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error {
|
||||
switch n := node.(type) {
|
||||
case *ast.LiteralType:
|
||||
switch n.Token.Type {
|
||||
case token.NUMBER:
|
||||
result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type()))
|
||||
return nil
|
||||
case token.STRING, token.HEREDOC:
|
||||
result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type()))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: unknown type for string %T", name, node),
|
||||
}
|
||||
}
|
||||
|
||||
// decodeStruct decodes an HCL object into a Go struct. Field mapping is
// driven by the struct tag (tagName): tag "-" skips a field, anonymous
// struct fields tagged ",squash" are flattened into the parent, and the
// special tag suffixes "decodedFields", "key", and "unusedKeys" mark
// bookkeeping fields instead of HCL-decoded values.
func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error {
	// If we were handed an object item, remember it (its key is needed for
	// the ",key" tag) and descend into its value.
	var item *ast.ObjectItem
	if it, ok := node.(*ast.ObjectItem); ok {
		item = it
		node = it.Val
	}

	if ot, ok := node.(*ast.ObjectType); ok {
		node = ot.List
	}

	// Handle the special case where the object itself is a literal. Previously
	// the yacc parser would always ensure top-level elements were arrays. The new
	// parser does not make the same guarantees, thus we need to convert any
	// top-level literal elements into a list.
	if _, ok := node.(*ast.LiteralType); ok && item != nil {
		node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
	}

	list, ok := node.(*ast.ObjectList)
	if !ok {
		return &parser.PosError{
			Pos: node.Pos(),
			Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node),
		}
	}

	// This slice will keep track of all the structs we'll be decoding.
	// There can be more than one struct if there are embedded structs
	// that are squashed.
	structs := make([]reflect.Value, 1, 5)
	structs[0] = result

	// Compile the list of all the fields that we're going to be decoding
	// from all the structs.
	fields := make(map[*reflect.StructField]reflect.Value)
	for len(structs) > 0 {
		structVal := structs[0]
		structs = structs[1:]

		structType := structVal.Type()
		for i := 0; i < structType.NumField(); i++ {
			// fieldType is declared per iteration, so taking its address
			// below yields a distinct pointer for each field.
			fieldType := structType.Field(i)
			tagParts := strings.Split(fieldType.Tag.Get(tagName), ",")

			// Ignore fields with tag name "-"
			if tagParts[0] == "-" {
				continue
			}

			if fieldType.Anonymous {
				fieldKind := fieldType.Type.Kind()
				if fieldKind != reflect.Struct {
					return &parser.PosError{
						Pos: node.Pos(),
						Err: fmt.Errorf("%s: unsupported type to struct: %s",
							fieldType.Name, fieldKind),
					}
				}

				// We have an embedded field. We "squash" the fields down
				// if specified in the tag.
				squash := false
				for _, tag := range tagParts[1:] {
					if tag == "squash" {
						squash = true
						break
					}
				}

				if squash {
					structs = append(
						structs, result.FieldByName(fieldType.Name))
					continue
				}
			}

			// Normal struct field, store it away
			fields[&fieldType] = structVal.Field(i)
		}
	}

	// NOTE(review): usedKeys and unusedKeysVal are collected below but not
	// consumed within this function — presumably handled elsewhere or a
	// remnant; verify against the rest of the file.
	usedKeys := make(map[string]struct{})
	decodedFields := make([]string, 0, len(fields))
	decodedFieldsVal := make([]reflect.Value, 0)
	unusedKeysVal := make([]reflect.Value, 0)
	for fieldType, field := range fields {
		if !field.IsValid() {
			// This should never happen
			panic("field is not valid")
		}

		// If we can't set the field, then it is unexported or something,
		// and we just continue onwards.
		if !field.CanSet() {
			continue
		}

		fieldName := fieldType.Name

		tagValue := fieldType.Tag.Get(tagName)
		tagParts := strings.SplitN(tagValue, ",", 2)
		if len(tagParts) >= 2 {
			// Special bookkeeping tags take over the whole field.
			switch tagParts[1] {
			case "decodedFields":
				decodedFieldsVal = append(decodedFieldsVal, field)
				continue
			case "key":
				if item == nil {
					return &parser.PosError{
						Pos: node.Pos(),
						Err: fmt.Errorf("%s: %s asked for 'key', impossible",
							name, fieldName),
					}
				}

				field.SetString(item.Keys[0].Token.Value().(string))
				continue
			case "unusedKeys":
				unusedKeysVal = append(unusedKeysVal, field)
				continue
			}
		}

		// A non-empty tag name overrides the Go field name as the HCL key.
		if tagParts[0] != "" {
			fieldName = tagParts[0]
		}

		// Determine the element we'll use to decode. If it is a single
		// match (only object with the field), then we decode it exactly.
		// If it is a prefix match, then we decode the matches.
		filter := list.Filter(fieldName)

		prefixMatches := filter.Children()
		matches := filter.Elem()
		if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 {
			continue
		}

		// Track the used key
		usedKeys[fieldName] = struct{}{}

		// Create the field name and decode. We range over the elements
		// because we actually want the value.
		fieldName = fmt.Sprintf("%s.%s", name, fieldName)
		if len(prefixMatches.Items) > 0 {
			if err := d.decode(fieldName, prefixMatches, field); err != nil {
				return err
			}
		}
		for _, match := range matches.Items {
			var decodeNode ast.Node = match.Val
			if ot, ok := decodeNode.(*ast.ObjectType); ok {
				decodeNode = &ast.ObjectList{Items: ot.List.Items}
			}

			if err := d.decode(fieldName, decodeNode, field); err != nil {
				return err
			}
		}

		decodedFields = append(decodedFields, fieldType.Name)
	}

	if len(decodedFieldsVal) > 0 {
		// Sort it so that it is deterministic
		sort.Strings(decodedFields)

		for _, v := range decodedFieldsVal {
			v.Set(reflect.ValueOf(decodedFields))
		}
	}

	return nil
}
|
||||
|
||||
// findNodeType returns the type of ast.Node
|
||||
func findNodeType() reflect.Type {
|
||||
var nodeContainer struct {
|
||||
Node ast.Node
|
||||
}
|
||||
value := reflect.ValueOf(nodeContainer).FieldByName("Node")
|
||||
return value.Type()
|
||||
}
|
||||
11
vendor/github.com/hashicorp/hcl/hcl.go
generated
vendored
Normal file
11
vendor/github.com/hashicorp/hcl/hcl.go
generated
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
// Package hcl decodes HCL into usable Go structures.
|
||||
//
|
||||
// hcl input can come in either pure HCL format or JSON format.
|
||||
// It can be parsed into an AST, and then decoded into a structure,
|
||||
// or it can be decoded directly from a string into a structure.
|
||||
//
|
||||
// If you choose to parse HCL into a raw AST, the benefit is that you
|
||||
// can write custom visitor implementations to implement custom
|
||||
// semantic checks. By default, HCL does not perform any semantic
|
||||
// checks.
|
||||
package hcl
|
||||
219
vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
generated
vendored
Normal file
219
vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
generated
vendored
Normal file
@@ -0,0 +1,219 @@
|
||||
// Package ast declares the types used to represent syntax trees for HCL
|
||||
// (HashiCorp Configuration Language)
|
||||
package ast
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/hcl/hcl/token"
|
||||
)
|
||||
|
||||
// Node is an element in the abstract syntax tree.
type Node interface {
	node()
	Pos() token.Pos
}

// node is an unexported marker method: only types declared in this package
// can satisfy the Node interface. Each AST type opts in below.
func (File) node()         {}
func (ObjectList) node()   {}
func (ObjectKey) node()    {}
func (ObjectItem) node()   {}
func (Comment) node()      {}
func (CommentGroup) node() {}
func (ObjectType) node()   {}
func (LiteralType) node()  {}
func (ListType) node()     {}
|
||||
// File represents a single HCL file
type File struct {
	Node     Node            // usually a *ObjectList
	Comments []*CommentGroup // list of all comments in the source
}

// Pos returns the position of the file's root node.
func (f *File) Pos() token.Pos {
	return f.Node.Pos()
}
|
||||
|
||||
// ObjectList represents a list of ObjectItems. An HCL file itself is an
// ObjectList.
type ObjectList struct {
	Items []*ObjectItem
}

// Add appends item to the list.
func (o *ObjectList) Add(item *ObjectItem) {
	o.Items = append(o.Items, item)
}

// Filter filters out the objects with the given key list as a prefix.
//
// The returned list of objects contain ObjectItems where the keys have
// this prefix already stripped off. This might result in objects with
// zero-length key lists if they have no children.
//
// If no matches are found, an empty ObjectList (non-nil) is returned.
func (o *ObjectList) Filter(keys ...string) *ObjectList {
	var result ObjectList
	for _, item := range o.Items {
		// If there aren't enough keys, then ignore this
		if len(item.Keys) < len(keys) {
			continue
		}

		// Keys are compared case-insensitively (EqualFold); the exact
		// comparison short-circuits the common identical-string case.
		match := true
		for i, key := range item.Keys[:len(keys)] {
			key := key.Token.Value().(string)
			if key != keys[i] && !strings.EqualFold(key, keys[i]) {
				match = false
				break
			}
		}
		if !match {
			continue
		}

		// Strip off the prefix from the children
		newItem := *item
		newItem.Keys = newItem.Keys[len(keys):]
		result.Add(&newItem)
	}

	return &result
}

// Children returns further nested objects (key length > 0) within this
// ObjectList. This should be used with Filter to get at child items.
func (o *ObjectList) Children() *ObjectList {
	var result ObjectList
	for _, item := range o.Items {
		if len(item.Keys) > 0 {
			result.Add(item)
		}
	}

	return &result
}

// Elem returns items in the list that are direct element assignments
// (key length == 0). This should be used with Filter to get at elements.
func (o *ObjectList) Elem() *ObjectList {
	var result ObjectList
	for _, item := range o.Items {
		if len(item.Keys) == 0 {
			result.Add(item)
		}
	}

	return &result
}
|
||||
|
||||
func (o *ObjectList) Pos() token.Pos {
|
||||
// always returns the uninitiliazed position
|
||||
return o.Items[0].Pos()
|
||||
}
|
||||
|
||||
// ObjectItem represents a HCL Object Item. An item is represented with a key
// (or keys). It can be an assignment or an object (both normal and nested)
type ObjectItem struct {
	// keys is only one length long if it's of type assignment. If it's a
	// nested object it can be larger than one. In that case "assign" is
	// invalid as there is no assignments for a nested object.
	Keys []*ObjectKey

	// assign contains the position of "=", if any
	Assign token.Pos

	// val is the item itself. It can be an object,list, number, bool or a
	// string. If key length is larger than one, val can be only of type
	// Object.
	Val Node

	LeadComment *CommentGroup // associated lead comment
	LineComment *CommentGroup // associated line comment
}

// Pos returns the position of the item's first key, or the zero position
// when the item has no keys.
func (o *ObjectItem) Pos() token.Pos {
	// I'm not entirely sure what causes this, but removing this causes
	// a test failure. We should investigate at some point.
	if len(o.Keys) == 0 {
		return token.Pos{}
	}

	return o.Keys[0].Pos()
}
|
||||
|
||||
// ObjectKeys are either an identifier or of type string.
type ObjectKey struct {
	Token token.Token
}

// Pos returns the position of the key's token.
func (o *ObjectKey) Pos() token.Pos {
	return o.Token.Pos
}
|
||||
|
||||
// LiteralType represents a literal of basic type. Valid types are:
// token.NUMBER, token.FLOAT, token.BOOL and token.STRING
type LiteralType struct {
	Token token.Token

	// comment types, only used when in a list
	LeadComment *CommentGroup
	LineComment *CommentGroup
}

// Pos returns the position of the literal's token.
func (l *LiteralType) Pos() token.Pos {
	return l.Token.Pos
}
|
||||
|
||||
// ListType represents a HCL List type
type ListType struct {
	Lbrack token.Pos // position of "["
	Rbrack token.Pos // position of "]"
	List   []Node    // the elements in lexical order
}

// Pos returns the position of the opening bracket.
func (l *ListType) Pos() token.Pos {
	return l.Lbrack
}

// Add appends node to the list's elements.
func (l *ListType) Add(node Node) {
	l.List = append(l.List, node)
}
|
||||
|
||||
// ObjectType represents a HCL Object Type
type ObjectType struct {
	Lbrace token.Pos   // position of "{"
	Rbrace token.Pos   // position of "}"
	List   *ObjectList // the nodes in lexical order
}

// Pos returns the position of the opening brace.
func (o *ObjectType) Pos() token.Pos {
	return o.Lbrace
}
|
||||
|
||||
// Comment node represents a single //, # style or /*- style comment
type Comment struct {
	Start token.Pos // position of / or #
	Text  string
}

// Pos returns the starting position of the comment.
func (c *Comment) Pos() token.Pos {
	return c.Start
}
|
||||
|
||||
// CommentGroup node represents a sequence of comments with no other tokens and
// no empty lines between.
type CommentGroup struct {
	List []*Comment // len(List) > 0
}

// Pos returns the position of the first comment in the group.
func (c *CommentGroup) Pos() token.Pos {
	return c.List[0].Pos()
}
|
||||
|
||||
//-------------------------------------------------------------------
// GoStringer
//-------------------------------------------------------------------

// GoString implementations give %#v output that dereferences the pointer,
// making AST dumps readable in tests and debugging.
func (o *ObjectKey) GoString() string  { return fmt.Sprintf("*%#v", *o) }
func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) }
|
||||
52
vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
generated
vendored
Normal file
52
vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
generated
vendored
Normal file
@@ -0,0 +1,52 @@
|
||||
package ast
|
||||
|
||||
import "fmt"
|
||||
|
||||
// WalkFunc describes a function to be called for each node during a Walk. The
// returned node can be used to rewrite the AST. Walking stops if the returned
// bool is false.
type WalkFunc func(Node) (Node, bool)

// Walk traverses an AST in depth-first order: It starts by calling fn(node);
// node must not be nil. If fn returns true, Walk invokes fn recursively for
// each of the non-nil children of node, followed by a call of fn(nil). The
// returned node of fn can be used to rewrite the passed node to fn.
func Walk(node Node, fn WalkFunc) Node {
	rewritten, ok := fn(node)
	if !ok {
		return rewritten
	}

	// Recurse into children, storing each child's (possibly rewritten)
	// replacement back into the parent.
	switch n := node.(type) {
	case *File:
		n.Node = Walk(n.Node, fn)
	case *ObjectList:
		for i, item := range n.Items {
			n.Items[i] = Walk(item, fn).(*ObjectItem)
		}
	case *ObjectKey:
		// nothing to do
	case *ObjectItem:
		for i, k := range n.Keys {
			n.Keys[i] = Walk(k, fn).(*ObjectKey)
		}

		if n.Val != nil {
			n.Val = Walk(n.Val, fn)
		}
	case *LiteralType:
		// nothing to do
	case *ListType:
		for i, l := range n.List {
			n.List[i] = Walk(l, fn)
		}
	case *ObjectType:
		n.List = Walk(n.List, fn).(*ObjectList)
	default:
		// should we panic here?
		fmt.Printf("unknown type: %T\n", n)
	}

	// Signal to fn that this node's children are done.
	fn(nil)
	return rewritten
}
|
||||
17
vendor/github.com/hashicorp/hcl/hcl/parser/error.go
generated
vendored
Normal file
17
vendor/github.com/hashicorp/hcl/hcl/parser/error.go
generated
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
package parser
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/hcl/hcl/token"
|
||||
)
|
||||
|
||||
// PosError is a parse error that contains a position.
type PosError struct {
	Pos token.Pos
	Err error
}

// Error implements the error interface, prefixing the wrapped error's
// message with the source position.
func (e *PosError) Error() string {
	return fmt.Sprintf("At %s: %s", e.Pos, e.Err)
}
|
||||
520
vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
generated
vendored
Normal file
520
vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
generated
vendored
Normal file
@@ -0,0 +1,520 @@
|
||||
// Package parser implements a parser for HCL (HashiCorp Configuration
|
||||
// Language)
|
||||
package parser
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/hcl/hcl/ast"
|
||||
"github.com/hashicorp/hcl/hcl/scanner"
|
||||
"github.com/hashicorp/hcl/hcl/token"
|
||||
)
|
||||
|
||||
// Parser holds the state for parsing HCL source into an AST.
type Parser struct {
	sc *scanner.Scanner

	// Last read token
	tok       token.Token
	commaPrev token.Token

	comments    []*ast.CommentGroup
	leadComment *ast.CommentGroup // last lead comment
	lineComment *ast.CommentGroup // last line comment

	enableTrace bool
	indent      int
	n           int // buffer size (max = 1)
}
|
||||
|
||||
func newParser(src []byte) *Parser {
|
||||
return &Parser{
|
||||
sc: scanner.New(src),
|
||||
}
|
||||
}
|
||||
|
||||
// Parse returns the fully parsed source and returns the abstract syntax tree.
|
||||
func Parse(src []byte) (*ast.File, error) {
|
||||
// normalize all line endings
|
||||
// since the scanner and output only work with "\n" line endings, we may
|
||||
// end up with dangling "\r" characters in the parsed data.
|
||||
src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1)
|
||||
|
||||
p := newParser(src)
|
||||
return p.Parse()
|
||||
}
|
||||
|
||||
// errEofToken signals that the scanner produced an EOF token; callers use it
// as a sentinel to distinguish "finished input" from real parse errors.
var errEofToken = errors.New("EOF token found")

// Parse returns the fully parsed source and returns the abstract syntax tree.
func (p *Parser) Parse() (*ast.File, error) {
	f := &ast.File{}
	var err, scerr error
	// Capture the first scanner error via the error callback; scanner
	// errors take precedence over parser errors below.
	p.sc.Error = func(pos token.Pos, msg string) {
		scerr = &PosError{Pos: pos, Err: errors.New(msg)}
	}

	f.Node, err = p.objectList(false)
	if scerr != nil {
		return nil, scerr
	}
	if err != nil {
		return nil, err
	}

	f.Comments = p.comments
	return f, nil
}
|
||||
|
||||
// objectList parses a list of items within an object (generally k/v pairs).
// The parameter "obj" tells this whether we are within an object (braces:
// '{', '}') or just at the top level. If we're within an object, we end
// at an RBRACE.
func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) {
	defer un(trace(p, "ParseObjectList"))
	node := &ast.ObjectList{}

	for {
		if obj {
			// Peek (scan + unscan) so the RBRACE stays for the caller.
			tok := p.scan()
			p.unscan()
			if tok.Type == token.RBRACE {
				break
			}
		}

		n, err := p.objectItem()
		if err == errEofToken {
			break // we are finished
		}

		// we don't return a nil node, because might want to use already
		// collected items.
		if err != nil {
			return node, err
		}

		node.Add(n)

		// object lists can be optionally comma-delimited e.g. when a list of maps
		// is being expressed, so a comma is allowed here - it's simply consumed
		tok := p.scan()
		if tok.Type != token.COMMA {
			p.unscan()
		}
	}
	return node, nil
}
|
||||
|
||||
func (p *Parser) consumeComment() (comment *ast.Comment, endline int) {
|
||||
endline = p.tok.Pos.Line
|
||||
|
||||
// count the endline if it's multiline comment, ie starting with /*
|
||||
if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' {
|
||||
// don't use range here - no need to decode Unicode code points
|
||||
for i := 0; i < len(p.tok.Text); i++ {
|
||||
if p.tok.Text[i] == '\n' {
|
||||
endline++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text}
|
||||
p.tok = p.sc.Scan()
|
||||
return
|
||||
}
|
||||
|
||||
// consumeCommentGroup consumes consecutive COMMENT tokens that are at most n
// lines apart, bundles them into a CommentGroup, records the group on the
// parser, and returns it along with the line the group ends on.
func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
	var list []*ast.Comment
	endline = p.tok.Pos.Line

	for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n {
		var comment *ast.Comment
		comment, endline = p.consumeComment()
		list = append(list, comment)
	}

	// add comment group to the comments list
	comments = &ast.CommentGroup{List: list}
	p.comments = append(p.comments, comments)

	return
}
|
||||
|
||||
// objectItem parses a single object item: one or more keys followed by
// either an assignment ("=") or a nested object ("{ ... }"). Lead and line
// comments collected by the scanner callbacks are attached to the item.
func (p *Parser) objectItem() (*ast.ObjectItem, error) {
	defer un(trace(p, "ParseObjectItem"))

	keys, err := p.objectKey()
	if len(keys) > 0 && err == errEofToken {
		// We ignore eof token here since it is an error if we didn't
		// receive a value (but we did receive a key) for the item.
		err = nil
	}
	if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE {
		// This is a strange boolean statement, but what it means is:
		// We have keys with no value, and we're likely in an object
		// (since RBrace ends an object). For this, we set err to nil so
		// we continue and get the error below of having the wrong value
		// type.
		err = nil

		// Reset the token type so we don't think it completed fine. See
		// objectType which uses p.tok.Type to check if we're done with
		// the object.
		p.tok.Type = token.EOF
	}
	if err != nil {
		return nil, err
	}

	o := &ast.ObjectItem{
		Keys: keys,
	}

	if p.leadComment != nil {
		o.LeadComment = p.leadComment
		p.leadComment = nil
	}

	// The token after the keys decides the item's shape.
	switch p.tok.Type {
	case token.ASSIGN:
		o.Assign = p.tok.Pos
		o.Val, err = p.object()
		if err != nil {
			return nil, err
		}
	case token.LBRACE:
		o.Val, err = p.objectType()
		if err != nil {
			return nil, err
		}
	default:
		keyStr := make([]string, 0, len(keys))
		for _, k := range keys {
			keyStr = append(keyStr, k.Token.Text)
		}

		return nil, fmt.Errorf(
			"key '%s' expected start of object ('{') or assignment ('=')",
			strings.Join(keyStr, " "))
	}

	// do a look-ahead for line comment
	p.scan()
	if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil {
		o.LineComment = p.lineComment
		p.lineComment = nil
	}
	p.unscan()
	return o, nil
}
|
||||
|
||||
// objectKey parses an object key and returns a ObjectKey AST.
// It accumulates IDENT/STRING tokens until it reaches an ASSIGN (simple
// assignment) or LBRACE (nested object), validating the key count for each.
func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
	keyCount := 0
	keys := make([]*ast.ObjectKey, 0)

	for {
		tok := p.scan()
		switch tok.Type {
		case token.EOF:
			// It is very important to also return the keys here as well as
			// the error. This is because we need to be able to tell if we
			// did parse keys prior to finding the EOF, or if we just found
			// a bare EOF.
			return keys, errEofToken
		case token.ASSIGN:
			// assignment or object only, but not nested objects. this is not
			// allowed: `foo bar = {}`
			if keyCount > 1 {
				return nil, &PosError{
					Pos: p.tok.Pos,
					Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type),
				}
			}

			if keyCount == 0 {
				return nil, &PosError{
					Pos: p.tok.Pos,
					Err: errors.New("no object keys found!"),
				}
			}

			return keys, nil
		case token.LBRACE:
			var err error

			// If we have no keys, then it is a syntax error. i.e. {{}} is not
			// allowed.
			if len(keys) == 0 {
				err = &PosError{
					Pos: p.tok.Pos,
					Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type),
				}
			}

			// object
			return keys, err
		case token.IDENT, token.STRING:
			// Another key component; keep collecting.
			keyCount++
			keys = append(keys, &ast.ObjectKey{Token: p.tok})
		case token.ILLEGAL:
			return keys, &PosError{
				Pos: p.tok.Pos,
				Err: fmt.Errorf("illegal character"),
			}
		default:
			return keys, &PosError{
				Pos: p.tok.Pos,
				Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type),
			}
		}
	}
}
|
||||
|
||||
// object parses any type of object, such as number, bool, string, object or
// list. It dispatches on the next scanned token's type.
func (p *Parser) object() (ast.Node, error) {
	defer un(trace(p, "ParseType"))
	tok := p.scan()

	switch tok.Type {
	case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC:
		return p.literalType()
	case token.LBRACE:
		return p.objectType()
	case token.LBRACK:
		return p.listType()
	case token.COMMENT:
		// implement comment
	case token.EOF:
		return nil, errEofToken
	}

	// Any token not handled above (including the COMMENT case, which
	// currently falls through) is reported as unknown at its position.
	return nil, &PosError{
		Pos: tok.Pos,
		Err: fmt.Errorf("Unknown token: %+v", tok),
	}
}
|
||||
|
||||
// objectType parses an object type and returns a ObjectType AST.
// The caller must have already scanned the opening LBRACE.
func (p *Parser) objectType() (*ast.ObjectType, error) {
	defer un(trace(p, "ParseObjectType"))

	// we assume that the currently scanned token is a LBRACE
	o := &ast.ObjectType{
		Lbrace: p.tok.Pos,
	}

	l, err := p.objectList(true)

	// if we hit RBRACE, we are good to go (means we parsed all Items), if it's
	// not a RBRACE, it's an syntax error and we just return it.
	if err != nil && p.tok.Type != token.RBRACE {
		return nil, err
	}

	// No error, scan and expect the ending to be a brace
	if tok := p.scan(); tok.Type != token.RBRACE {
		return nil, fmt.Errorf("object expected closing RBRACE got: %s", tok.Type)
	}

	o.List = l
	o.Rbrace = p.tok.Pos // advanced via parseObjectList
	return o, nil
}
|
||||
|
||||
// listType parses a list type and returns a ListType AST.
// The caller must have already scanned the opening LBRACK. Elements may be
// literals, nested objects, or nested lists; needComma tracks whether a
// separator is required before the next element.
func (p *Parser) listType() (*ast.ListType, error) {
	defer un(trace(p, "ParseListType"))

	// we assume that the currently scanned token is a LBRACK
	l := &ast.ListType{
		Lbrack: p.tok.Pos,
	}

	needComma := false
	for {
		tok := p.scan()
		if needComma {
			// Only a separator or the list terminator may follow an element.
			switch tok.Type {
			case token.COMMA, token.RBRACK:
			default:
				return nil, &PosError{
					Pos: tok.Pos,
					Err: fmt.Errorf(
						"error parsing list, expected comma or list end, got: %s",
						tok.Type),
				}
			}
		}
		switch tok.Type {
		case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
			node, err := p.literalType()
			if err != nil {
				return nil, err
			}

			// If there is a lead comment, apply it
			if p.leadComment != nil {
				node.LeadComment = p.leadComment
				p.leadComment = nil
			}

			l.Add(node)
			needComma = true
		case token.COMMA:
			// get next list item or we are at the end
			// do a look-ahead for line comment
			p.scan()
			if p.lineComment != nil && len(l.List) > 0 {
				lit, ok := l.List[len(l.List)-1].(*ast.LiteralType)
				if ok {
					// Attach the trailing line comment to the element that
					// precedes this comma.
					lit.LineComment = p.lineComment
					l.List[len(l.List)-1] = lit
					p.lineComment = nil
				}
			}
			p.unscan()

			needComma = false
			continue
		case token.LBRACE:
			// Looks like a nested object, so parse it out
			node, err := p.objectType()
			if err != nil {
				return nil, &PosError{
					Pos: tok.Pos,
					Err: fmt.Errorf(
						"error while trying to parse object within list: %s", err),
				}
			}
			l.Add(node)
			needComma = true
		case token.LBRACK:
			// Nested list; parsed recursively.
			node, err := p.listType()
			if err != nil {
				return nil, &PosError{
					Pos: tok.Pos,
					Err: fmt.Errorf(
						"error while trying to parse list within list: %s", err),
				}
			}
			l.Add(node)
		case token.RBRACK:
			// finished
			l.Rbrack = p.tok.Pos
			return l, nil
		default:
			return nil, &PosError{
				Pos: tok.Pos,
				Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type),
			}
		}
	}
}
|
||||
|
||||
// literalType parses a literal type and returns a LiteralType AST
|
||||
func (p *Parser) literalType() (*ast.LiteralType, error) {
|
||||
defer un(trace(p, "ParseLiteral"))
|
||||
|
||||
return &ast.LiteralType{
|
||||
Token: p.tok,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// scan returns the next token from the underlying scanner. If a token has
// been unscanned then read that instead. In the process, it collects any
// comment groups encountered, and remembers the last lead and line comments
// in p.leadComment / p.lineComment for the parser to attach to AST nodes.
func (p *Parser) scan() token.Token {
	// If we have a token on the buffer, then return it.
	if p.n != 0 {
		p.n = 0
		return p.tok
	}

	// Otherwise read the next token from the scanner and Save it to the buffer
	// in case we unscan later.
	prev := p.tok
	p.tok = p.sc.Scan()

	if p.tok.Type == token.COMMENT {
		var comment *ast.CommentGroup
		var endline int

		// fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n",
		// p.tok.Pos.Line, prev.Pos.Line, endline)
		if p.tok.Pos.Line == prev.Pos.Line {
			// The comment is on same line as the previous token; it
			// cannot be a lead comment but may be a line comment.
			comment, endline = p.consumeCommentGroup(0)
			if p.tok.Pos.Line != endline {
				// The next token is on a different line, thus
				// the last comment group is a line comment.
				p.lineComment = comment
			}
		}

		// consume successor comments, if any
		endline = -1
		for p.tok.Type == token.COMMENT {
			comment, endline = p.consumeCommentGroup(1)
		}

		if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE {
			switch p.tok.Type {
			case token.RBRACE, token.RBRACK:
				// Do not count for these cases
			default:
				// The next token is following on the line immediately after the
				// comment group, thus the last comment group is a lead comment.
				p.leadComment = comment
			}
		}

	}

	return p.tok
}
|
||||
|
||||
// unscan pushes the previously read token back onto the buffer, so the next
// call to scan returns it again instead of reading from the scanner.
func (p *Parser) unscan() {
	p.n = 1
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Parsing support
|
||||
|
||||
func (p *Parser) printTrace(a ...interface{}) {
|
||||
if !p.enableTrace {
|
||||
return
|
||||
}
|
||||
|
||||
const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
|
||||
const n = len(dots)
|
||||
fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
|
||||
|
||||
i := 2 * p.indent
|
||||
for i > n {
|
||||
fmt.Print(dots)
|
||||
i -= n
|
||||
}
|
||||
// i <= n
|
||||
fmt.Print(dots[0:i])
|
||||
fmt.Println(a...)
|
||||
}
|
||||
|
||||
// trace prints an opening trace marker and increases the indent level.
// It returns p so it can be combined with un in a single defer statement.
func trace(p *Parser, msg string) *Parser {
	p.printTrace(msg, "(")
	p.indent++
	return p
}
|
||||
|
||||
// un decreases the indent level and prints a closing trace marker.
// Usage pattern: defer un(trace(p, "..."))
func un(p *Parser) {
	p.indent--
	p.printTrace(")")
}
|
||||
651
vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
generated
vendored
Normal file
651
vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
generated
vendored
Normal file
@@ -0,0 +1,651 @@
|
||||
// Package scanner implements a scanner for HCL (HashiCorp Configuration
|
||||
// Language) source text.
|
||||
package scanner
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/hashicorp/hcl/hcl/token"
|
||||
)
|
||||
|
||||
// eof represents a marker rune for the end of the reader. rune(0) is safe
// because a NUL byte is rejected by next() when data remains.
const eof = rune(0)
|
||||
|
||||
// Scanner defines a lexical scanner that tokenizes HCL source text.
// It tracks source positions for error reporting and exposes an optional
// Error callback plus a running ErrorCount.
type Scanner struct {
	buf *bytes.Buffer // Source buffer for advancing and scanning
	src []byte        // Source buffer for immutable access

	// Source Position
	srcPos  token.Pos // current position
	prevPos token.Pos // previous position, used for peek() method

	lastCharLen int // length of last character in bytes
	lastLineLen int // length of last line in characters (for correct column reporting)

	tokStart int // token text start position
	tokEnd   int // token text end position

	// Error is called for each error encountered. If no Error
	// function is set, the error is reported to os.Stderr.
	Error func(pos token.Pos, msg string)

	// ErrorCount is incremented by one for each error encountered.
	ErrorCount int

	// tokPos is the start position of most recently scanned token; set by
	// Scan. The Filename field is always left untouched by the Scanner. If
	// an error is reported (via Error) and Position is invalid, the scanner is
	// not inside a token.
	tokPos token.Pos
}
|
||||
|
||||
// New creates and initializes a new instance of Scanner using src as
|
||||
// its source content.
|
||||
func New(src []byte) *Scanner {
|
||||
// even though we accept a src, we read from a io.Reader compatible type
|
||||
// (*bytes.Buffer). So in the future we might easily change it to streaming
|
||||
// read.
|
||||
b := bytes.NewBuffer(src)
|
||||
s := &Scanner{
|
||||
buf: b,
|
||||
src: src,
|
||||
}
|
||||
|
||||
// srcPosition always starts with 1
|
||||
s.srcPos.Line = 1
|
||||
return s
|
||||
}
|
||||
|
||||
// next reads the next rune from the bufferred reader. Returns the rune(0) if
// an error occurs (or io.EOF is returned). It also maintains srcPos/prevPos
// and the per-character/per-line length bookkeeping used for column
// reporting and unread().
func (s *Scanner) next() rune {
	ch, size, err := s.buf.ReadRune()
	if err != nil {
		// advance for error reporting
		s.srcPos.Column++
		s.srcPos.Offset += size
		s.lastCharLen = size
		return eof
	}

	// A RuneError of size 1 means the input byte was not valid UTF-8.
	if ch == utf8.RuneError && size == 1 {
		s.srcPos.Column++
		s.srcPos.Offset += size
		s.lastCharLen = size
		s.err("illegal UTF-8 encoding")
		return ch
	}

	// remember last position
	s.prevPos = s.srcPos

	s.srcPos.Column++
	s.lastCharLen = size
	s.srcPos.Offset += size

	if ch == '\n' {
		s.srcPos.Line++
		s.lastLineLen = s.srcPos.Column
		s.srcPos.Column = 0
	}

	// If we see a null character with data left, then that is an error
	if ch == '\x00' && s.buf.Len() > 0 {
		s.err("unexpected null character (0x00)")
		return eof
	}

	// debug
	// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
	return ch
}
|
||||
|
||||
// unread unreads the previous read Rune and updates the source position
|
||||
func (s *Scanner) unread() {
|
||||
if err := s.buf.UnreadRune(); err != nil {
|
||||
panic(err) // this is user fault, we should catch it
|
||||
}
|
||||
s.srcPos = s.prevPos // put back last position
|
||||
}
|
||||
|
||||
// peek returns the next rune without advancing the reader.
|
||||
func (s *Scanner) peek() rune {
|
||||
peek, _, err := s.buf.ReadRune()
|
||||
if err != nil {
|
||||
return eof
|
||||
}
|
||||
|
||||
s.buf.UnreadRune()
|
||||
return peek
|
||||
}
|
||||
|
||||
// Scan scans the next token and returns the token. It skips leading
// whitespace, records the token's start position, dispatches to the
// per-kind scan helpers, and slices the token text out of s.src.
func (s *Scanner) Scan() token.Token {
	ch := s.next()

	// skip white space
	for isWhitespace(ch) {
		ch = s.next()
	}

	var tok token.Type

	// token text markings
	s.tokStart = s.srcPos.Offset - s.lastCharLen

	// token position, initial next() is moving the offset by one(size of rune
	// actually), though we are interested with the starting point
	s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
	if s.srcPos.Column > 0 {
		// common case: last character was not a '\n'
		s.tokPos.Line = s.srcPos.Line
		s.tokPos.Column = s.srcPos.Column
	} else {
		// last character was a '\n'
		// (we cannot be at the beginning of the source
		// since we have called next() at least once)
		s.tokPos.Line = s.srcPos.Line - 1
		s.tokPos.Column = s.lastLineLen
	}

	switch {
	case isLetter(ch):
		// Identifiers; the literals "true"/"false" become BOOL tokens.
		tok = token.IDENT
		lit := s.scanIdentifier()
		if lit == "true" || lit == "false" {
			tok = token.BOOL
		}
	case isDecimal(ch):
		tok = s.scanNumber(ch)
	default:
		switch ch {
		case eof:
			tok = token.EOF
		case '"':
			tok = token.STRING
			s.scanString()
		case '#', '/':
			tok = token.COMMENT
			s.scanComment(ch)
		case '.':
			// A leading '.' followed by a digit is a float (e.g. ".5").
			tok = token.PERIOD
			ch = s.peek()
			if isDecimal(ch) {
				tok = token.FLOAT
				ch = s.scanMantissa(ch)
				ch = s.scanExponent(ch)
			}
		case '<':
			tok = token.HEREDOC
			s.scanHeredoc()
		case '[':
			tok = token.LBRACK
		case ']':
			tok = token.RBRACK
		case '{':
			tok = token.LBRACE
		case '}':
			tok = token.RBRACE
		case ',':
			tok = token.COMMA
		case '=':
			tok = token.ASSIGN
		case '+':
			tok = token.ADD
		case '-':
			// '-' starts a negative number when a digit follows; otherwise
			// it is the SUB operator.
			if isDecimal(s.peek()) {
				ch := s.next()
				tok = s.scanNumber(ch)
			} else {
				tok = token.SUB
			}
		default:
			s.err("illegal char")
		}
	}

	// finish token ending
	s.tokEnd = s.srcPos.Offset

	// create token literal
	var tokenText string
	if s.tokStart >= 0 {
		tokenText = string(s.src[s.tokStart:s.tokEnd])
	}
	s.tokStart = s.tokEnd // ensure idempotency of tokenText() call

	return token.Token{
		Type: tok,
		Pos:  s.tokPos,
		Text: tokenText,
	}
}
|
||||
|
||||
// scanComment consumes a comment starting at the already-read rune ch.
// It handles '#' and '//' single-line comments as well as '/* ... */'
// block comments; unterminated block comments are reported via s.err.
func (s *Scanner) scanComment(ch rune) {
	// single line comments
	if ch == '#' || (ch == '/' && s.peek() != '*') {
		if ch == '/' && s.peek() != '/' {
			s.err("expected '/' for comment")
			return
		}

		ch = s.next()
		for ch != '\n' && ch >= 0 && ch != eof {
			ch = s.next()
		}
		// Put the terminating newline back so it is not part of the comment.
		if ch != eof && ch >= 0 {
			s.unread()
		}
		return
	}

	// be sure we get the character after /* This allows us to find comments
	// that are not terminated
	if ch == '/' {
		s.next()
		ch = s.next() // read character after "/*"
	}

	// look for /* - style comments
	for {
		if ch < 0 || ch == eof {
			s.err("comment not terminated")
			break
		}

		ch0 := ch
		ch = s.next()
		if ch0 == '*' && ch == '/' {
			break
		}
	}
}
|
||||
|
||||
// scanNumber scans a HCL number definition starting with the given rune.
// It distinguishes hexadecimal (0x...), octal (0...), decimal, and float
// forms (fraction and/or exponent) and returns token.NUMBER or token.FLOAT.
func (s *Scanner) scanNumber(ch rune) token.Type {
	if ch == '0' {
		// check for hexadecimal, octal or float
		ch = s.next()
		if ch == 'x' || ch == 'X' {
			// hexadecimal
			ch = s.next()
			found := false
			for isHexadecimal(ch) {
				ch = s.next()
				found = true
			}

			// "0x" with no digits after it is illegal.
			if !found {
				s.err("illegal hexadecimal number")
			}

			if ch != eof {
				s.unread()
			}

			return token.NUMBER
		}

		// now it's either something like: 0421(octal) or 0.1231(float)
		illegalOctal := false
		for isDecimal(ch) {
			ch = s.next()
			if ch == '8' || ch == '9' {
				// this is just a possibility. For example 0159 is illegal, but
				// 0159.23 is valid. So we mark a possible illegal octal. If
				// the next character is not a period, we'll print the error.
				illegalOctal = true
			}
		}

		if ch == 'e' || ch == 'E' {
			ch = s.scanExponent(ch)
			return token.FLOAT
		}

		if ch == '.' {
			ch = s.scanFraction(ch)

			if ch == 'e' || ch == 'E' {
				ch = s.next()
				ch = s.scanExponent(ch)
			}
			return token.FLOAT
		}

		if illegalOctal {
			s.err("illegal octal number")
		}

		if ch != eof {
			s.unread()
		}
		return token.NUMBER
	}

	// Plain decimal number: consume the integer part, then look for a
	// fraction or exponent that would make it a float.
	s.scanMantissa(ch)
	ch = s.next() // seek forward
	if ch == 'e' || ch == 'E' {
		ch = s.scanExponent(ch)
		return token.FLOAT
	}

	if ch == '.' {
		ch = s.scanFraction(ch)
		if ch == 'e' || ch == 'E' {
			ch = s.next()
			ch = s.scanExponent(ch)
		}
		return token.FLOAT
	}

	if ch != eof {
		s.unread()
	}
	return token.NUMBER
}
|
||||
|
||||
// scanMantissa scans the mantissa begining from the rune. It returns the next
// non decimal rune. It's used to determine wheter it's a fraction or exponent.
func (s *Scanner) scanMantissa(ch rune) rune {
	scanned := false
	for isDecimal(ch) {
		ch = s.next()
		scanned = true
	}

	// Only unread if we actually consumed digits; otherwise the caller's
	// rune was never replaced and there is nothing to push back.
	if scanned && ch != eof {
		s.unread()
	}
	return ch
}
|
||||
|
||||
// scanFraction scans the fraction after the '.' rune
|
||||
func (s *Scanner) scanFraction(ch rune) rune {
|
||||
if ch == '.' {
|
||||
ch = s.peek() // we peek just to see if we can move forward
|
||||
ch = s.scanMantissa(ch)
|
||||
}
|
||||
return ch
|
||||
}
|
||||
|
||||
// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
// rune. An optional sign may follow before the exponent digits. A rune that
// is not 'e'/'E' is returned unchanged.
func (s *Scanner) scanExponent(ch rune) rune {
	if ch == 'e' || ch == 'E' {
		ch = s.next()
		if ch == '-' || ch == '+' {
			ch = s.next()
		}
		ch = s.scanMantissa(ch)
	}
	return ch
}
|
||||
|
||||
// scanHeredoc scans a heredoc string. It reads the '<<IDENT' (or indented
// '<<-IDENT') anchor line, then consumes lines until one matches the anchor
// (via a regexp that, for the indented form, permits leading whitespace).
// Errors are reported through s.err.
func (s *Scanner) scanHeredoc() {
	// Scan the second '<' in example: '<<EOF'
	if s.next() != '<' {
		s.err("heredoc expected second '<', didn't see it")
		return
	}

	// Get the original offset so we can read just the heredoc ident
	offs := s.srcPos.Offset

	// Scan the identifier
	ch := s.next()

	// Indented heredoc syntax
	if ch == '-' {
		ch = s.next()
	}

	for isLetter(ch) || isDigit(ch) {
		ch = s.next()
	}

	// If we reached an EOF then that is not good
	if ch == eof {
		s.err("heredoc not terminated")
		return
	}

	// Ignore the '\r' in Windows line endings
	if ch == '\r' {
		if s.peek() == '\n' {
			ch = s.next()
		}
	}

	// If we didn't reach a newline then that is also not good
	if ch != '\n' {
		s.err("invalid characters in heredoc anchor")
		return
	}

	// Read the identifier
	identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen]
	if len(identBytes) == 0 {
		s.err("zero-length heredoc anchor")
		return
	}

	// For the '-' (indented) form, the terminator may be preceded by
	// whitespace, so match the ident without its leading '-'.
	var identRegexp *regexp.Regexp
	if identBytes[0] == '-' {
		identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes[1:]))
	} else {
		identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes))
	}

	// Read the actual string value
	lineStart := s.srcPos.Offset
	for {
		ch := s.next()

		// Special newline handling.
		if ch == '\n' {
			// Math is fast, so we first compare the byte counts to see if we have a chance
			// of seeing the same identifier - if the length is less than the number of bytes
			// in the identifier, this cannot be a valid terminator.
			lineBytesLen := s.srcPos.Offset - s.lastCharLen - lineStart
			if lineBytesLen >= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
				break
			}

			// Not an anchor match, record the start of a new line
			lineStart = s.srcPos.Offset
		}

		if ch == eof {
			s.err("heredoc not terminated")
			return
		}
	}

	return
}
|
||||
|
||||
// scanString scans a quoted string whose opening '"' has already been
// consumed. Quotes and newlines inside '${...}' interpolations are ignored
// by tracking the brace nesting depth; unterminated strings are reported
// via s.err.
func (s *Scanner) scanString() {
	braces := 0
	for {
		// '"' opening already consumed
		// read character after quote
		ch := s.next()

		if (ch == '\n' && braces == 0) || ch < 0 || ch == eof {
			s.err("literal not terminated")
			return
		}

		if ch == '"' && braces == 0 {
			break
		}

		// If we're going into a ${} then we can ignore quotes for awhile
		if braces == 0 && ch == '$' && s.peek() == '{' {
			braces++
			s.next()
		} else if braces > 0 && ch == '{' {
			braces++
		}
		if braces > 0 && ch == '}' {
			braces--
		}

		if ch == '\\' {
			s.scanEscape()
		}
	}

	return
}
|
||||
|
||||
// scanEscape scans an escape sequence (the '\' has already been consumed)
// and returns the last rune read. Supported forms follow C-style escapes:
// http://en.cppreference.com/w/cpp/language/escape
func (s *Scanner) scanEscape() rune {
	ch := s.next() // read character after '/'
	switch ch {
	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
		// nothing to do
	case '0', '1', '2', '3', '4', '5', '6', '7':
		// octal notation
		ch = s.scanDigits(ch, 8, 3)
	case 'x':
		// hexadecimal notation
		ch = s.scanDigits(s.next(), 16, 2)
	case 'u':
		// universal character name
		ch = s.scanDigits(s.next(), 16, 4)
	case 'U':
		// universal character name
		ch = s.scanDigits(s.next(), 16, 8)
	default:
		s.err("illegal char escape")
	}
	return ch
}
|
||||
|
||||
// scanDigits scans a rune with the given base for n times. For example an
// octal notation \184 would yield in scanDigits(ch, 8, 3). It reports an
// error if fewer than n digits were present and returns the last rune read.
func (s *Scanner) scanDigits(ch rune, base, n int) rune {
	start := n
	for n > 0 && digitVal(ch) < base {
		ch = s.next()
		if ch == eof {
			// If we see an EOF, we halt any more scanning of digits
			// immediately.
			break
		}

		n--
	}
	if n > 0 {
		s.err("illegal char escape")
	}

	if n != start {
		// we scanned all digits, put the last non digit char back,
		// only if we read anything at all
		s.unread()
	}

	return ch
}
|
||||
|
||||
// scanIdentifier scans an identifier and returns the literal string.
// Identifiers may contain letters, digits, '-' and '.' after the first
// character (which the caller has already read).
func (s *Scanner) scanIdentifier() string {
	offs := s.srcPos.Offset - s.lastCharLen
	ch := s.next()
	for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' {
		ch = s.next()
	}

	if ch != eof {
		s.unread() // we got identifier, put back latest char
	}

	return string(s.src[offs:s.srcPos.Offset])
}
|
||||
|
||||
// recentPosition returns the position of the character immediately after the
// character or token returned by the last call to Scan.
func (s *Scanner) recentPosition() (pos token.Pos) {
	pos.Offset = s.srcPos.Offset - s.lastCharLen
	switch {
	case s.srcPos.Column > 0:
		// common case: last character was not a '\n'
		pos.Line = s.srcPos.Line
		pos.Column = s.srcPos.Column
	case s.lastLineLen > 0:
		// last character was a '\n'
		// (we cannot be at the beginning of the source
		// since we have called next() at least once)
		pos.Line = s.srcPos.Line - 1
		pos.Column = s.lastLineLen
	default:
		// at the beginning of the source
		pos.Line = 1
		pos.Column = 1
	}
	return
}
|
||||
|
||||
// err prints the error of any scanning to s.Error function. If the function is
|
||||
// not defined, by default it prints them to os.Stderr
|
||||
func (s *Scanner) err(msg string) {
|
||||
s.ErrorCount++
|
||||
pos := s.recentPosition()
|
||||
|
||||
if s.Error != nil {
|
||||
s.Error(pos, msg)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
|
||||
}
|
||||
|
||||
// isLetter returns true if the given rune is a letter: an ASCII letter,
// an underscore, or (for non-ASCII runes) any Unicode letter.
// (The previous comment incorrectly named this function isHexadecimal.)
func isLetter(ch rune) bool {
	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
}
|
||||
|
||||
// isDigit returns true if the given rune is a decimal digit, either an
// ASCII digit or (for non-ASCII runes) any Unicode decimal digit.
func isDigit(ch rune) bool {
	if '0' <= ch && ch <= '9' {
		return true
	}
	return ch >= 0x80 && unicode.IsDigit(ch)
}
|
||||
|
||||
// isDecimal returns true if the given rune is an ASCII decimal digit
// ('0' through '9').
func isDecimal(ch rune) bool {
	return ch >= '0' && ch <= '9'
}
|
||||
|
||||
// isHexadecimal returns true if the given rune is a hexadecimal digit
// (0-9, a-f, or A-F).
func isHexadecimal(ch rune) bool {
	switch {
	case ch >= '0' && ch <= '9', ch >= 'a' && ch <= 'f', ch >= 'A' && ch <= 'F':
		return true
	}
	return false
}
|
||||
|
||||
// isWhitespace returns true if the rune is a space, tab, newline or
// carriage return.
func isWhitespace(ch rune) bool {
	switch ch {
	case ' ', '\t', '\n', '\r':
		return true
	}
	return false
}
|
||||
|
||||
// digitVal returns the integer value of a given octal, decimal or
// hexadecimal digit rune. For any other rune it returns 16, which is
// larger than every legal digit value.
func digitVal(ch rune) int {
	if ch >= '0' && ch <= '9' {
		return int(ch - '0')
	}
	if ch >= 'a' && ch <= 'f' {
		return int(ch-'a') + 10
	}
	if ch >= 'A' && ch <= 'F' {
		return int(ch-'A') + 10
	}
	return 16 // larger than any legal digit val
}
|
||||
241
vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
generated
vendored
Normal file
241
vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
generated
vendored
Normal file
@@ -0,0 +1,241 @@
|
||||
package strconv
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// ErrSyntax indicates that a value does not have the right syntax for the
// target type. It is the only error value returned by Unquote.
var ErrSyntax = errors.New("invalid syntax")
|
||||
|
||||
// Unquote interprets s as a single-quoted, double-quoted,
// or backquoted Go string literal, returning the string value
// that s quotes. (If s is single-quoted, it would be a Go
// character literal; Unquote returns the corresponding
// one-character string.)
//
// Unlike Go's strconv.Unquote, this variant leaves the contents of HCL
// '${...}' interpolation sequences untouched (no escape processing inside
// them) and only accepts double-quoted input.
func Unquote(s string) (t string, err error) {
	n := len(s)
	if n < 2 {
		return "", ErrSyntax
	}
	quote := s[0]
	if quote != s[n-1] {
		return "", ErrSyntax
	}
	s = s[1 : n-1]

	// Only double-quoted strings are valid HCL string literals.
	if quote != '"' {
		return "", ErrSyntax
	}
	// A raw newline is only allowed when the string may contain an
	// interpolation; otherwise it is a syntax error.
	if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') {
		return "", ErrSyntax
	}

	// Is it trivial? Avoid allocation.
	if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') {
		switch quote {
		case '"':
			return s, nil
		case '\'':
			r, size := utf8.DecodeRuneInString(s)
			if size == len(s) && (r != utf8.RuneError || size != 1) {
				return s, nil
			}
		}
	}

	var runeTmp [utf8.UTFMax]byte
	buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
	for len(s) > 0 {
		// If we're starting a '${}' then let it through un-unquoted.
		// Specifically: we don't unquote any characters within the `${}`
		// section.
		if s[0] == '$' && len(s) > 1 && s[1] == '{' {
			buf = append(buf, '$', '{')
			s = s[2:]

			// Continue reading until we find the closing brace, copying as-is
			braces := 1
			for len(s) > 0 && braces > 0 {
				r, size := utf8.DecodeRuneInString(s)
				if r == utf8.RuneError {
					return "", ErrSyntax
				}

				s = s[size:]

				n := utf8.EncodeRune(runeTmp[:], r)
				buf = append(buf, runeTmp[:n]...)

				switch r {
				case '{':
					braces++
				case '}':
					braces--
				}
			}
			if braces != 0 {
				return "", ErrSyntax
			}
			if len(s) == 0 {
				// If there's no string left, we're done!
				break
			} else {
				// If there's more left, we need to pop back up to the top of the loop
				// in case there's another interpolation in this string.
				continue
			}
		}

		// Bare newlines outside an interpolation are rejected.
		if s[0] == '\n' {
			return "", ErrSyntax
		}

		c, multibyte, ss, err := unquoteChar(s, quote)
		if err != nil {
			return "", err
		}
		s = ss
		if c < utf8.RuneSelf || !multibyte {
			buf = append(buf, byte(c))
		} else {
			n := utf8.EncodeRune(runeTmp[:], c)
			buf = append(buf, runeTmp[:n]...)
		}
		if quote == '\'' && len(s) != 0 {
			// single-quoted must be single character
			return "", ErrSyntax
		}
	}
	return string(buf), nil
}
|
||||
|
||||
// contains reports whether the string contains the byte c.
func contains(s string, c byte) bool {
	for i := len(s) - 1; i >= 0; i-- {
		if s[i] == c {
			return true
		}
	}
	return false
}
|
||||
|
||||
// unhex converts a single hexadecimal digit byte to its numeric value.
// ok is false when b is not a valid hex digit.
func unhex(b byte) (v rune, ok bool) {
	c := rune(b)
	if c >= '0' && c <= '9' {
		return c - '0', true
	}
	if c >= 'a' && c <= 'f' {
		return c - 'a' + 10, true
	}
	if c >= 'A' && c <= 'F' {
		return c - 'A' + 10, true
	}
	return 0, false
}
|
||||
|
||||
// unquoteChar decodes the first character or escape sequence of s, which is
// the body of a string quoted with the given quote byte. It returns the
// decoded rune, whether it was a multibyte escape (\u/\U or raw non-ASCII),
// the remaining tail of s, and ErrSyntax on malformed input.
func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
	// easy cases
	switch c := s[0]; {
	case c == quote && (quote == '\'' || quote == '"'):
		err = ErrSyntax
		return
	case c >= utf8.RuneSelf:
		r, size := utf8.DecodeRuneInString(s)
		return r, true, s[size:], nil
	case c != '\\':
		return rune(s[0]), false, s[1:], nil
	}

	// hard case: c is backslash
	if len(s) <= 1 {
		err = ErrSyntax
		return
	}
	c := s[1]
	s = s[2:]

	switch c {
	case 'a':
		value = '\a'
	case 'b':
		value = '\b'
	case 'f':
		value = '\f'
	case 'n':
		value = '\n'
	case 'r':
		value = '\r'
	case 't':
		value = '\t'
	case 'v':
		value = '\v'
	case 'x', 'u', 'U':
		// Fixed-width hex escapes: \xHH, \uHHHH, \UHHHHHHHH.
		n := 0
		switch c {
		case 'x':
			n = 2
		case 'u':
			n = 4
		case 'U':
			n = 8
		}
		var v rune
		if len(s) < n {
			err = ErrSyntax
			return
		}
		for j := 0; j < n; j++ {
			x, ok := unhex(s[j])
			if !ok {
				err = ErrSyntax
				return
			}
			v = v<<4 | x
		}
		s = s[n:]
		if c == 'x' {
			// single-byte string, possibly not UTF-8
			value = v
			break
		}
		if v > utf8.MaxRune {
			err = ErrSyntax
			return
		}
		value = v
		multibyte = true
	case '0', '1', '2', '3', '4', '5', '6', '7':
		// Octal escape: exactly three digits, value must fit in a byte.
		v := rune(c) - '0'
		if len(s) < 2 {
			err = ErrSyntax
			return
		}
		for j := 0; j < 2; j++ { // one digit already; two more
			x := rune(s[j]) - '0'
			if x < 0 || x > 7 {
				err = ErrSyntax
				return
			}
			v = (v << 3) | x
		}
		s = s[2:]
		if v > 255 {
			err = ErrSyntax
			return
		}
		value = v
	case '\\':
		value = '\\'
	case '\'', '"':
		// A quote escape is only valid for the enclosing quote kind.
		if c != quote {
			err = ErrSyntax
			return
		}
		value = rune(c)
	default:
		err = ErrSyntax
		return
	}
	tail = s
	return
}
|
||||
46
vendor/github.com/hashicorp/hcl/hcl/token/position.go
generated
vendored
Normal file
46
vendor/github.com/hashicorp/hcl/hcl/token/position.go
generated
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
package token
|
||||
|
||||
import "fmt"
|
||||
|
||||
// Pos describes an arbitrary source position
// including the file, line, and column location.
// A Position is valid if the line number is > 0.
type Pos struct {
	Filename string // filename, if any
	Offset   int    // byte offset into the source, starting at 0
	Line     int    // line number, starting at 1
	Column   int    // column number, starting at 1 (character count)
}
|
||||
|
||||
// IsValid returns true if the position is valid.
|
||||
func (p *Pos) IsValid() bool { return p.Line > 0 }
|
||||
|
||||
// String returns a string in one of several forms:
|
||||
//
|
||||
// file:line:column valid position with file name
|
||||
// line:column valid position without file name
|
||||
// file invalid position with file name
|
||||
// - invalid position without file name
|
||||
func (p Pos) String() string {
|
||||
s := p.Filename
|
||||
if p.IsValid() {
|
||||
if s != "" {
|
||||
s += ":"
|
||||
}
|
||||
s += fmt.Sprintf("%d:%d", p.Line, p.Column)
|
||||
}
|
||||
if s == "" {
|
||||
s = "-"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Before reports whether the position p is before u.
|
||||
func (p Pos) Before(u Pos) bool {
|
||||
return u.Offset > p.Offset || u.Line > p.Line
|
||||
}
|
||||
|
||||
// After reports whether the position p is after u.
|
||||
func (p Pos) After(u Pos) bool {
|
||||
return u.Offset < p.Offset || u.Line < p.Line
|
||||
}
|
||||
219
vendor/github.com/hashicorp/hcl/hcl/token/token.go
generated
vendored
Normal file
219
vendor/github.com/hashicorp/hcl/hcl/token/token.go
generated
vendored
Normal file
@@ -0,0 +1,219 @@
|
||||
// Package token defines constants representing the lexical tokens for HCL
|
||||
// (HashiCorp Configuration Language)
|
||||
package token
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
|
||||
)
|
||||
|
||||
// Token defines a single HCL token which can be obtained via the Scanner
type Token struct {
	Type Type   // lexical token kind (IDENT, STRING, NUMBER, ...)
	Pos  Pos    // source position where the token text begins
	Text string // raw token text as it appeared in the input
	JSON bool   // true when the token was produced from JSON input; affects string unquoting in Value
}
|
||||
|
||||
// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
type Type int

const (
	// Special tokens
	ILLEGAL Type = iota
	EOF
	COMMENT

	// identifier_beg/identifier_end and literal_beg/literal_end are
	// unexported sentinels delimiting the ranges tested by IsIdentifier
	// and IsLiteral.
	identifier_beg
	IDENT // literals
	literal_beg
	NUMBER  // 12345
	FLOAT   // 123.45
	BOOL    // true,false
	STRING  // "abc"
	HEREDOC // <<FOO\nbar\nFOO
	literal_end
	identifier_end

	// operator_beg/operator_end delimit the range tested by IsOperator.
	operator_beg
	LBRACK // [
	LBRACE // {
	COMMA  // ,
	PERIOD // .

	RBRACK // ]
	RBRACE // }

	ASSIGN // =
	ADD    // +
	SUB    // -
	operator_end
)

// tokens maps each Type to its display name; sentinel indices are left
// empty so Type.String falls back to "token(n)" for them.
var tokens = [...]string{
	ILLEGAL: "ILLEGAL",

	EOF:     "EOF",
	COMMENT: "COMMENT",

	IDENT:  "IDENT",
	NUMBER: "NUMBER",
	FLOAT:  "FLOAT",
	BOOL:   "BOOL",
	STRING: "STRING",

	LBRACK:  "LBRACK",
	LBRACE:  "LBRACE",
	COMMA:   "COMMA",
	PERIOD:  "PERIOD",
	HEREDOC: "HEREDOC",

	RBRACK: "RBRACK",
	RBRACE: "RBRACE",

	ASSIGN: "ASSIGN",
	ADD:    "ADD",
	SUB:    "SUB",
}
|
||||
|
||||
// String returns the string corresponding to the token tok.
|
||||
func (t Type) String() string {
|
||||
s := ""
|
||||
if 0 <= t && t < Type(len(tokens)) {
|
||||
s = tokens[t]
|
||||
}
|
||||
if s == "" {
|
||||
s = "token(" + strconv.Itoa(int(t)) + ")"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// IsIdentifier returns true for tokens corresponding to identifiers and basic
|
||||
// type literals; it returns false otherwise.
|
||||
func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
|
||||
|
||||
// IsLiteral returns true for tokens corresponding to basic type literals; it
|
||||
// returns false otherwise.
|
||||
func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
|
||||
|
||||
// IsOperator returns true for tokens corresponding to operators and
|
||||
// delimiters; it returns false otherwise.
|
||||
func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
|
||||
|
||||
// String returns the token's literal text. Note that this is only
|
||||
// applicable for certain token types, such as token.IDENT,
|
||||
// token.STRING, etc..
|
||||
func (t Token) String() string {
|
||||
return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
|
||||
}
|
||||
|
||||
// Value returns the properly typed value for this token. The type of
// the returned interface{} is guaranteed based on the Type field:
// bool for BOOL, float64 for FLOAT, int64 for NUMBER, string for
// IDENT/HEREDOC/STRING.
//
// This can only be called for literal types. If it is called for any other
// type, this will panic. It also panics when the token text cannot be
// parsed as its declared type.
func (t Token) Value() interface{} {
	switch t.Type {
	case BOOL:
		if t.Text == "true" {
			return true
		} else if t.Text == "false" {
			return false
		}

		panic("unknown bool value: " + t.Text)
	case FLOAT:
		v, err := strconv.ParseFloat(t.Text, 64)
		if err != nil {
			panic(err)
		}

		return float64(v)
	case NUMBER:
		// base 0 lets ParseInt honor 0x / 0 prefixes in the source text
		v, err := strconv.ParseInt(t.Text, 0, 64)
		if err != nil {
			panic(err)
		}

		return int64(v)
	case IDENT:
		return t.Text
	case HEREDOC:
		return unindentHeredoc(t.Text)
	case STRING:
		// Determine the Unquote method to use. If it came from JSON,
		// then we need to use the built-in unquote since we have to
		// escape interpolations there.
		f := hclstrconv.Unquote
		if t.JSON {
			f = strconv.Unquote
		}

		// This case occurs if json null is used
		if t.Text == "" {
			return ""
		}

		v, err := f(t.Text)
		if err != nil {
			panic(fmt.Sprintf("unquote %s err: %s", t.Text, err))
		}

		return v
	default:
		panic(fmt.Sprintf("unimplemented Value for type: %s", t.Type))
	}
}
|
||||
|
||||
// unindentHeredoc returns the string content of a HEREDOC if it is started with <<
// and the content of a HEREDOC with the hanging indent removed if it is started with
// a <<-, and the terminating line is at least as indented as the least indented line.
//
// NOTE(review): the end-trimming arithmetic (len(heredoc)-idx+1 and -idx+2)
// appears to assume the closing marker line has the same length as the
// opening "<<MARKER" line (idx bytes) — confirm against the HCL scanner's
// heredoc token format before changing.
func unindentHeredoc(heredoc string) string {
	// We need to find the end of the marker
	idx := strings.IndexByte(heredoc, '\n')
	if idx == -1 {
		panic("heredoc doesn't contain newline")
	}

	// heredoc[2] is the character right after "<<"; '-' selects the
	// unindenting variant (<<-MARKER)
	unindent := heredoc[2] == '-'

	// We can optimize if the heredoc isn't marked for indentation
	if !unindent {
		return string(heredoc[idx+1 : len(heredoc)-idx+1])
	}

	// We need to unindent each line based on the indentation level of the marker
	lines := strings.Split(string(heredoc[idx+1:len(heredoc)-idx+2]), "\n")
	// the final line holds the whitespace that precedes the closing marker
	whitespacePrefix := lines[len(lines)-1]

	isIndented := true
	for _, v := range lines {
		if strings.HasPrefix(v, whitespacePrefix) {
			continue
		}

		isIndented = false
		break
	}

	// If all lines are not at least as indented as the terminating mark, return the
	// heredoc as is, but trim the leading space from the marker on the final line.
	if !isIndented {
		return strings.TrimRight(string(heredoc[idx+1:len(heredoc)-idx+1]), " \t")
	}

	unindentedLines := make([]string, len(lines))
	for k, v := range lines {
		if k == len(lines)-1 {
			// drop the marker's own indentation entirely
			unindentedLines[k] = ""
			break
		}

		unindentedLines[k] = strings.TrimPrefix(v, whitespacePrefix)
	}

	return strings.Join(unindentedLines, "\n")
}
|
||||
117
vendor/github.com/hashicorp/hcl/json/parser/flatten.go
generated
vendored
Normal file
117
vendor/github.com/hashicorp/hcl/json/parser/flatten.go
generated
vendored
Normal file
@@ -0,0 +1,117 @@
|
||||
package parser
|
||||
|
||||
import "github.com/hashicorp/hcl/hcl/ast"
|
||||
|
||||
// flattenObjects takes an AST node, walks it, and flattens nested
// object/list values into multiple ObjectItems with compound keys, making
// the JSON-derived AST shape match what the native HCL parser produces.
// The AST is mutated in place.
func flattenObjects(node ast.Node) {
	ast.Walk(node, func(n ast.Node) (ast.Node, bool) {
		// We only care about lists, because this is what we modify
		list, ok := n.(*ast.ObjectList)
		if !ok {
			return n, true
		}

		// Rebuild the item list using an explicit work stack ("frontier")
		// instead of recursion.
		items := make([]*ast.ObjectItem, 0, len(list.Items))
		frontier := make([]*ast.ObjectItem, len(list.Items))
		copy(frontier, list.Items)
		for len(frontier) > 0 {
			// Pop the current item
			n := len(frontier)
			item := frontier[n-1]
			frontier = frontier[:n-1]

			switch v := item.Val.(type) {
			case *ast.ObjectType:
				items, frontier = flattenObjectType(v, item, items, frontier)
			case *ast.ListType:
				items, frontier = flattenListType(v, item, items, frontier)
			default:
				// scalar values are kept as-is
				items = append(items, item)
			}
		}

		// Reverse the list since the frontier model runs things backwards
		for i := len(items)/2 - 1; i >= 0; i-- {
			opp := len(items) - 1 - i
			items[i], items[opp] = items[opp], items[i]
		}

		// Done! Set the original items
		list.Items = items
		return n, true
	})
}
|
||||
|
||||
// flattenListType expands a list value whose elements are all objects into
// one frontier entry per element, each reusing the parent item's keys.
// Lists that are empty or contain any non-object element are kept intact.
// It returns the (possibly extended) items and frontier slices.
func flattenListType(
	ot *ast.ListType,
	item *ast.ObjectItem,
	items []*ast.ObjectItem,
	frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
	// If the list is empty, keep the original list
	if len(ot.List) == 0 {
		items = append(items, item)
		return items, frontier
	}

	// All the elements of this object must also be objects!
	for _, subitem := range ot.List {
		if _, ok := subitem.(*ast.ObjectType); !ok {
			items = append(items, item)
			return items, frontier
		}
	}

	// Great! We have a match go through all the items and flatten
	for _, elem := range ot.List {
		// Add it to the frontier so that we can recurse
		frontier = append(frontier, &ast.ObjectItem{
			Keys:        item.Keys,
			Assign:      item.Assign,
			Val:         elem,
			LeadComment: item.LeadComment,
			LineComment: item.LineComment,
		})
	}

	return items, frontier
}
|
||||
|
||||
// flattenObjectType expands an object value whose members are all objects
// into one frontier entry per member, concatenating the parent keys with
// each member's keys. Objects with nil item lists or any non-object member
// are kept intact. It returns the (possibly extended) items and frontier
// slices.
func flattenObjectType(
	ot *ast.ObjectType,
	item *ast.ObjectItem,
	items []*ast.ObjectItem,
	frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
	// If the list has no items we do not have to flatten anything
	if ot.List.Items == nil {
		items = append(items, item)
		return items, frontier
	}

	// All the elements of this object must also be objects!
	for _, subitem := range ot.List.Items {
		if _, ok := subitem.Val.(*ast.ObjectType); !ok {
			items = append(items, item)
			return items, frontier
		}
	}

	// Great! We have a match go through all the items and flatten
	for _, subitem := range ot.List.Items {
		// Copy the new key: parent keys followed by the member's keys
		keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys))
		copy(keys, item.Keys)
		copy(keys[len(item.Keys):], subitem.Keys)

		// Add it to the frontier so that we can recurse
		frontier = append(frontier, &ast.ObjectItem{
			Keys:        keys,
			Assign:      item.Assign,
			Val:         subitem.Val,
			LeadComment: item.LeadComment,
			LineComment: item.LineComment,
		})
	}

	return items, frontier
}
|
||||
313
vendor/github.com/hashicorp/hcl/json/parser/parser.go
generated
vendored
Normal file
313
vendor/github.com/hashicorp/hcl/json/parser/parser.go
generated
vendored
Normal file
@@ -0,0 +1,313 @@
|
||||
package parser
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/hcl/hcl/ast"
|
||||
hcltoken "github.com/hashicorp/hcl/hcl/token"
|
||||
"github.com/hashicorp/hcl/json/scanner"
|
||||
"github.com/hashicorp/hcl/json/token"
|
||||
)
|
||||
|
||||
// Parser is a recursive-descent parser that builds an HCL AST from
// JSON input tokens produced by the scanner.
type Parser struct {
	sc *scanner.Scanner

	// Last read token
	tok       token.Token
	commaPrev token.Token

	enableTrace bool // when true, printTrace logs parser entry/exit
	indent      int  // current trace indentation depth
	n           int  // buffer size (max = 1)
}
|
||||
|
||||
func newParser(src []byte) *Parser {
|
||||
return &Parser{
|
||||
sc: scanner.New(src),
|
||||
}
|
||||
}
|
||||
|
||||
// Parse returns the fully parsed source and returns the abstract syntax tree.
|
||||
func Parse(src []byte) (*ast.File, error) {
|
||||
p := newParser(src)
|
||||
return p.Parse()
|
||||
}
|
||||
|
||||
// errEofToken signals that the scanner reached end of input; the parser
// uses it as a loop-termination sentinel rather than a real failure.
var errEofToken = errors.New("EOF token found")
|
||||
|
||||
// Parse returns the fully parsed source and returns the abstract syntax tree.
// Scanner errors take precedence over parser errors when both occur.
func (p *Parser) Parse() (*ast.File, error) {
	f := &ast.File{}
	var err, scerr error
	// capture the first scanner error via the error callback
	p.sc.Error = func(pos token.Pos, msg string) {
		scerr = fmt.Errorf("%s: %s", pos, msg)
	}

	// The root must be an object in JSON
	object, err := p.object()
	if scerr != nil {
		return nil, scerr
	}
	if err != nil {
		return nil, err
	}

	// We make our final node an object list so it is more HCL compatible
	f.Node = object.List

	// Flatten it, which finds patterns and turns them into more HCL-like
	// AST trees.
	flattenObjects(f.Node)

	return f, nil
}
|
||||
|
||||
// objectList parses consecutive comma-separated object items until EOF or
// a non-comma token, accumulating them into an ObjectList. On error the
// partially built list is returned alongside the error.
func (p *Parser) objectList() (*ast.ObjectList, error) {
	defer un(trace(p, "ParseObjectList"))
	node := &ast.ObjectList{}

	for {
		n, err := p.objectItem()
		if err == errEofToken {
			break // we are finished
		}

		// we don't return a nil node, because might want to use already
		// collected items.
		if err != nil {
			return node, err
		}

		node.Add(n)

		// Check for a followup comma. If it isn't a comma, then we're done
		if tok := p.scan(); tok.Type != token.COMMA {
			break
		}
	}

	return node, nil
}
|
||||
|
||||
// objectItem parses a single object item: its key(s) and, when followed
// by a colon, its value. The colon position is recorded as the HCL
// assignment position.
func (p *Parser) objectItem() (*ast.ObjectItem, error) {
	defer un(trace(p, "ParseObjectItem"))

	keys, err := p.objectKey()
	if err != nil {
		return nil, err
	}

	o := &ast.ObjectItem{
		Keys: keys,
	}

	switch p.tok.Type {
	case token.COLON:
		// translate the JSON token position into the HCL position type
		pos := p.tok.Pos
		o.Assign = hcltoken.Pos{
			Filename: pos.Filename,
			Offset:   pos.Offset,
			Line:     pos.Line,
			Column:   pos.Column,
		}

		o.Val, err = p.objectValue()
		if err != nil {
			return nil, err
		}
	}

	return o, nil
}
|
||||
|
||||
// objectKey parses an object key and returns a ObjectKey AST.
// It consumes tokens until the colon that separates key from value;
// any token other than STRING before that colon is a syntax error.
func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
	keyCount := 0
	keys := make([]*ast.ObjectKey, 0)

	for {
		tok := p.scan()
		switch tok.Type {
		case token.EOF:
			return nil, errEofToken
		case token.STRING:
			keyCount++
			keys = append(keys, &ast.ObjectKey{
				Token: p.tok.HCLToken(),
			})
		case token.COLON:
			// If we have a zero keycount it means that we never got
			// an object key, i.e. `{ :`. This is a syntax error.
			if keyCount == 0 {
				return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
			}

			// Done
			return keys, nil
		case token.ILLEGAL:
			return nil, errors.New("illegal")
		default:
			return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
		}
	}
}
|
||||
|
||||
// objectValue parses any JSON value appearing after a colon: a literal
// (number, float, bool, null, string), a nested object, or a list.
func (p *Parser) objectValue() (ast.Node, error) {
	defer un(trace(p, "ParseObjectValue"))
	tok := p.scan()

	switch tok.Type {
	case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING:
		return p.literalType()
	case token.LBRACE:
		return p.objectType()
	case token.LBRACK:
		return p.listType()
	case token.EOF:
		return nil, errEofToken
	}

	return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok)
}
|
||||
|
||||
// object parses the document root, which in JSON must be an object
// (an opening left brace).
func (p *Parser) object() (*ast.ObjectType, error) {
	defer un(trace(p, "ParseType"))
	tok := p.scan()

	switch tok.Type {
	case token.LBRACE:
		return p.objectType()
	case token.EOF:
		return nil, errEofToken
	}

	return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok)
}
|
||||
|
||||
// objectType parses an object type and returns a ObjectType AST
func (p *Parser) objectType() (*ast.ObjectType, error) {
	defer un(trace(p, "ParseObjectType"))

	// we assume that the currently scanned token is a LBRACE
	o := &ast.ObjectType{}

	l, err := p.objectList()

	// if we hit RBRACE, we are good to go (means we parsed all Items), if it's
	// not a RBRACE, it's an syntax error and we just return it.
	if err != nil && p.tok.Type != token.RBRACE {
		return nil, err
	}

	o.List = l
	return o, nil
}
|
||||
|
||||
// listType parses a list type and returns a ListType AST.
// Elements may be literals or objects; commas are skipped; a right
// bracket ends the list.
func (p *Parser) listType() (*ast.ListType, error) {
	defer un(trace(p, "ParseListType"))

	// we assume that the currently scanned token is a LBRACK
	l := &ast.ListType{}

	for {
		tok := p.scan()
		switch tok.Type {
		case token.NUMBER, token.FLOAT, token.STRING:
			node, err := p.literalType()
			if err != nil {
				return nil, err
			}

			l.Add(node)
		case token.COMMA:
			continue
		case token.LBRACE:
			node, err := p.objectType()
			if err != nil {
				return nil, err
			}

			l.Add(node)
		case token.BOOL:
			// TODO(arslan) should we support? not supported by HCL yet
		case token.LBRACK:
			// TODO(arslan) should we support nested lists? Even though it's
			// written in README of HCL, it's not a part of the grammar
			// (not defined in parse.y)
		case token.RBRACK:
			// finished
			return l, nil
		default:
			return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type)
		}

	}
}
|
||||
|
||||
// literalType parses a literal type and returns a LiteralType AST
|
||||
func (p *Parser) literalType() (*ast.LiteralType, error) {
|
||||
defer un(trace(p, "ParseLiteral"))
|
||||
|
||||
return &ast.LiteralType{
|
||||
Token: p.tok.HCLToken(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// scan returns the next token from the underlying scanner. If a token has
// been unscanned then read that instead. The one-slot buffer (p.n) is
// consumed before touching the scanner.
func (p *Parser) scan() token.Token {
	// If we have a token on the buffer, then return it.
	if p.n != 0 {
		p.n = 0
		return p.tok
	}

	p.tok = p.sc.Scan()
	return p.tok
}
|
||||
|
||||
// unscan pushes the previously read token back onto the buffer so the
// next scan() returns it again. Only one token of lookahead is supported.
func (p *Parser) unscan() {
	p.n = 1
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Parsing support
|
||||
|
||||
// printTrace prints a single parser trace line, indented by the current
// nesting depth, when tracing is enabled. Used via trace/un.
func (p *Parser) printTrace(a ...interface{}) {
	if !p.enableTrace {
		return
	}

	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
	const n = len(dots)
	fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)

	// two columns of indentation per nesting level; repeat the dot
	// pattern for depths wider than one run of dots
	i := 2 * p.indent
	for i > n {
		fmt.Print(dots)
		i -= n
	}
	// i <= n
	fmt.Print(dots[0:i])
	fmt.Println(a...)
}
|
||||
|
||||
// trace logs entry into a parse function and increases the indent depth.
// It returns p so it composes with un: defer un(trace(p, "...")).
func trace(p *Parser, msg string) *Parser {
	p.printTrace(msg, "(")
	p.indent++
	return p
}
|
||||
|
||||
// un logs exit from a parse function and decreases the indent depth.
// Usage pattern: defer un(trace(p, "..."))
func un(p *Parser) {
	p.indent--
	p.printTrace(")")
}
|
||||
451
vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
generated
vendored
Normal file
451
vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
generated
vendored
Normal file
@@ -0,0 +1,451 @@
|
||||
package scanner
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/hashicorp/hcl/json/token"
|
||||
)
|
||||
|
||||
// eof represents a marker rune for the end of the reader.
const eof = rune(0)

// Scanner defines a lexical scanner for JSON input. It tracks byte
// offsets and line/column positions while reading runes from buf, and
// keeps the original bytes in src so token text can be sliced out.
type Scanner struct {
	buf *bytes.Buffer // Source buffer for advancing and scanning
	src []byte        // Source buffer for immutable access

	// Source Position
	srcPos  token.Pos // current position
	prevPos token.Pos // previous position, used for peek() method

	lastCharLen int // length of last character in bytes
	lastLineLen int // length of last line in characters (for correct column reporting)

	tokStart int // token text start position
	tokEnd   int // token text end position

	// Error is called for each error encountered. If no Error
	// function is set, the error is reported to os.Stderr.
	Error func(pos token.Pos, msg string)

	// ErrorCount is incremented by one for each error encountered.
	ErrorCount int

	// tokPos is the start position of most recently scanned token; set by
	// Scan. The Filename field is always left untouched by the Scanner. If
	// an error is reported (via Error) and Position is invalid, the scanner is
	// not inside a token.
	tokPos token.Pos
}
|
||||
|
||||
// New creates and initializes a new instance of Scanner using src as
// its source content.
func New(src []byte) *Scanner {
	// even though we accept a src, we read from a io.Reader compatible type
	// (*bytes.Buffer). So in the future we might easily change it to streaming
	// read.
	b := bytes.NewBuffer(src)
	s := &Scanner{
		buf: b,
		src: src,
	}

	// srcPosition always starts with 1
	s.srcPos.Line = 1
	return s
}
|
||||
|
||||
// next reads the next rune from the buffered reader. Returns the rune(0) if
// an error occurs (or io.EOF is returned). It updates srcPos (offset,
// line, column), prevPos, and lastCharLen as a side effect.
func (s *Scanner) next() rune {
	ch, size, err := s.buf.ReadRune()
	if err != nil {
		// advance for error reporting
		s.srcPos.Column++
		s.srcPos.Offset += size
		s.lastCharLen = size
		return eof
	}

	// size == 1 with RuneError means an invalid UTF-8 byte (a real
	// U+FFFD in the input decodes with size 3)
	if ch == utf8.RuneError && size == 1 {
		s.srcPos.Column++
		s.srcPos.Offset += size
		s.lastCharLen = size
		s.err("illegal UTF-8 encoding")
		return ch
	}

	// remember last position
	s.prevPos = s.srcPos

	s.srcPos.Column++
	s.lastCharLen = size
	s.srcPos.Offset += size

	if ch == '\n' {
		s.srcPos.Line++
		s.lastLineLen = s.srcPos.Column
		s.srcPos.Column = 0
	}

	// debug
	// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
	return ch
}
|
||||
|
||||
// unread unreads the previous read Rune and updates the source position.
// Only one level of unread is supported (bytes.Buffer semantics); a
// second consecutive unread panics.
func (s *Scanner) unread() {
	if err := s.buf.UnreadRune(); err != nil {
		panic(err) // this is user fault, we should catch it
	}
	s.srcPos = s.prevPos // put back last position
}
|
||||
|
||||
// peek returns the next rune without advancing the reader.
// Returns eof when no more input is available.
func (s *Scanner) peek() rune {
	peek, _, err := s.buf.ReadRune()
	if err != nil {
		return eof
	}

	s.buf.UnreadRune()
	return peek
}
|
||||
|
||||
// Scan scans the next token and returns the token. It skips leading
// whitespace, records the token's start position, dispatches on the
// first significant rune, and slices the token text out of s.src.
func (s *Scanner) Scan() token.Token {
	ch := s.next()

	// skip white space
	for isWhitespace(ch) {
		ch = s.next()
	}

	var tok token.Type

	// token text markings
	s.tokStart = s.srcPos.Offset - s.lastCharLen

	// token position, initial next() is moving the offset by one(size of rune
	// actually), though we are interested with the starting point
	s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
	if s.srcPos.Column > 0 {
		// common case: last character was not a '\n'
		s.tokPos.Line = s.srcPos.Line
		s.tokPos.Column = s.srcPos.Column
	} else {
		// last character was a '\n'
		// (we cannot be at the beginning of the source
		// since we have called next() at least once)
		s.tokPos.Line = s.srcPos.Line - 1
		s.tokPos.Column = s.lastLineLen
	}

	switch {
	case isLetter(ch):
		// only the JSON keywords true/false/null are legal bare words
		lit := s.scanIdentifier()
		if lit == "true" || lit == "false" {
			tok = token.BOOL
		} else if lit == "null" {
			tok = token.NULL
		} else {
			s.err("illegal char")
		}
	case isDecimal(ch):
		tok = s.scanNumber(ch)
	default:
		switch ch {
		case eof:
			tok = token.EOF
		case '"':
			tok = token.STRING
			s.scanString()
		case '.':
			tok = token.PERIOD
			ch = s.peek()
			if isDecimal(ch) {
				tok = token.FLOAT
				ch = s.scanMantissa(ch)
				ch = s.scanExponent(ch)
			}
		case '[':
			tok = token.LBRACK
		case ']':
			tok = token.RBRACK
		case '{':
			tok = token.LBRACE
		case '}':
			tok = token.RBRACE
		case ',':
			tok = token.COMMA
		case ':':
			tok = token.COLON
		case '-':
			// a minus sign is only valid as a numeric prefix
			if isDecimal(s.peek()) {
				ch := s.next()
				tok = s.scanNumber(ch)
			} else {
				s.err("illegal char")
			}
		default:
			s.err("illegal char: " + string(ch))
		}
	}

	// finish token ending
	s.tokEnd = s.srcPos.Offset

	// create token literal
	var tokenText string
	if s.tokStart >= 0 {
		tokenText = string(s.src[s.tokStart:s.tokEnd])
	}
	s.tokStart = s.tokEnd // ensure idempotency of tokenText() call

	return token.Token{
		Type: tok,
		Pos:  s.tokPos,
		Text: tokenText,
	}
}
|
||||
|
||||
// scanNumber scans a HCL number definition starting with the given rune.
// It returns NUMBER for integers and FLOAT when an exponent or fraction
// is present. Leading zeros (e.g. "012") are rejected, per JSON.
func (s *Scanner) scanNumber(ch rune) token.Type {
	zero := ch == '0'
	pos := s.srcPos

	s.scanMantissa(ch)
	ch = s.next() // seek forward
	if ch == 'e' || ch == 'E' {
		ch = s.scanExponent(ch)
		return token.FLOAT
	}

	if ch == '.' {
		ch = s.scanFraction(ch)
		if ch == 'e' || ch == 'E' {
			ch = s.next()
			ch = s.scanExponent(ch)
		}
		return token.FLOAT
	}

	if ch != eof {
		s.unread()
	}

	// If we have a larger number and this is zero, error
	if zero && pos != s.srcPos {
		s.err("numbers cannot start with 0")
	}

	return token.NUMBER
}
|
||||
|
||||
// scanMantissa scans the mantissa beginning from the rune. It returns the next
// non-decimal rune. It's used to determine whether it's a fraction or exponent.
func (s *Scanner) scanMantissa(ch rune) rune {
	scanned := false
	for isDecimal(ch) {
		ch = s.next()
		scanned = true
	}

	// put back the non-digit that stopped the loop so the caller sees it
	if scanned && ch != eof {
		s.unread()
	}
	return ch
}
|
||||
|
||||
// scanFraction scans the fraction after the '.' rune.
// Returns the first rune following the fraction digits.
func (s *Scanner) scanFraction(ch rune) rune {
	if ch == '.' {
		ch = s.peek() // we peek just to see if we can move forward
		ch = s.scanMantissa(ch)
	}
	return ch
}
|
||||
|
||||
// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
// rune, including an optional sign. Returns the first rune following the
// exponent digits.
func (s *Scanner) scanExponent(ch rune) rune {
	if ch == 'e' || ch == 'E' {
		ch = s.next()
		if ch == '-' || ch == '+' {
			ch = s.next()
		}
		ch = s.scanMantissa(ch)
	}
	return ch
}
|
||||
|
||||
// scanString scans a quoted string up to the closing '"'. Inside a ${...}
// interpolation (tracked by the brace counter) quotes are ignored so HCL
// interpolations embedded in JSON strings survive scanning. Unterminated
// literals and embedded newlines are reported via err.
func (s *Scanner) scanString() {
	braces := 0
	for {
		// '"' opening already consumed
		// read character after quote
		ch := s.next()

		if ch == '\n' || ch < 0 || ch == eof {
			s.err("literal not terminated")
			return
		}

		if ch == '"' {
			break
		}

		// If we're going into a ${} then we can ignore quotes for awhile
		if braces == 0 && ch == '$' && s.peek() == '{' {
			braces++
			s.next()
		} else if braces > 0 && ch == '{' {
			braces++
		}
		if braces > 0 && ch == '}' {
			braces--
		}

		if ch == '\\' {
			s.scanEscape()
		}
	}

	return
}
|
||||
|
||||
// scanEscape scans an escape sequence following a backslash and returns
// the rune after it. Unknown escapes are reported via err.
func (s *Scanner) scanEscape() rune {
	// http://en.cppreference.com/w/cpp/language/escape
	ch := s.next() // read character after '/'
	switch ch {
	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
		// nothing to do
	case '0', '1', '2', '3', '4', '5', '6', '7':
		// octal notation
		ch = s.scanDigits(ch, 8, 3)
	case 'x':
		// hexadecimal notation
		ch = s.scanDigits(s.next(), 16, 2)
	case 'u':
		// universal character name
		ch = s.scanDigits(s.next(), 16, 4)
	case 'U':
		// universal character name
		ch = s.scanDigits(s.next(), 16, 8)
	default:
		s.err("illegal char escape")
	}
	return ch
}
|
||||
|
||||
// scanDigits scans a rune with the given base for n times. For example an
// octal notation \184 would yield in scanDigits(ch, 8, 3).
// Fewer than n valid digits is reported via err.
func (s *Scanner) scanDigits(ch rune, base, n int) rune {
	for n > 0 && digitVal(ch) < base {
		ch = s.next()
		n--
	}
	if n > 0 {
		s.err("illegal char escape")
	}

	// we scanned all digits, put the last non digit char back
	s.unread()
	return ch
}
|
||||
|
||||
// scanIdentifier scans an identifier and returns the literal string.
// After the first rune, identifiers may contain letters, digits, and '-'.
func (s *Scanner) scanIdentifier() string {
	// Back up by the width of the rune most recently read so the slice
	// below starts at the identifier's first character.
	offs := s.srcPos.Offset - s.lastCharLen
	ch := s.next()
	for isLetter(ch) || isDigit(ch) || ch == '-' {
		ch = s.next()
	}

	if ch != eof {
		s.unread() // we got identifier, put back latest char
	}

	return string(s.src[offs:s.srcPos.Offset])
}
|
||||
|
||||
// recentPosition returns the position of the character immediately after the
// character or token returned by the last call to Scan.
func (s *Scanner) recentPosition() (pos token.Pos) {
	// Offset of the most recently read character.
	pos.Offset = s.srcPos.Offset - s.lastCharLen
	switch {
	case s.srcPos.Column > 0:
		// common case: last character was not a '\n'
		pos.Line = s.srcPos.Line
		pos.Column = s.srcPos.Column
	case s.lastLineLen > 0:
		// last character was a '\n'
		// (we cannot be at the beginning of the source
		// since we have called next() at least once)
		pos.Line = s.srcPos.Line - 1
		pos.Column = s.lastLineLen
	default:
		// at the beginning of the source
		pos.Line = 1
		pos.Column = 1
	}
	return
}
|
||||
|
||||
// err prints the error of any scanning to s.Error function. If the function is
|
||||
// not defined, by default it prints them to os.Stderr
|
||||
func (s *Scanner) err(msg string) {
|
||||
s.ErrorCount++
|
||||
pos := s.recentPosition()
|
||||
|
||||
if s.Error != nil {
|
||||
s.Error(pos, msg)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
|
||||
}
|
||||
|
||||
// isLetter reports whether ch may appear in an identifier: an ASCII
// letter, an underscore, or a non-ASCII rune that the unicode package
// classifies as a letter.
func isLetter(ch rune) bool {
	switch {
	case 'a' <= ch && ch <= 'z':
		return true
	case 'A' <= ch && ch <= 'Z':
		return true
	case ch == '_':
		return true
	default:
		return ch >= 0x80 && unicode.IsLetter(ch)
	}
}
|
||||
|
||||
// isDigit reports whether ch is an ASCII decimal digit or a non-ASCII
// rune that the unicode package classifies as a digit.
func isDigit(ch rune) bool {
	if '0' <= ch && ch <= '9' {
		return true
	}
	return ch >= 0x80 && unicode.IsDigit(ch)
}
|
||||
|
||||
// isDecimal reports whether ch is an ASCII decimal digit ('0' through '9').
func isDecimal(ch rune) bool {
	return ch >= '0' && ch <= '9'
}
|
||||
|
||||
// isHexadecimal reports whether ch is an ASCII hexadecimal digit
// (0-9, a-f, or A-F).
func isHexadecimal(ch rune) bool {
	switch {
	case ch >= '0' && ch <= '9':
		return true
	case ch >= 'a' && ch <= 'f':
		return true
	case ch >= 'A' && ch <= 'F':
		return true
	}
	return false
}
|
||||
|
||||
// isWhitespace reports whether ch is a space, tab, newline, or
// carriage return.
func isWhitespace(ch rune) bool {
	switch ch {
	case ' ', '\t', '\n', '\r':
		return true
	}
	return false
}
|
||||
|
||||
// digitVal returns the integer value of an octal, decimal, or
// hexadecimal digit rune. Any rune that is not a valid digit yields 16,
// which is larger than every legal digit value in any supported base.
func digitVal(ch rune) int {
	if ch >= '0' && ch <= '9' {
		return int(ch - '0')
	}
	if ch >= 'a' && ch <= 'f' {
		return int(ch-'a') + 10
	}
	if ch >= 'A' && ch <= 'F' {
		return int(ch-'A') + 10
	}
	return 16 // larger than any legal digit val
}
|
||||
46
vendor/github.com/hashicorp/hcl/json/token/position.go
generated
vendored
Normal file
46
vendor/github.com/hashicorp/hcl/json/token/position.go
generated
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
package token
|
||||
|
||||
import "fmt"
|
||||
|
||||
// Pos describes an arbitrary source position
// including the file, line, and column location.
// A Position is valid if the line number is > 0.
type Pos struct {
	Filename string // filename, if any
	Offset   int    // offset, starting at 0
	Line     int    // line number, starting at 1
	Column   int    // column number, starting at 1 (character count)
}

// IsValid returns true if the position is valid.
func (p *Pos) IsValid() bool {
	return p.Line > 0
}

// String returns a string in one of several forms:
//
//	file:line:column   valid position with file name
//	line:column        valid position without file name
//	file               invalid position with file name
//	-                  invalid position without file name
func (p Pos) String() string {
	switch {
	case p.IsValid() && p.Filename != "":
		return fmt.Sprintf("%s:%d:%d", p.Filename, p.Line, p.Column)
	case p.IsValid():
		return fmt.Sprintf("%d:%d", p.Line, p.Column)
	case p.Filename != "":
		return p.Filename
	default:
		return "-"
	}
}

// Before reports whether the position p is before u. A position counts
// as earlier when either its byte offset or its line number is smaller.
func (p Pos) Before(u Pos) bool {
	return p.Offset < u.Offset || p.Line < u.Line
}

// After reports whether the position p is after u. A position counts
// as later when either its byte offset or its line number is larger.
func (p Pos) After(u Pos) bool {
	return p.Offset > u.Offset || p.Line > u.Line
}
|
||||
118
vendor/github.com/hashicorp/hcl/json/token/token.go
generated
vendored
Normal file
118
vendor/github.com/hashicorp/hcl/json/token/token.go
generated
vendored
Normal file
@@ -0,0 +1,118 @@
|
||||
package token
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
hcltoken "github.com/hashicorp/hcl/hcl/token"
|
||||
)
|
||||
|
||||
// Token defines a single HCL token which can be obtained via the Scanner
type Token struct {
	Type Type   // lexical class of the token (NUMBER, STRING, ...)
	Pos  Pos    // source position of the token
	Text string // literal text of the token
}
|
||||
|
||||
// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
type Type int

const (
	// Special tokens
	ILLEGAL Type = iota
	EOF

	// The unexported *_beg / *_end markers delimit ranges of token types;
	// the IsIdentifier/IsLiteral/IsOperator methods test membership by
	// comparing against them. Do not reorder entries: values come from iota.
	identifier_beg
	literal_beg
	NUMBER // 12345
	FLOAT  // 123.45
	BOOL   // true,false
	STRING // "abc"
	NULL   // null
	literal_end
	identifier_end

	operator_beg
	LBRACK // [
	LBRACE // {
	COMMA  // ,
	PERIOD // .
	COLON  // :

	RBRACK // ]
	RBRACE // }

	operator_end
)
|
||||
|
||||
// tokens maps each Type value (used as the array index) to its display
// name. Entries for the unexported range-marker types are intentionally
// left empty; Type.String falls back to a "token(N)" form for them.
var tokens = [...]string{
	ILLEGAL: "ILLEGAL",

	EOF: "EOF",

	NUMBER: "NUMBER",
	FLOAT:  "FLOAT",
	BOOL:   "BOOL",
	STRING: "STRING",
	NULL:   "NULL",

	LBRACK: "LBRACK",
	LBRACE: "LBRACE",
	COMMA:  "COMMA",
	PERIOD: "PERIOD",
	COLON:  "COLON",

	RBRACK: "RBRACK",
	RBRACE: "RBRACE",
}
|
||||
|
||||
// String returns the string corresponding to the token tok.
|
||||
func (t Type) String() string {
|
||||
s := ""
|
||||
if 0 <= t && t < Type(len(tokens)) {
|
||||
s = tokens[t]
|
||||
}
|
||||
if s == "" {
|
||||
s = "token(" + strconv.Itoa(int(t)) + ")"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// IsIdentifier returns true for tokens corresponding to identifiers and basic
|
||||
// type literals; it returns false otherwise.
|
||||
func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
|
||||
|
||||
// IsLiteral returns true for tokens corresponding to basic type literals; it
|
||||
// returns false otherwise.
|
||||
func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
|
||||
|
||||
// IsOperator returns true for tokens corresponding to operators and
|
||||
// delimiters; it returns false otherwise.
|
||||
func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
|
||||
|
||||
// String returns the token's literal text. Note that this is only
|
||||
// applicable for certain token types, such as token.IDENT,
|
||||
// token.STRING, etc..
|
||||
func (t Token) String() string {
|
||||
return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
|
||||
}
|
||||
|
||||
// HCLToken converts this token to an HCL token.
//
// The token type must be a literal type or this will panic.
func (t Token) HCLToken() hcltoken.Token {
	switch t.Type {
	case BOOL:
		return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text}
	case FLOAT:
		return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text}
	case NULL:
		// JSON null is represented as an empty HCL string literal.
		return hcltoken.Token{Type: hcltoken.STRING, Text: ""}
	case NUMBER:
		return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text}
	case STRING:
		// NOTE(review): the JSON flag presumably tells the HCL side to
		// apply JSON string-unescaping rules — confirm against hcl/token.
		return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true}
	default:
		panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type))
	}
}
|
||||
38
vendor/github.com/hashicorp/hcl/lex.go
generated
vendored
Normal file
38
vendor/github.com/hashicorp/hcl/lex.go
generated
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
package hcl
|
||||
|
||||
import (
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// lexModeValue identifies which parser (HCL or JSON) should handle a
// given input.
type lexModeValue byte

const (
	lexModeUnknown lexModeValue = iota
	lexModeHcl
	lexModeJson
)

// lexMode returns whether we're going to be parsing in JSON mode or HCL
// mode. The first non-space rune decides: '{' selects JSON, anything
// else (including empty or undecodable input) selects HCL.
func lexMode(v []byte) lexModeValue {
	offset := 0
	for {
		r, width := utf8.DecodeRune(v[offset:])
		offset += width
		if unicode.IsSpace(r) {
			continue
		}
		if r == '{' {
			return lexModeJson
		}
		return lexModeHcl
	}
}
|
||||
39
vendor/github.com/hashicorp/hcl/parse.go
generated
vendored
Normal file
39
vendor/github.com/hashicorp/hcl/parse.go
generated
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
package hcl
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/hcl/hcl/ast"
|
||||
hclParser "github.com/hashicorp/hcl/hcl/parser"
|
||||
jsonParser "github.com/hashicorp/hcl/json/parser"
|
||||
)
|
||||
|
||||
// ParseBytes accepts as input byte slice and returns ast tree.
//
// Input can be either JSON or HCL; the format is auto-detected from the
// first non-space character.
func ParseBytes(in []byte) (*ast.File, error) {
	return parse(in)
}
|
||||
|
||||
// ParseString accepts input as a string and returns ast tree.
// The input format (HCL or JSON) is auto-detected, as with ParseBytes.
func ParseString(input string) (*ast.File, error) {
	return parse([]byte(input))
}
|
||||
|
||||
func parse(in []byte) (*ast.File, error) {
|
||||
switch lexMode(in) {
|
||||
case lexModeHcl:
|
||||
return hclParser.Parse(in)
|
||||
case lexModeJson:
|
||||
return jsonParser.Parse(in)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unknown config format")
|
||||
}
|
||||
|
||||
// Parse parses the given input and returns the root object.
//
// The input format can be either HCL or JSON.
// Parse is equivalent to ParseString.
func Parse(input string) (*ast.File, error) {
	return parse([]byte(input))
}
|
||||
23
vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md
generated
vendored
Normal file
23
vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
# HCL Changelog
|
||||
|
||||
## v2.0.0 (Oct 2, 2019)
|
||||
|
||||
Initial release of HCL 2, which is a new implementation combining the HCL 1
|
||||
language with the HIL expression language to produce a single language
|
||||
supporting both nested configuration structures and arbitrary expressions.
|
||||
|
||||
HCL 2 has an entirely new Go library API and so is _not_ a drop-in upgrade
|
||||
relative to HCL 1. It's possible to import both versions of HCL into a single
|
||||
program using Go's _semantic import versioning_ mechanism:
|
||||
|
||||
```
|
||||
import (
|
||||
hcl1 "github.com/hashicorp/hcl"
|
||||
hcl2 "github.com/hashicorp/hcl/v2"
|
||||
)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
Prior to v2.0.0 there was not a curated changelog. Consult the git history
|
||||
from the latest v1.x.x tag for information on the changes to HCL 1.
|
||||
353
vendor/github.com/hashicorp/hcl/v2/LICENSE
generated
vendored
Normal file
353
vendor/github.com/hashicorp/hcl/v2/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,353 @@
|
||||
Mozilla Public License, version 2.0
|
||||
|
||||
1. Definitions
|
||||
|
||||
1.1. “Contributor”
|
||||
|
||||
means each individual or legal entity that creates, contributes to the
|
||||
creation of, or owns Covered Software.
|
||||
|
||||
1.2. “Contributor Version”
|
||||
|
||||
means the combination of the Contributions of others (if any) used by a
|
||||
Contributor and that particular Contributor’s Contribution.
|
||||
|
||||
1.3. “Contribution”
|
||||
|
||||
means Covered Software of a particular Contributor.
|
||||
|
||||
1.4. “Covered Software”
|
||||
|
||||
means Source Code Form to which the initial Contributor has attached the
|
||||
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||
Modifications of such Source Code Form, in each case including portions
|
||||
thereof.
|
||||
|
||||
1.5. “Incompatible With Secondary Licenses”
|
||||
means
|
||||
|
||||
a. that the initial Contributor has attached the notice described in
|
||||
Exhibit B to the Covered Software; or
|
||||
|
||||
b. that the Covered Software was made available under the terms of version
|
||||
1.1 or earlier of the License, but not also under the terms of a
|
||||
Secondary License.
|
||||
|
||||
1.6. “Executable Form”
|
||||
|
||||
means any form of the work other than Source Code Form.
|
||||
|
||||
1.7. “Larger Work”
|
||||
|
||||
means a work that combines Covered Software with other material, in a separate
|
||||
file or files, that is not Covered Software.
|
||||
|
||||
1.8. “License”
|
||||
|
||||
means this document.
|
||||
|
||||
1.9. “Licensable”
|
||||
|
||||
means having the right to grant, to the maximum extent possible, whether at the
|
||||
time of the initial grant or subsequently, any and all of the rights conveyed by
|
||||
this License.
|
||||
|
||||
1.10. “Modifications”
|
||||
|
||||
means any of the following:
|
||||
|
||||
a. any file in Source Code Form that results from an addition to, deletion
|
||||
from, or modification of the contents of Covered Software; or
|
||||
|
||||
b. any new file in Source Code Form that contains any Covered Software.
|
||||
|
||||
1.11. “Patent Claims” of a Contributor
|
||||
|
||||
means any patent claim(s), including without limitation, method, process,
|
||||
and apparatus claims, in any patent Licensable by such Contributor that
|
||||
would be infringed, but for the grant of the License, by the making,
|
||||
using, selling, offering for sale, having made, import, or transfer of
|
||||
either its Contributions or its Contributor Version.
|
||||
|
||||
1.12. “Secondary License”
|
||||
|
||||
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||
General Public License, Version 2.1, the GNU Affero General Public
|
||||
License, Version 3.0, or any later versions of those licenses.
|
||||
|
||||
1.13. “Source Code Form”
|
||||
|
||||
means the form of the work preferred for making modifications.
|
||||
|
||||
1.14. “You” (or “Your”)
|
||||
|
||||
means an individual or a legal entity exercising rights under this
|
||||
License. For legal entities, “You” includes any entity that controls, is
|
||||
controlled by, or is under common control with You. For purposes of this
|
||||
definition, “control” means (a) the power, direct or indirect, to cause
|
||||
the direction or management of such entity, whether by contract or
|
||||
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||
outstanding shares or beneficial ownership of such entity.
|
||||
|
||||
|
||||
2. License Grants and Conditions
|
||||
|
||||
2.1. Grants
|
||||
|
||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||
non-exclusive license:
|
||||
|
||||
a. under intellectual property rights (other than patent or trademark)
|
||||
Licensable by such Contributor to use, reproduce, make available,
|
||||
modify, display, perform, distribute, and otherwise exploit its
|
||||
Contributions, either on an unmodified basis, with Modifications, or as
|
||||
part of a Larger Work; and
|
||||
|
||||
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||
sale, have made, import, and otherwise transfer either its Contributions
|
||||
or its Contributor Version.
|
||||
|
||||
2.2. Effective Date
|
||||
|
||||
The licenses granted in Section 2.1 with respect to any Contribution become
|
||||
effective for each Contribution on the date the Contributor first distributes
|
||||
such Contribution.
|
||||
|
||||
2.3. Limitations on Grant Scope
|
||||
|
||||
The licenses granted in this Section 2 are the only rights granted under this
|
||||
License. No additional rights or licenses will be implied from the distribution
|
||||
or licensing of Covered Software under this License. Notwithstanding Section
|
||||
2.1(b) above, no patent license is granted by a Contributor:
|
||||
|
||||
a. for any code that a Contributor has removed from Covered Software; or
|
||||
|
||||
b. for infringements caused by: (i) Your and any other third party’s
|
||||
modifications of Covered Software, or (ii) the combination of its
|
||||
Contributions with other software (except as part of its Contributor
|
||||
Version); or
|
||||
|
||||
c. under Patent Claims infringed by Covered Software in the absence of its
|
||||
Contributions.
|
||||
|
||||
This License does not grant any rights in the trademarks, service marks, or
|
||||
logos of any Contributor (except as may be necessary to comply with the
|
||||
notice requirements in Section 3.4).
|
||||
|
||||
2.4. Subsequent Licenses
|
||||
|
||||
No Contributor makes additional grants as a result of Your choice to
|
||||
distribute the Covered Software under a subsequent version of this License
|
||||
(see Section 10.2) or under the terms of a Secondary License (if permitted
|
||||
under the terms of Section 3.3).
|
||||
|
||||
2.5. Representation
|
||||
|
||||
Each Contributor represents that the Contributor believes its Contributions
|
||||
are its original creation(s) or it has sufficient rights to grant the
|
||||
rights to its Contributions conveyed by this License.
|
||||
|
||||
2.6. Fair Use
|
||||
|
||||
This License is not intended to limit any rights You have under applicable
|
||||
copyright doctrines of fair use, fair dealing, or other equivalents.
|
||||
|
||||
2.7. Conditions
|
||||
|
||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||
Section 2.1.
|
||||
|
||||
|
||||
3. Responsibilities
|
||||
|
||||
3.1. Distribution of Source Form
|
||||
|
||||
All distribution of Covered Software in Source Code Form, including any
|
||||
Modifications that You create or to which You contribute, must be under the
|
||||
terms of this License. You must inform recipients that the Source Code Form
|
||||
of the Covered Software is governed by the terms of this License, and how
|
||||
they can obtain a copy of this License. You may not attempt to alter or
|
||||
restrict the recipients’ rights in the Source Code Form.
|
||||
|
||||
3.2. Distribution of Executable Form
|
||||
|
||||
If You distribute Covered Software in Executable Form then:
|
||||
|
||||
a. such Covered Software must also be made available in Source Code Form,
|
||||
as described in Section 3.1, and You must inform recipients of the
|
||||
Executable Form how they can obtain a copy of such Source Code Form by
|
||||
reasonable means in a timely manner, at a charge no more than the cost
|
||||
of distribution to the recipient; and
|
||||
|
||||
b. You may distribute such Executable Form under the terms of this License,
|
||||
or sublicense it under different terms, provided that the license for
|
||||
the Executable Form does not attempt to limit or alter the recipients’
|
||||
rights in the Source Code Form under this License.
|
||||
|
||||
3.3. Distribution of a Larger Work
|
||||
|
||||
You may create and distribute a Larger Work under terms of Your choice,
|
||||
provided that You also comply with the requirements of this License for the
|
||||
Covered Software. If the Larger Work is a combination of Covered Software
|
||||
with a work governed by one or more Secondary Licenses, and the Covered
|
||||
Software is not Incompatible With Secondary Licenses, this License permits
|
||||
You to additionally distribute such Covered Software under the terms of
|
||||
such Secondary License(s), so that the recipient of the Larger Work may, at
|
||||
their option, further distribute the Covered Software under the terms of
|
||||
either this License or such Secondary License(s).
|
||||
|
||||
3.4. Notices
|
||||
|
||||
You may not remove or alter the substance of any license notices (including
|
||||
copyright notices, patent notices, disclaimers of warranty, or limitations
|
||||
of liability) contained within the Source Code Form of the Covered
|
||||
Software, except that You may alter any license notices to the extent
|
||||
required to remedy known factual inaccuracies.
|
||||
|
||||
3.5. Application of Additional Terms
|
||||
|
||||
You may choose to offer, and to charge a fee for, warranty, support,
|
||||
indemnity or liability obligations to one or more recipients of Covered
|
||||
Software. However, You may do so only on Your own behalf, and not on behalf
|
||||
of any Contributor. You must make it absolutely clear that any such
|
||||
warranty, support, indemnity, or liability obligation is offered by You
|
||||
alone, and You hereby agree to indemnify every Contributor for any
|
||||
liability incurred by such Contributor as a result of warranty, support,
|
||||
indemnity or liability terms You offer. You may include additional
|
||||
disclaimers of warranty and limitations of liability specific to any
|
||||
jurisdiction.
|
||||
|
||||
4. Inability to Comply Due to Statute or Regulation
|
||||
|
||||
If it is impossible for You to comply with any of the terms of this License
|
||||
with respect to some or all of the Covered Software due to statute, judicial
|
||||
order, or regulation then You must: (a) comply with the terms of this License
|
||||
to the maximum extent possible; and (b) describe the limitations and the code
|
||||
they affect. Such description must be placed in a text file included with all
|
||||
distributions of the Covered Software under this License. Except to the
|
||||
extent prohibited by statute or regulation, such description must be
|
||||
sufficiently detailed for a recipient of ordinary skill to be able to
|
||||
understand it.
|
||||
|
||||
5. Termination
|
||||
|
||||
5.1. The rights granted under this License will terminate automatically if You
|
||||
fail to comply with any of its terms. However, if You become compliant,
|
||||
then the rights granted under this License from a particular Contributor
|
||||
are reinstated (a) provisionally, unless and until such Contributor
|
||||
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
|
||||
if such Contributor fails to notify You of the non-compliance by some
|
||||
reasonable means prior to 60 days after You have come back into compliance.
|
||||
Moreover, Your grants from a particular Contributor are reinstated on an
|
||||
ongoing basis if such Contributor notifies You of the non-compliance by
|
||||
some reasonable means, this is the first time You have received notice of
|
||||
non-compliance with this License from such Contributor, and You become
|
||||
compliant prior to 30 days after Your receipt of the notice.
|
||||
|
||||
5.2. If You initiate litigation against any entity by asserting a patent
|
||||
infringement claim (excluding declaratory judgment actions, counter-claims,
|
||||
and cross-claims) alleging that a Contributor Version directly or
|
||||
indirectly infringes any patent, then the rights granted to You by any and
|
||||
all Contributors for the Covered Software under Section 2.1 of this License
|
||||
shall terminate.
|
||||
|
||||
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
||||
license agreements (excluding distributors and resellers) which have been
|
||||
validly granted by You or Your distributors under this License prior to
|
||||
termination shall survive termination.
|
||||
|
||||
6. Disclaimer of Warranty
|
||||
|
||||
Covered Software is provided under this License on an “as is” basis, without
|
||||
warranty of any kind, either expressed, implied, or statutory, including,
|
||||
without limitation, warranties that the Covered Software is free of defects,
|
||||
merchantable, fit for a particular purpose or non-infringing. The entire
|
||||
risk as to the quality and performance of the Covered Software is with You.
|
||||
Should any Covered Software prove defective in any respect, You (not any
|
||||
Contributor) assume the cost of any necessary servicing, repair, or
|
||||
correction. This disclaimer of warranty constitutes an essential part of this
|
||||
License. No use of any Covered Software is authorized under this License
|
||||
except under this disclaimer.
|
||||
|
||||
7. Limitation of Liability
|
||||
|
||||
Under no circumstances and under no legal theory, whether tort (including
|
||||
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
||||
distributes Covered Software as permitted above, be liable to You for any
|
||||
direct, indirect, special, incidental, or consequential damages of any
|
||||
character including, without limitation, damages for lost profits, loss of
|
||||
goodwill, work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses, even if such party shall have been
|
||||
informed of the possibility of such damages. This limitation of liability
|
||||
shall not apply to liability for death or personal injury resulting from such
|
||||
party’s negligence to the extent applicable law prohibits such limitation.
|
||||
Some jurisdictions do not allow the exclusion or limitation of incidental or
|
||||
consequential damages, so this exclusion and limitation may not apply to You.
|
||||
|
||||
8. Litigation
|
||||
|
||||
Any litigation relating to this License may be brought only in the courts of
|
||||
a jurisdiction where the defendant maintains its principal place of business
|
||||
and such litigation shall be governed by laws of that jurisdiction, without
|
||||
reference to its conflict-of-law provisions. Nothing in this Section shall
|
||||
prevent a party’s ability to bring cross-claims or counter-claims.
|
||||
|
||||
9. Miscellaneous
|
||||
|
||||
This License represents the complete agreement concerning the subject matter
|
||||
hereof. If any provision of this License is held to be unenforceable, such
|
||||
provision shall be reformed only to the extent necessary to make it
|
||||
enforceable. Any law or regulation which provides that the language of a
|
||||
contract shall be construed against the drafter shall not be used to construe
|
||||
this License against a Contributor.
|
||||
|
||||
|
||||
10. Versions of the License
|
||||
|
||||
10.1. New Versions
|
||||
|
||||
Mozilla Foundation is the license steward. Except as provided in Section
|
||||
10.3, no one other than the license steward has the right to modify or
|
||||
publish new versions of this License. Each version will be given a
|
||||
distinguishing version number.
|
||||
|
||||
10.2. Effect of New Versions
|
||||
|
||||
You may distribute the Covered Software under the terms of the version of
|
||||
the License under which You originally received the Covered Software, or
|
||||
under the terms of any subsequent version published by the license
|
||||
steward.
|
||||
|
||||
10.3. Modified Versions
|
||||
|
||||
If you create software not governed by this License, and you want to
|
||||
create a new license for such software, you may create and use a modified
|
||||
version of this License if you rename the license and remove any
|
||||
references to the name of the license steward (except to note that such
|
||||
modified license differs from this License).
|
||||
|
||||
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
|
||||
If You choose to distribute Source Code Form that is Incompatible With
|
||||
Secondary Licenses under the terms of this version of the License, the
|
||||
notice described in Exhibit B of this License must be attached.
|
||||
|
||||
Exhibit A - Source Code Form License Notice
|
||||
|
||||
This Source Code Form is subject to the
|
||||
terms of the Mozilla Public License, v.
|
||||
2.0. If a copy of the MPL was not
|
||||
distributed with this file, You can
|
||||
obtain one at
|
||||
http://mozilla.org/MPL/2.0/.
|
||||
|
||||
If it is not possible or desirable to put the notice in a particular file, then
|
||||
You may include the notice in a location (such as a LICENSE file in a relevant
|
||||
directory) where a recipient would be likely to look for such a notice.
|
||||
|
||||
You may add additional accurate notices of copyright ownership.
|
||||
|
||||
Exhibit B - “Incompatible With Secondary Licenses” Notice
|
||||
|
||||
This Source Code Form is “Incompatible
|
||||
With Secondary Licenses”, as defined by
|
||||
the Mozilla Public License, v. 2.0.
|
||||
204
vendor/github.com/hashicorp/hcl/v2/README.md
generated
vendored
Normal file
204
vendor/github.com/hashicorp/hcl/v2/README.md
generated
vendored
Normal file
@@ -0,0 +1,204 @@
|
||||
# HCL
|
||||
|
||||
HCL is a toolkit for creating structured configuration languages that are
|
||||
both human- and machine-friendly, for use with command-line tools.
|
||||
Although intended to be generally useful, it is primarily targeted
|
||||
towards devops tools, servers, etc.
|
||||
|
||||
> **NOTE:** This is major version 2 of HCL, whose Go API is incompatible with
|
||||
> major version 1. Both versions are available for selection in Go Modules
|
||||
> projects. HCL 2 _cannot_ be imported from Go projects that are not using Go Modules. For more information, see
|
||||
> [our version selection guide](https://github.com/golang/go/wiki/Version-Selection).
|
||||
|
||||
HCL has both a _native syntax_, intended to be pleasant to read and write for
|
||||
humans, and a JSON-based variant that is easier for machines to generate
|
||||
and parse.
|
||||
|
||||
The HCL native syntax is inspired by [libucl](https://github.com/vstakhov/libucl),
|
||||
[nginx configuration](http://nginx.org/en/docs/beginners_guide.html#conf_structure),
|
||||
and others.
|
||||
|
||||
It includes an expression syntax that allows basic inline computation and,
|
||||
with support from the calling application, use of variables and functions
|
||||
for more dynamic configuration languages.
|
||||
|
||||
HCL provides a set of constructs that can be used by a calling application to
|
||||
construct a configuration language. The application defines which attribute
|
||||
names and nested block types are expected, and HCL parses the configuration
|
||||
file, verifies that it conforms to the expected structure, and returns
|
||||
high-level objects that the application can use for further processing.
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"github.com/hashicorp/hcl/v2/hclsimple"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
LogLevel string `hcl:"log_level"`
|
||||
}
|
||||
|
||||
func main() {
|
||||
var config Config
|
||||
err := hclsimple.DecodeFile("config.hcl", nil, &config)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to load configuration: %s", err)
|
||||
}
|
||||
log.Printf("Configuration is %#v", config)
|
||||
}
|
||||
```
|
||||
|
||||
A lower-level API is available for applications that need more control over
|
||||
the parsing, decoding, and evaluation of configuration.
|
||||
|
||||
## Why?
|
||||
|
||||
Newcomers to HCL often ask: why not JSON, YAML, etc?
|
||||
|
||||
Whereas JSON and YAML are formats for serializing data structures, HCL is
|
||||
a syntax and API specifically designed for building structured configuration
|
||||
formats.
|
||||
|
||||
HCL attempts to strike a compromise between generic serialization formats
|
||||
such as JSON and configuration formats built around full programming languages
|
||||
such as Ruby. HCL syntax is designed to be easily read and written by humans,
|
||||
and allows _declarative_ logic to permit its use in more complex applications.
|
||||
|
||||
HCL is intended as a base syntax for configuration formats built
|
||||
around key-value pairs and hierarchical blocks whose structure is well-defined
|
||||
by the calling application, and this definition of the configuration structure
|
||||
allows for better error messages and more convenient definition within the
|
||||
calling application.
|
||||
|
||||
It can't be denied that JSON is very convenient as a _lingua franca_
|
||||
for interoperability between different pieces of software. Because of this,
|
||||
HCL defines a common configuration model that can be parsed from either its
|
||||
native syntax or from a well-defined equivalent JSON structure. This allows
|
||||
configuration to be provided as a mixture of human-authored configuration
|
||||
files in the native syntax and machine-generated files in JSON.
|
||||
|
||||
## Information Model and Syntax
|
||||
|
||||
HCL is built around two primary concepts: _attributes_ and _blocks_. In
|
||||
native syntax, a configuration file for a hypothetical application might look
|
||||
something like this:
|
||||
|
||||
```hcl
|
||||
io_mode = "async"
|
||||
|
||||
service "http" "web_proxy" {
|
||||
listen_addr = "127.0.0.1:8080"
|
||||
|
||||
process "main" {
|
||||
command = ["/usr/local/bin/awesome-app", "server"]
|
||||
}
|
||||
|
||||
process "mgmt" {
|
||||
command = ["/usr/local/bin/awesome-app", "mgmt"]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The JSON equivalent of this configuration is the following:
|
||||
|
||||
```json
|
||||
{
|
||||
"io_mode": "async",
|
||||
"service": {
|
||||
"http": {
|
||||
"web_proxy": {
|
||||
"listen_addr": "127.0.0.1:8080",
|
||||
"process": {
|
||||
"main": {
|
||||
"command": ["/usr/local/bin/awesome-app", "server"]
|
||||
},
|
||||
"mgmt": {
|
||||
"command": ["/usr/local/bin/awesome-app", "mgmt"]
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Regardless of which syntax is used, the API within the calling application
|
||||
is the same. It can either work directly with the low-level attributes and
|
||||
blocks, for more advanced use-cases, or it can use one of the _decoder_
|
||||
packages to declaratively extract into either Go structs or dynamic value
|
||||
structures.
|
||||
|
||||
Attribute values can be expressions as well as just literal values:
|
||||
|
||||
```hcl
|
||||
# Arithmetic with literals and application-provided variables
|
||||
sum = 1 + addend
|
||||
|
||||
# String interpolation and templates
|
||||
message = "Hello, ${name}!"
|
||||
|
||||
# Application-provided functions
|
||||
shouty_message = upper(message)
|
||||
```
|
||||
|
||||
Although JSON syntax doesn't permit direct use of expressions, the interpolation
|
||||
syntax allows use of arbitrary expressions within JSON strings:
|
||||
|
||||
```json
|
||||
{
|
||||
"sum": "${1 + addend}",
|
||||
"message": "Hello, ${name}!",
|
||||
"shouty_message": "${upper(message)}"
|
||||
}
|
||||
```
|
||||
|
||||
For more information, see the detailed specifications:
|
||||
|
||||
* [Syntax-agnostic Information Model](hcl/spec.md)
|
||||
* [HCL Native Syntax](hcl/hclsyntax/spec.md)
|
||||
* [JSON Representation](hcl/json/spec.md)
|
||||
|
||||
## Changes in 2.0
|
||||
|
||||
Version 2.0 of HCL combines the features of HCL 1.0 with those of the
|
||||
interpolation language HIL to produce a single configuration language that
|
||||
supports arbitrary expressions.
|
||||
|
||||
This new version has a completely new parser and Go API, with no direct
|
||||
migration path. Although the syntax is similar, the implementation takes some
|
||||
very different approaches to improve on some "rough edges" that existed with
|
||||
the original implementation and to allow for more robust error handling.
|
||||
|
||||
It's possible to import both HCL 1 and HCL 2 into the same program using Go's
|
||||
_semantic import versioning_ mechanism:
|
||||
|
||||
```go
|
||||
import (
|
||||
hcl1 "github.com/hashicorp/hcl"
|
||||
hcl2 "github.com/hashicorp/hcl/v2"
|
||||
)
|
||||
```
|
||||
|
||||
## Acknowledgements
|
||||
|
||||
HCL was heavily inspired by [libucl](https://github.com/vstakhov/libucl),
|
||||
by [Vsevolod Stakhov](https://github.com/vstakhov).
|
||||
|
||||
HCL and HIL originate in [HashiCorp Terraform](https://terraform.io/),
|
||||
with the original parsers for each written by
|
||||
[Mitchell Hashimoto](https://github.com/mitchellh).
|
||||
|
||||
The original HCL parser was ported to pure Go (from yacc) by
|
||||
[Fatih Arslan](https://github.com/fatih). The structure-related portions of
|
||||
the new native syntax parser build on that work.
|
||||
|
||||
The original HIL parser was ported to pure Go (from yacc) by
|
||||
[Martin Atkins](https://github.com/apparentlymart). The expression-related
|
||||
portions of the new native syntax parser build on that work.
|
||||
|
||||
HCL 2, which merged the original HCL and HIL languages into this single new
|
||||
language, builds on design and prototyping work by
|
||||
[Martin Atkins](https://github.com/apparentlymart) in
|
||||
[zcl](https://github.com/zclconf/go-zcl).
|
||||
143
vendor/github.com/hashicorp/hcl/v2/diagnostic.go
generated
vendored
Normal file
143
vendor/github.com/hashicorp/hcl/v2/diagnostic.go
generated
vendored
Normal file
@@ -0,0 +1,143 @@
|
||||
package hcl
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// DiagnosticSeverity represents the severity of a diagnostic.
|
||||
type DiagnosticSeverity int
|
||||
|
||||
const (
|
||||
// DiagInvalid is the invalid zero value of DiagnosticSeverity
|
||||
DiagInvalid DiagnosticSeverity = iota
|
||||
|
||||
// DiagError indicates that the problem reported by a diagnostic prevents
|
||||
// further progress in parsing and/or evaluating the subject.
|
||||
DiagError
|
||||
|
||||
// DiagWarning indicates that the problem reported by a diagnostic warrants
|
||||
// user attention but does not prevent further progress. It is most
|
||||
// commonly used for showing deprecation notices.
|
||||
DiagWarning
|
||||
)
|
||||
|
||||
// Diagnostic represents information to be presented to a user about an
|
||||
// error or anomoly in parsing or evaluating configuration.
|
||||
type Diagnostic struct {
|
||||
Severity DiagnosticSeverity
|
||||
|
||||
// Summary and Detail contain the English-language description of the
|
||||
// problem. Summary is a terse description of the general problem and
|
||||
// detail is a more elaborate, often-multi-sentence description of
|
||||
// the probem and what might be done to solve it.
|
||||
Summary string
|
||||
Detail string
|
||||
|
||||
// Subject and Context are both source ranges relating to the diagnostic.
|
||||
//
|
||||
// Subject is a tight range referring to exactly the construct that
|
||||
// is problematic, while Context is an optional broader range (which should
|
||||
// fully contain Subject) that ought to be shown around Subject when
|
||||
// generating isolated source-code snippets in diagnostic messages.
|
||||
// If Context is nil, the Subject is also the Context.
|
||||
//
|
||||
// Some diagnostics have no source ranges at all. If Context is set then
|
||||
// Subject should always also be set.
|
||||
Subject *Range
|
||||
Context *Range
|
||||
|
||||
// For diagnostics that occur when evaluating an expression, Expression
|
||||
// may refer to that expression and EvalContext may point to the
|
||||
// EvalContext that was active when evaluating it. This may allow for the
|
||||
// inclusion of additional useful information when rendering a diagnostic
|
||||
// message to the user.
|
||||
//
|
||||
// It is not always possible to select a single EvalContext for a
|
||||
// diagnostic, and so in some cases this field may be nil even when an
|
||||
// expression causes a problem.
|
||||
//
|
||||
// EvalContexts form a tree, so the given EvalContext may refer to a parent
|
||||
// which in turn refers to another parent, etc. For a full picture of all
|
||||
// of the active variables and functions the caller must walk up this
|
||||
// chain, preferring definitions that are "closer" to the expression in
|
||||
// case of colliding names.
|
||||
Expression Expression
|
||||
EvalContext *EvalContext
|
||||
}
|
||||
|
||||
// Diagnostics is a list of Diagnostic instances.
|
||||
type Diagnostics []*Diagnostic
|
||||
|
||||
// error implementation, so that diagnostics can be returned via APIs
|
||||
// that normally deal in vanilla Go errors.
|
||||
//
|
||||
// This presents only minimal context about the error, for compatibility
|
||||
// with usual expectations about how errors will present as strings.
|
||||
func (d *Diagnostic) Error() string {
|
||||
return fmt.Sprintf("%s: %s; %s", d.Subject, d.Summary, d.Detail)
|
||||
}
|
||||
|
||||
// error implementation, so that sets of diagnostics can be returned via
|
||||
// APIs that normally deal in vanilla Go errors.
|
||||
func (d Diagnostics) Error() string {
|
||||
count := len(d)
|
||||
switch {
|
||||
case count == 0:
|
||||
return "no diagnostics"
|
||||
case count == 1:
|
||||
return d[0].Error()
|
||||
default:
|
||||
return fmt.Sprintf("%s, and %d other diagnostic(s)", d[0].Error(), count-1)
|
||||
}
|
||||
}
|
||||
|
||||
// Append appends a new error to a Diagnostics and return the whole Diagnostics.
|
||||
//
|
||||
// This is provided as a convenience for returning from a function that
|
||||
// collects and then returns a set of diagnostics:
|
||||
//
|
||||
// return nil, diags.Append(&hcl.Diagnostic{ ... })
|
||||
//
|
||||
// Note that this modifies the array underlying the diagnostics slice, so
|
||||
// must be used carefully within a single codepath. It is incorrect (and rude)
|
||||
// to extend a diagnostics created by a different subsystem.
|
||||
func (d Diagnostics) Append(diag *Diagnostic) Diagnostics {
|
||||
return append(d, diag)
|
||||
}
|
||||
|
||||
// Extend concatenates the given Diagnostics with the receiver and returns
|
||||
// the whole new Diagnostics.
|
||||
//
|
||||
// This is similar to Append but accepts multiple diagnostics to add. It has
|
||||
// all the same caveats and constraints.
|
||||
func (d Diagnostics) Extend(diags Diagnostics) Diagnostics {
|
||||
return append(d, diags...)
|
||||
}
|
||||
|
||||
// HasErrors returns true if the receiver contains any diagnostics of
|
||||
// severity DiagError.
|
||||
func (d Diagnostics) HasErrors() bool {
|
||||
for _, diag := range d {
|
||||
if diag.Severity == DiagError {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (d Diagnostics) Errs() []error {
|
||||
var errs []error
|
||||
for _, diag := range d {
|
||||
if diag.Severity == DiagError {
|
||||
errs = append(errs, diag)
|
||||
}
|
||||
}
|
||||
|
||||
return errs
|
||||
}
|
||||
|
||||
// A DiagnosticWriter emits diagnostics somehow.
|
||||
type DiagnosticWriter interface {
|
||||
WriteDiagnostic(*Diagnostic) error
|
||||
WriteDiagnostics(Diagnostics) error
|
||||
}
|
||||
311
vendor/github.com/hashicorp/hcl/v2/diagnostic_text.go
generated
vendored
Normal file
311
vendor/github.com/hashicorp/hcl/v2/diagnostic_text.go
generated
vendored
Normal file
@@ -0,0 +1,311 @@
|
||||
package hcl
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
|
||||
wordwrap "github.com/mitchellh/go-wordwrap"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
type diagnosticTextWriter struct {
|
||||
files map[string]*File
|
||||
wr io.Writer
|
||||
width uint
|
||||
color bool
|
||||
}
|
||||
|
||||
// NewDiagnosticTextWriter creates a DiagnosticWriter that writes diagnostics
|
||||
// to the given writer as formatted text.
|
||||
//
|
||||
// It is designed to produce text appropriate to print in a monospaced font
|
||||
// in a terminal of a particular width, or optionally with no width limit.
|
||||
//
|
||||
// The given width may be zero to disable word-wrapping of the detail text
|
||||
// and truncation of source code snippets.
|
||||
//
|
||||
// If color is set to true, the output will include VT100 escape sequences to
|
||||
// color-code the severity indicators. It is suggested to turn this off if
|
||||
// the target writer is not a terminal.
|
||||
func NewDiagnosticTextWriter(wr io.Writer, files map[string]*File, width uint, color bool) DiagnosticWriter {
|
||||
return &diagnosticTextWriter{
|
||||
files: files,
|
||||
wr: wr,
|
||||
width: width,
|
||||
color: color,
|
||||
}
|
||||
}
|
||||
|
||||
func (w *diagnosticTextWriter) WriteDiagnostic(diag *Diagnostic) error {
|
||||
if diag == nil {
|
||||
return errors.New("nil diagnostic")
|
||||
}
|
||||
|
||||
var colorCode, highlightCode, resetCode string
|
||||
if w.color {
|
||||
switch diag.Severity {
|
||||
case DiagError:
|
||||
colorCode = "\x1b[31m"
|
||||
case DiagWarning:
|
||||
colorCode = "\x1b[33m"
|
||||
}
|
||||
resetCode = "\x1b[0m"
|
||||
highlightCode = "\x1b[1;4m"
|
||||
}
|
||||
|
||||
var severityStr string
|
||||
switch diag.Severity {
|
||||
case DiagError:
|
||||
severityStr = "Error"
|
||||
case DiagWarning:
|
||||
severityStr = "Warning"
|
||||
default:
|
||||
// should never happen
|
||||
severityStr = "???????"
|
||||
}
|
||||
|
||||
fmt.Fprintf(w.wr, "%s%s%s: %s\n\n", colorCode, severityStr, resetCode, diag.Summary)
|
||||
|
||||
if diag.Subject != nil {
|
||||
snipRange := *diag.Subject
|
||||
highlightRange := snipRange
|
||||
if diag.Context != nil {
|
||||
// Show enough of the source code to include both the subject
|
||||
// and context ranges, which overlap in all reasonable
|
||||
// situations.
|
||||
snipRange = RangeOver(snipRange, *diag.Context)
|
||||
}
|
||||
// We can't illustrate an empty range, so we'll turn such ranges into
|
||||
// single-character ranges, which might not be totally valid (may point
|
||||
// off the end of a line, or off the end of the file) but are good
|
||||
// enough for the bounds checks we do below.
|
||||
if snipRange.Empty() {
|
||||
snipRange.End.Byte++
|
||||
snipRange.End.Column++
|
||||
}
|
||||
if highlightRange.Empty() {
|
||||
highlightRange.End.Byte++
|
||||
highlightRange.End.Column++
|
||||
}
|
||||
|
||||
file := w.files[diag.Subject.Filename]
|
||||
if file == nil || file.Bytes == nil {
|
||||
fmt.Fprintf(w.wr, " on %s line %d:\n (source code not available)\n\n", diag.Subject.Filename, diag.Subject.Start.Line)
|
||||
} else {
|
||||
|
||||
var contextLine string
|
||||
if diag.Subject != nil {
|
||||
contextLine = contextString(file, diag.Subject.Start.Byte)
|
||||
if contextLine != "" {
|
||||
contextLine = ", in " + contextLine
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Fprintf(w.wr, " on %s line %d%s:\n", diag.Subject.Filename, diag.Subject.Start.Line, contextLine)
|
||||
|
||||
src := file.Bytes
|
||||
sc := NewRangeScanner(src, diag.Subject.Filename, bufio.ScanLines)
|
||||
|
||||
for sc.Scan() {
|
||||
lineRange := sc.Range()
|
||||
if !lineRange.Overlaps(snipRange) {
|
||||
continue
|
||||
}
|
||||
|
||||
beforeRange, highlightedRange, afterRange := lineRange.PartitionAround(highlightRange)
|
||||
if highlightedRange.Empty() {
|
||||
fmt.Fprintf(w.wr, "%4d: %s\n", lineRange.Start.Line, sc.Bytes())
|
||||
} else {
|
||||
before := beforeRange.SliceBytes(src)
|
||||
highlighted := highlightedRange.SliceBytes(src)
|
||||
after := afterRange.SliceBytes(src)
|
||||
fmt.Fprintf(
|
||||
w.wr, "%4d: %s%s%s%s%s\n",
|
||||
lineRange.Start.Line,
|
||||
before,
|
||||
highlightCode, highlighted, resetCode,
|
||||
after,
|
||||
)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
w.wr.Write([]byte{'\n'})
|
||||
}
|
||||
|
||||
if diag.Expression != nil && diag.EvalContext != nil {
|
||||
// We will attempt to render the values for any variables
|
||||
// referenced in the given expression as additional context, for
|
||||
// situations where the same expression is evaluated multiple
|
||||
// times in different scopes.
|
||||
expr := diag.Expression
|
||||
ctx := diag.EvalContext
|
||||
|
||||
vars := expr.Variables()
|
||||
stmts := make([]string, 0, len(vars))
|
||||
seen := make(map[string]struct{}, len(vars))
|
||||
for _, traversal := range vars {
|
||||
val, diags := traversal.TraverseAbs(ctx)
|
||||
if diags.HasErrors() {
|
||||
// Skip anything that generates errors, since we probably
|
||||
// already have the same error in our diagnostics set
|
||||
// already.
|
||||
continue
|
||||
}
|
||||
|
||||
traversalStr := w.traversalStr(traversal)
|
||||
if _, exists := seen[traversalStr]; exists {
|
||||
continue // don't show duplicates when the same variable is referenced multiple times
|
||||
}
|
||||
switch {
|
||||
case !val.IsKnown():
|
||||
// Can't say anything about this yet, then.
|
||||
continue
|
||||
case val.IsNull():
|
||||
stmts = append(stmts, fmt.Sprintf("%s set to null", traversalStr))
|
||||
default:
|
||||
stmts = append(stmts, fmt.Sprintf("%s as %s", traversalStr, w.valueStr(val)))
|
||||
}
|
||||
seen[traversalStr] = struct{}{}
|
||||
}
|
||||
|
||||
sort.Strings(stmts) // FIXME: Should maybe use a traversal-aware sort that can sort numeric indexes properly?
|
||||
last := len(stmts) - 1
|
||||
|
||||
for i, stmt := range stmts {
|
||||
switch i {
|
||||
case 0:
|
||||
w.wr.Write([]byte{'w', 'i', 't', 'h', ' '})
|
||||
default:
|
||||
w.wr.Write([]byte{' ', ' ', ' ', ' ', ' '})
|
||||
}
|
||||
w.wr.Write([]byte(stmt))
|
||||
switch i {
|
||||
case last:
|
||||
w.wr.Write([]byte{'.', '\n', '\n'})
|
||||
default:
|
||||
w.wr.Write([]byte{',', '\n'})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if diag.Detail != "" {
|
||||
detail := diag.Detail
|
||||
if w.width != 0 {
|
||||
detail = wordwrap.WrapString(detail, w.width)
|
||||
}
|
||||
fmt.Fprintf(w.wr, "%s\n\n", detail)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *diagnosticTextWriter) WriteDiagnostics(diags Diagnostics) error {
|
||||
for _, diag := range diags {
|
||||
err := w.WriteDiagnostic(diag)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *diagnosticTextWriter) traversalStr(traversal Traversal) string {
|
||||
// This is a specialized subset of traversal rendering tailored to
|
||||
// producing helpful contextual messages in diagnostics. It is not
|
||||
// comprehensive nor intended to be used for other purposes.
|
||||
|
||||
var buf bytes.Buffer
|
||||
for _, step := range traversal {
|
||||
switch tStep := step.(type) {
|
||||
case TraverseRoot:
|
||||
buf.WriteString(tStep.Name)
|
||||
case TraverseAttr:
|
||||
buf.WriteByte('.')
|
||||
buf.WriteString(tStep.Name)
|
||||
case TraverseIndex:
|
||||
buf.WriteByte('[')
|
||||
if keyTy := tStep.Key.Type(); keyTy.IsPrimitiveType() {
|
||||
buf.WriteString(w.valueStr(tStep.Key))
|
||||
} else {
|
||||
// We'll just use a placeholder for more complex values,
|
||||
// since otherwise our result could grow ridiculously long.
|
||||
buf.WriteString("...")
|
||||
}
|
||||
buf.WriteByte(']')
|
||||
}
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func (w *diagnosticTextWriter) valueStr(val cty.Value) string {
|
||||
// This is a specialized subset of value rendering tailored to producing
|
||||
// helpful but concise messages in diagnostics. It is not comprehensive
|
||||
// nor intended to be used for other purposes.
|
||||
|
||||
ty := val.Type()
|
||||
switch {
|
||||
case val.IsNull():
|
||||
return "null"
|
||||
case !val.IsKnown():
|
||||
// Should never happen here because we should filter before we get
|
||||
// in here, but we'll do something reasonable rather than panic.
|
||||
return "(not yet known)"
|
||||
case ty == cty.Bool:
|
||||
if val.True() {
|
||||
return "true"
|
||||
}
|
||||
return "false"
|
||||
case ty == cty.Number:
|
||||
bf := val.AsBigFloat()
|
||||
return bf.Text('g', 10)
|
||||
case ty == cty.String:
|
||||
// Go string syntax is not exactly the same as HCL native string syntax,
|
||||
// but we'll accept the minor edge-cases where this is different here
|
||||
// for now, just to get something reasonable here.
|
||||
return fmt.Sprintf("%q", val.AsString())
|
||||
case ty.IsCollectionType() || ty.IsTupleType():
|
||||
l := val.LengthInt()
|
||||
switch l {
|
||||
case 0:
|
||||
return "empty " + ty.FriendlyName()
|
||||
case 1:
|
||||
return ty.FriendlyName() + " with 1 element"
|
||||
default:
|
||||
return fmt.Sprintf("%s with %d elements", ty.FriendlyName(), l)
|
||||
}
|
||||
case ty.IsObjectType():
|
||||
atys := ty.AttributeTypes()
|
||||
l := len(atys)
|
||||
switch l {
|
||||
case 0:
|
||||
return "object with no attributes"
|
||||
case 1:
|
||||
var name string
|
||||
for k := range atys {
|
||||
name = k
|
||||
}
|
||||
return fmt.Sprintf("object with 1 attribute %q", name)
|
||||
default:
|
||||
return fmt.Sprintf("object with %d attributes", l)
|
||||
}
|
||||
default:
|
||||
return ty.FriendlyName()
|
||||
}
|
||||
}
|
||||
|
||||
func contextString(file *File, offset int) string {
|
||||
type contextStringer interface {
|
||||
ContextString(offset int) string
|
||||
}
|
||||
|
||||
if cser, ok := file.Nav.(contextStringer); ok {
|
||||
return cser.ContextString(offset)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
24
vendor/github.com/hashicorp/hcl/v2/didyoumean.go
generated
vendored
Normal file
24
vendor/github.com/hashicorp/hcl/v2/didyoumean.go
generated
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
package hcl
|
||||
|
||||
import (
|
||||
"github.com/agext/levenshtein"
|
||||
)
|
||||
|
||||
// nameSuggestion tries to find a name from the given slice of suggested names
|
||||
// that is close to the given name and returns it if found. If no suggestion
|
||||
// is close enough, returns the empty string.
|
||||
//
|
||||
// The suggestions are tried in order, so earlier suggestions take precedence
|
||||
// if the given string is similar to two or more suggestions.
|
||||
//
|
||||
// This function is intended to be used with a relatively-small number of
|
||||
// suggestions. It's not optimized for hundreds or thousands of them.
|
||||
func nameSuggestion(given string, suggestions []string) string {
|
||||
for _, suggestion := range suggestions {
|
||||
dist := levenshtein.Distance(given, suggestion, nil)
|
||||
if dist < 3 { // threshold determined experimentally
|
||||
return suggestion
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
34
vendor/github.com/hashicorp/hcl/v2/doc.go
generated
vendored
Normal file
34
vendor/github.com/hashicorp/hcl/v2/doc.go
generated
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
// Package hcl contains the main modelling types and general utility functions
|
||||
// for HCL.
|
||||
//
|
||||
// For a simple entry point into HCL, see the package in the subdirectory
|
||||
// "hclsimple", which has an opinionated function Decode that can decode HCL
|
||||
// configurations in either native HCL syntax or JSON syntax into a Go struct
|
||||
// type:
|
||||
//
|
||||
// package main
|
||||
//
|
||||
// import (
|
||||
// "log"
|
||||
// "github.com/hashicorp/hcl/v2/hclsimple"
|
||||
// )
|
||||
//
|
||||
// type Config struct {
|
||||
// LogLevel string `hcl:"log_level"`
|
||||
// }
|
||||
//
|
||||
// func main() {
|
||||
// var config Config
|
||||
// err := hclsimple.DecodeFile("config.hcl", nil, &config)
|
||||
// if err != nil {
|
||||
// log.Fatalf("Failed to load configuration: %s", err)
|
||||
// }
|
||||
// log.Printf("Configuration is %#v", config)
|
||||
// }
|
||||
//
|
||||
// If your application needs more control over the evaluation of the
|
||||
// configuration, you can use the functions in the subdirectories hclparse,
|
||||
// gohcl, hcldec, etc. Splitting the handling of configuration into multiple
|
||||
// phases allows for advanced patterns such as allowing expressions in one
|
||||
// part of the configuration to refer to data defined in another part.
|
||||
package hcl
|
||||
25
vendor/github.com/hashicorp/hcl/v2/eval_context.go
generated
vendored
Normal file
25
vendor/github.com/hashicorp/hcl/v2/eval_context.go
generated
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
package hcl
|
||||
|
||||
import (
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
"github.com/zclconf/go-cty/cty/function"
|
||||
)
|
||||
|
||||
// An EvalContext provides the variables and functions that should be used
|
||||
// to evaluate an expression.
|
||||
type EvalContext struct {
|
||||
Variables map[string]cty.Value
|
||||
Functions map[string]function.Function
|
||||
parent *EvalContext
|
||||
}
|
||||
|
||||
// NewChild returns a new EvalContext that is a child of the receiver.
|
||||
func (ctx *EvalContext) NewChild() *EvalContext {
|
||||
return &EvalContext{parent: ctx}
|
||||
}
|
||||
|
||||
// Parent returns the parent of the receiver, or nil if the receiver has
|
||||
// no parent.
|
||||
func (ctx *EvalContext) Parent() *EvalContext {
|
||||
return ctx.parent
|
||||
}
|
||||
46
vendor/github.com/hashicorp/hcl/v2/expr_call.go
generated
vendored
Normal file
46
vendor/github.com/hashicorp/hcl/v2/expr_call.go
generated
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
package hcl
|
||||
|
||||
// ExprCall tests if the given expression is a function call and,
|
||||
// if so, extracts the function name and the expressions that represent
|
||||
// the arguments. If the given expression is not statically a function call,
|
||||
// error diagnostics are returned.
|
||||
//
|
||||
// A particular Expression implementation can support this function by
|
||||
// offering a method called ExprCall that takes no arguments and returns
|
||||
// *StaticCall. This method should return nil if a static call cannot
|
||||
// be extracted. Alternatively, an implementation can support
|
||||
// UnwrapExpression to delegate handling of this function to a wrapped
|
||||
// Expression object.
|
||||
func ExprCall(expr Expression) (*StaticCall, Diagnostics) {
|
||||
type exprCall interface {
|
||||
ExprCall() *StaticCall
|
||||
}
|
||||
|
||||
physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool {
|
||||
_, supported := expr.(exprCall)
|
||||
return supported
|
||||
})
|
||||
|
||||
if exC, supported := physExpr.(exprCall); supported {
|
||||
if call := exC.ExprCall(); call != nil {
|
||||
return call, nil
|
||||
}
|
||||
}
|
||||
return nil, Diagnostics{
|
||||
&Diagnostic{
|
||||
Severity: DiagError,
|
||||
Summary: "Invalid expression",
|
||||
Detail: "A static function call is required.",
|
||||
Subject: expr.StartRange().Ptr(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// StaticCall represents a function call that was extracted statically from
|
||||
// an expression using ExprCall.
|
||||
type StaticCall struct {
|
||||
Name string
|
||||
NameRange Range
|
||||
Arguments []Expression
|
||||
ArgsRange Range
|
||||
}
|
||||
37
vendor/github.com/hashicorp/hcl/v2/expr_list.go
generated
vendored
Normal file
37
vendor/github.com/hashicorp/hcl/v2/expr_list.go
generated
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
package hcl
|
||||
|
||||
// ExprList tests if the given expression is a static list construct and,
|
||||
// if so, extracts the expressions that represent the list elements.
|
||||
// If the given expression is not a static list, error diagnostics are
|
||||
// returned.
|
||||
//
|
||||
// A particular Expression implementation can support this function by
|
||||
// offering a method called ExprList that takes no arguments and returns
|
||||
// []Expression. This method should return nil if a static list cannot
|
||||
// be extracted. Alternatively, an implementation can support
|
||||
// UnwrapExpression to delegate handling of this function to a wrapped
|
||||
// Expression object.
|
||||
func ExprList(expr Expression) ([]Expression, Diagnostics) {
|
||||
type exprList interface {
|
||||
ExprList() []Expression
|
||||
}
|
||||
|
||||
physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool {
|
||||
_, supported := expr.(exprList)
|
||||
return supported
|
||||
})
|
||||
|
||||
if exL, supported := physExpr.(exprList); supported {
|
||||
if list := exL.ExprList(); list != nil {
|
||||
return list, nil
|
||||
}
|
||||
}
|
||||
return nil, Diagnostics{
|
||||
&Diagnostic{
|
||||
Severity: DiagError,
|
||||
Summary: "Invalid expression",
|
||||
Detail: "A static list expression is required.",
|
||||
Subject: expr.StartRange().Ptr(),
|
||||
},
|
||||
}
|
||||
}
|
||||
44
vendor/github.com/hashicorp/hcl/v2/expr_map.go
generated
vendored
Normal file
44
vendor/github.com/hashicorp/hcl/v2/expr_map.go
generated
vendored
Normal file
@@ -0,0 +1,44 @@
|
||||
package hcl
|
||||
|
||||
// ExprMap tests if the given expression is a static map construct and,
|
||||
// if so, extracts the expressions that represent the map elements.
|
||||
// If the given expression is not a static map, error diagnostics are
|
||||
// returned.
|
||||
//
|
||||
// A particular Expression implementation can support this function by
|
||||
// offering a method called ExprMap that takes no arguments and returns
|
||||
// []KeyValuePair. This method should return nil if a static map cannot
|
||||
// be extracted. Alternatively, an implementation can support
|
||||
// UnwrapExpression to delegate handling of this function to a wrapped
|
||||
// Expression object.
|
||||
func ExprMap(expr Expression) ([]KeyValuePair, Diagnostics) {
|
||||
type exprMap interface {
|
||||
ExprMap() []KeyValuePair
|
||||
}
|
||||
|
||||
physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool {
|
||||
_, supported := expr.(exprMap)
|
||||
return supported
|
||||
})
|
||||
|
||||
if exM, supported := physExpr.(exprMap); supported {
|
||||
if pairs := exM.ExprMap(); pairs != nil {
|
||||
return pairs, nil
|
||||
}
|
||||
}
|
||||
return nil, Diagnostics{
|
||||
&Diagnostic{
|
||||
Severity: DiagError,
|
||||
Summary: "Invalid expression",
|
||||
Detail: "A static map expression is required.",
|
||||
Subject: expr.StartRange().Ptr(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// KeyValuePair represents a pair of expressions that serve as a single item
|
||||
// within a map or object definition construct.
|
||||
type KeyValuePair struct {
|
||||
Key Expression
|
||||
Value Expression
|
||||
}
|
||||
68
vendor/github.com/hashicorp/hcl/v2/expr_unwrap.go
generated
vendored
Normal file
68
vendor/github.com/hashicorp/hcl/v2/expr_unwrap.go
generated
vendored
Normal file
@@ -0,0 +1,68 @@
|
||||
package hcl
|
||||
|
||||
type unwrapExpression interface {
|
||||
UnwrapExpression() Expression
|
||||
}
|
||||
|
||||
// UnwrapExpression removes any "wrapper" expressions from the given expression,
|
||||
// to recover the representation of the physical expression given in source
|
||||
// code.
|
||||
//
|
||||
// Sometimes wrapping expressions are used to modify expression behavior, e.g.
|
||||
// in extensions that need to make some local variables available to certain
|
||||
// sub-trees of the configuration. This can make it difficult to reliably
|
||||
// type-assert on the physical AST types used by the underlying syntax.
|
||||
//
|
||||
// Unwrapping an expression may modify its behavior by stripping away any
|
||||
// additional constraints or capabilities being applied to the Value and
|
||||
// Variables methods, so this function should generally only be used prior
|
||||
// to operations that concern themselves with the static syntax of the input
|
||||
// configuration, and not with the effective value of the expression.
|
||||
//
|
||||
// Wrapper expression types must support unwrapping by implementing a method
|
||||
// called UnwrapExpression that takes no arguments and returns the embedded
|
||||
// Expression. Implementations of this method should peel away only one level
|
||||
// of wrapping, if multiple are present. This method may return nil to
|
||||
// indicate _dynamically_ that no wrapped expression is available, for
|
||||
// expression types that might only behave as wrappers in certain cases.
|
||||
func UnwrapExpression(expr Expression) Expression {
|
||||
for {
|
||||
unwrap, wrapped := expr.(unwrapExpression)
|
||||
if !wrapped {
|
||||
return expr
|
||||
}
|
||||
innerExpr := unwrap.UnwrapExpression()
|
||||
if innerExpr == nil {
|
||||
return expr
|
||||
}
|
||||
expr = innerExpr
|
||||
}
|
||||
}
|
||||
|
||||
// UnwrapExpressionUntil is similar to UnwrapExpression except it gives the
// caller an opportunity to test each level of unwrapping to see whether a
// particular expression is accepted.
//
// This could be used, for example, to unwrap until a particular other
// interface is satisfied, regardless of which wrapping level it is satisfied
// at.
//
// The given callback function must return false to continue unwrapping, or
// true to accept and return the proposed expression given. If the callback
// function rejects even the final, physical expression then the result of
// this function is nil.
func UnwrapExpressionUntil(expr Expression, until func(Expression) bool) Expression {
	for {
		if until(expr) {
			return expr
		}
		unwrap, wrapped := expr.(unwrapExpression)
		if !wrapped {
			// Reached the physical expression without the callback
			// ever accepting: report failure with nil.
			return nil
		}
		expr = unwrap.UnwrapExpression()
		if expr == nil {
			// Dynamically not wrapping anything right now.
			return nil
		}
	}
}
|
||||
184
vendor/github.com/hashicorp/hcl/v2/ext/dynblock/README.md
generated
vendored
Normal file
184
vendor/github.com/hashicorp/hcl/v2/ext/dynblock/README.md
generated
vendored
Normal file
@@ -0,0 +1,184 @@
|
||||
# HCL Dynamic Blocks Extension
|
||||
|
||||
This HCL extension implements a special block type named "dynamic" that can
|
||||
be used to dynamically generate blocks of other types by iterating over
|
||||
collection values.
|
||||
|
||||
Normally the block structure in an HCL configuration file is rigid, even
|
||||
though dynamic expressions can be used within attribute values. This is
|
||||
convenient for most applications since it allows the overall structure of
|
||||
the document to be decoded easily, but in some applications it is desirable
|
||||
to allow dynamic block generation within certain portions of the configuration.
|
||||
|
||||
Dynamic block generation is performed using the `dynamic` block type:
|
||||
|
||||
```hcl
|
||||
toplevel {
|
||||
nested {
|
||||
foo = "static block 1"
|
||||
}
|
||||
|
||||
dynamic "nested" {
|
||||
for_each = ["a", "b", "c"]
|
||||
iterator = nested
|
||||
content {
|
||||
foo = "dynamic block ${nested.value}"
|
||||
}
|
||||
}
|
||||
|
||||
nested {
|
||||
foo = "static block 2"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The above is interpreted as if it were written as follows:
|
||||
|
||||
```hcl
|
||||
toplevel {
|
||||
nested {
|
||||
foo = "static block 1"
|
||||
}
|
||||
|
||||
nested {
|
||||
foo = "dynamic block a"
|
||||
}
|
||||
|
||||
nested {
|
||||
foo = "dynamic block b"
|
||||
}
|
||||
|
||||
nested {
|
||||
foo = "dynamic block c"
|
||||
}
|
||||
|
||||
nested {
|
||||
foo = "static block 2"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Since HCL block syntax is not normally exposed to the possibility of unknown
|
||||
values, this extension must make some compromises when asked to iterate over
|
||||
an unknown collection. If the length of the collection cannot be statically
|
||||
recognized (because it is an unknown value of list, map, or set type) then
|
||||
the `dynamic` construct will generate a _single_ dynamic block whose iterator
|
||||
key and value are both unknown values of the dynamic pseudo-type, thus causing
|
||||
any attribute values derived from iteration to appear as unknown values. There
|
||||
is no explicit representation of the fact that the length of the collection may
|
||||
eventually be different than one.
|
||||
|
||||
## Usage
|
||||
|
||||
Pass a body to function `Expand` to obtain a new body that will, on access
|
||||
to its content, evaluate and expand any nested `dynamic` blocks.
|
||||
Dynamic block processing is also automatically propagated into any nested
|
||||
blocks that are returned, allowing users to nest dynamic blocks inside
|
||||
one another and to nest dynamic blocks inside other static blocks.
|
||||
|
||||
HCL structural decoding does not normally have access to an `EvalContext`, so
|
||||
any variables and functions that should be available to the `for_each`
|
||||
and `labels` expressions must be passed in when calling `Expand`. Expressions
|
||||
within the `content` block are evaluated separately and so can be passed a
|
||||
separate `EvalContext` if desired, during normal attribute expression
|
||||
evaluation.
|
||||
|
||||
## Detecting Variables
|
||||
|
||||
Some applications dynamically generate an `EvalContext` by analyzing which
|
||||
variables are referenced by an expression before evaluating it.
|
||||
|
||||
This unfortunately requires some extra effort when this analysis is required
|
||||
for the context passed to `Expand`: the HCL API requires a schema to be
|
||||
provided in order to do any analysis of the blocks in a body, but the low-level
|
||||
schema model provides a description of only one level of nested blocks at
|
||||
a time, and thus a new schema must be provided for each additional level of
|
||||
nesting.
|
||||
|
||||
To make this arduous process as convenient as possible, this package provides
|
||||
a helper function `WalkForEachVariables`, which returns a `WalkVariablesNode`
|
||||
instance that can be used to find variables directly in a given body and also
|
||||
determine which nested blocks require recursive calls. Using this mechanism
|
||||
requires that the caller be able to look up a schema given a nested block type.
|
||||
For _simple_ formats where a specific block type name always has the same schema
|
||||
regardless of context, a walk can be implemented as follows:
|
||||
|
||||
```go
|
||||
func walkVariables(node dynblock.WalkVariablesNode, schema *hcl.BodySchema) []hcl.Traversal {
|
||||
vars, children := node.Visit(schema)
|
||||
|
||||
for _, child := range children {
|
||||
var childSchema *hcl.BodySchema
|
||||
switch child.BlockTypeName {
|
||||
case "a":
|
||||
childSchema = &hcl.BodySchema{
|
||||
Blocks: []hcl.BlockHeaderSchema{
|
||||
{
|
||||
Type: "b",
|
||||
LabelNames: []string{"key"},
|
||||
},
|
||||
},
|
||||
}
|
||||
case "b":
|
||||
childSchema = &hcl.BodySchema{
|
||||
Attributes: []hcl.AttributeSchema{
|
||||
{
|
||||
Name: "val",
|
||||
Required: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
default:
|
||||
// Should never happen, because the above cases should be exhaustive
|
||||
// for the application's configuration format.
|
||||
panic(fmt.Errorf("can't find schema for unknown block type %q", child.BlockTypeName))
|
||||
}
|
||||
|
||||
		vars = append(vars, walkVariables(child.Node, childSchema)...)
	}

	return vars
}
|
||||
```
|
||||
|
||||
### Detecting Variables with `hcldec` Specifications
|
||||
|
||||
For applications that use the higher-level `hcldec` package to decode nested
|
||||
configuration structures into `cty` values, the same specification can be used
|
||||
to automatically drive the recursive variable-detection walk described above.
|
||||
|
||||
The helper function `ForEachVariablesHCLDec` allows an entire recursive
|
||||
configuration structure to be analyzed in a single call given a `hcldec.Spec`
|
||||
that describes the nested block structure. This means a `hcldec`-based
|
||||
application can support dynamic blocks with only a little additional effort:
|
||||
|
||||
```go
|
||||
func decodeBody(body hcl.Body, spec hcldec.Spec) (cty.Value, hcl.Diagnostics) {
|
||||
// Determine which variables are needed to expand dynamic blocks
|
||||
neededForDynamic := dynblock.ForEachVariablesHCLDec(body, spec)
|
||||
|
||||
// Build a suitable EvalContext and expand dynamic blocks
|
||||
dynCtx := buildEvalContext(neededForDynamic)
|
||||
dynBody := dynblock.Expand(body, dynCtx)
|
||||
|
||||
// Determine which variables are needed to fully decode the expanded body
|
||||
// This will analyze expressions that came both from static blocks in the
|
||||
// original body and from blocks that were dynamically added by Expand.
|
||||
neededForDecode := hcldec.Variables(dynBody, spec)
|
||||
|
||||
// Build a suitable EvalContext and then fully decode the body as per the
|
||||
// hcldec specification.
|
||||
decCtx := buildEvalContext(neededForDecode)
|
||||
return hcldec.Decode(dynBody, spec, decCtx)
|
||||
}
|
||||
|
||||
func buildEvalContext(needed []hcl.Traversal) *hcl.EvalContext {
|
||||
// (to be implemented by your application)
|
||||
}
|
||||
```
|
||||
|
||||
## Performance
|
||||
|
||||
This extension is going quite harshly against the grain of the HCL API, and
|
||||
so it uses lots of wrapping objects and temporary data structures to get its
|
||||
work done. HCL in general is not suitable for use in high-performance situations
|
||||
or situations sensitive to memory pressure, but that is _especially_ true for
|
||||
this extension.
|
||||
262
vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_body.go
generated
vendored
Normal file
262
vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_body.go
generated
vendored
Normal file
@@ -0,0 +1,262 @@
|
||||
package dynblock
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// expandBody wraps another hcl.Body and expands any "dynamic" blocks found
// inside whenever Content or PartialContent is called.
type expandBody struct {
	original   hcl.Body
	forEachCtx *hcl.EvalContext
	iteration  *iteration // non-nil if we're nested inside another "dynamic" block

	// These are used with PartialContent to produce a "remaining items"
	// body to return. They are nil on all bodies fresh out of the transformer.
	//
	// Note that this is re-implemented here rather than delegating to the
	// existing support in the underlying body because we need to
	// retain access to the entire original body on subsequent decode operations
	// so we can retain any "dynamic" blocks for types we didn't consume
	// on the first pass.
	hiddenAttrs  map[string]struct{}
	hiddenBlocks map[string]hcl.BlockHeaderSchema
}
|
||||
|
||||
func (b *expandBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
|
||||
extSchema := b.extendSchema(schema)
|
||||
rawContent, diags := b.original.Content(extSchema)
|
||||
|
||||
blocks, blockDiags := b.expandBlocks(schema, rawContent.Blocks, false)
|
||||
diags = append(diags, blockDiags...)
|
||||
attrs := b.prepareAttributes(rawContent.Attributes)
|
||||
|
||||
content := &hcl.BodyContent{
|
||||
Attributes: attrs,
|
||||
Blocks: blocks,
|
||||
MissingItemRange: b.original.MissingItemRange(),
|
||||
}
|
||||
|
||||
return content, diags
|
||||
}
|
||||
|
||||
func (b *expandBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
|
||||
extSchema := b.extendSchema(schema)
|
||||
rawContent, _, diags := b.original.PartialContent(extSchema)
|
||||
// We discard the "remain" argument above because we're going to construct
|
||||
// our own remain that also takes into account remaining "dynamic" blocks.
|
||||
|
||||
blocks, blockDiags := b.expandBlocks(schema, rawContent.Blocks, true)
|
||||
diags = append(diags, blockDiags...)
|
||||
attrs := b.prepareAttributes(rawContent.Attributes)
|
||||
|
||||
content := &hcl.BodyContent{
|
||||
Attributes: attrs,
|
||||
Blocks: blocks,
|
||||
MissingItemRange: b.original.MissingItemRange(),
|
||||
}
|
||||
|
||||
remain := &expandBody{
|
||||
original: b.original,
|
||||
forEachCtx: b.forEachCtx,
|
||||
iteration: b.iteration,
|
||||
hiddenAttrs: make(map[string]struct{}),
|
||||
hiddenBlocks: make(map[string]hcl.BlockHeaderSchema),
|
||||
}
|
||||
for name := range b.hiddenAttrs {
|
||||
remain.hiddenAttrs[name] = struct{}{}
|
||||
}
|
||||
for typeName, blockS := range b.hiddenBlocks {
|
||||
remain.hiddenBlocks[typeName] = blockS
|
||||
}
|
||||
for _, attrS := range schema.Attributes {
|
||||
remain.hiddenAttrs[attrS.Name] = struct{}{}
|
||||
}
|
||||
for _, blockS := range schema.Blocks {
|
||||
remain.hiddenBlocks[blockS.Type] = blockS
|
||||
}
|
||||
|
||||
return content, remain, diags
|
||||
}
|
||||
|
||||
// extendSchema augments the caller's requested schema with our special
// "dynamic" block type and with any attributes/block types hidden by an
// earlier PartialContent call, so that the underlying body's own schema
// checking accepts everything we need to see.
func (b *expandBody) extendSchema(schema *hcl.BodySchema) *hcl.BodySchema {
	// We augment the requested schema to also include our special "dynamic"
	// block type, since then we'll get instances of it interleaved with
	// all of the literal child blocks we must also include.
	extSchema := &hcl.BodySchema{
		Attributes: schema.Attributes,
		// Capacity reserves room for the hidden blocks plus "dynamic"
		// so the appends below don't reallocate.
		Blocks: make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+len(b.hiddenBlocks)+1),
	}
	copy(extSchema.Blocks, schema.Blocks)
	extSchema.Blocks = append(extSchema.Blocks, dynamicBlockHeaderSchema)

	// If we have any hiddenBlocks then we also need to register those here
	// so that a call to "Content" on the underlying body won't fail.
	// (We'll filter these out again once we process the result of either
	// Content or PartialContent.)
	for _, blockS := range b.hiddenBlocks {
		extSchema.Blocks = append(extSchema.Blocks, blockS)
	}

	// If we have any hiddenAttrs then we also need to register these, for
	// the same reason as we deal with hiddenBlocks above.
	if len(b.hiddenAttrs) != 0 {
		newAttrs := make([]hcl.AttributeSchema, len(schema.Attributes), len(schema.Attributes)+len(b.hiddenAttrs))
		copy(newAttrs, extSchema.Attributes)
		for name := range b.hiddenAttrs {
			newAttrs = append(newAttrs, hcl.AttributeSchema{
				Name:     name,
				Required: false, // hidden attrs were validated on the earlier pass
			})
		}
		extSchema.Attributes = newAttrs
	}

	return extSchema
}
|
||||
|
||||
func (b *expandBody) prepareAttributes(rawAttrs hcl.Attributes) hcl.Attributes {
|
||||
if len(b.hiddenAttrs) == 0 && b.iteration == nil {
|
||||
// Easy path: just pass through the attrs from the original body verbatim
|
||||
return rawAttrs
|
||||
}
|
||||
|
||||
// Otherwise we have some work to do: we must filter out any attributes
|
||||
// that are hidden (since a previous PartialContent call already saw these)
|
||||
// and wrap the expressions of the inner attributes so that they will
|
||||
// have access to our iteration variables.
|
||||
attrs := make(hcl.Attributes, len(rawAttrs))
|
||||
for name, rawAttr := range rawAttrs {
|
||||
if _, hidden := b.hiddenAttrs[name]; hidden {
|
||||
continue
|
||||
}
|
||||
if b.iteration != nil {
|
||||
attr := *rawAttr // shallow copy so we can mutate it
|
||||
attr.Expr = exprWrap{
|
||||
Expression: attr.Expr,
|
||||
i: b.iteration,
|
||||
}
|
||||
attrs[name] = &attr
|
||||
} else {
|
||||
// If we have no active iteration then no wrapping is required.
|
||||
attrs[name] = rawAttr
|
||||
}
|
||||
}
|
||||
return attrs
|
||||
}
|
||||
|
||||
// expandBlocks processes the raw blocks returned by the underlying body:
// "dynamic" blocks are evaluated and replaced by the blocks they generate,
// hidden blocks (from an earlier PartialContent call) are dropped, and
// static blocks are passed through with our iteration context attached.
// When partial is true, "dynamic" blocks for unrequested types are silently
// skipped instead of producing an error.
func (b *expandBody) expandBlocks(schema *hcl.BodySchema, rawBlocks hcl.Blocks, partial bool) (hcl.Blocks, hcl.Diagnostics) {
	var blocks hcl.Blocks
	var diags hcl.Diagnostics

	for _, rawBlock := range rawBlocks {
		switch rawBlock.Type {
		case "dynamic":
			// The single label names the real block type to generate.
			realBlockType := rawBlock.Labels[0]
			if _, hidden := b.hiddenBlocks[realBlockType]; hidden {
				continue
			}

			var blockS *hcl.BlockHeaderSchema
			for _, candidate := range schema.Blocks {
				if candidate.Type == realBlockType {
					// Taking the address of the loop variable is safe
					// here only because we break immediately, so
					// candidate is never overwritten afterwards.
					blockS = &candidate
					break
				}
			}
			if blockS == nil {
				// Not a block type that the caller requested.
				if !partial {
					diags = append(diags, &hcl.Diagnostic{
						Severity: hcl.DiagError,
						Summary:  "Unsupported block type",
						Detail:   fmt.Sprintf("Blocks of type %q are not expected here.", realBlockType),
						Subject:  &rawBlock.LabelRanges[0],
					})
				}
				continue
			}

			spec, specDiags := b.decodeSpec(blockS, rawBlock)
			diags = append(diags, specDiags...)
			if specDiags.HasErrors() {
				continue
			}

			if spec.forEachVal.IsKnown() {
				for it := spec.forEachVal.ElementIterator(); it.Next(); {
					key, value := it.Element()
					i := b.iteration.MakeChild(spec.iteratorName, key, value)

					block, blockDiags := spec.newBlock(i, b.forEachCtx)
					diags = append(diags, blockDiags...)
					if block != nil {
						// Attach our new iteration context so that attributes
						// and other nested blocks can refer to our iterator.
						block.Body = b.expandChild(block.Body, i)
						blocks = append(blocks, block)
					}
				}
			} else {
				// If our top-level iteration value isn't known then we're forced
				// to compromise since HCL doesn't have any concept of an
				// "unknown block". In this case then, we'll produce a single
				// dynamic block with the iterator values set to DynamicVal,
				// which at least makes the potential for a block visible
				// in our result, even though it's not represented in a fully-accurate
				// way.
				i := b.iteration.MakeChild(spec.iteratorName, cty.DynamicVal, cty.DynamicVal)
				block, blockDiags := spec.newBlock(i, b.forEachCtx)
				diags = append(diags, blockDiags...)
				if block != nil {
					block.Body = b.expandChild(block.Body, i)

					// We additionally force all of the leaf attribute values
					// in the result to be unknown so the calling application
					// can, if necessary, use that as a heuristic to detect
					// when a single nested block might be standing in for
					// multiple blocks yet to be expanded. This retains the
					// structure of the generated body but forces all of its
					// leaf attribute values to be unknown.
					block.Body = unknownBody{block.Body}

					blocks = append(blocks, block)
				}
			}

		default:
			if _, hidden := b.hiddenBlocks[rawBlock.Type]; !hidden {
				// A static block doesn't create a new iteration context, but
				// it does need to inherit _our own_ iteration context in
				// case it contains expressions that refer to our inherited
				// iterators, or nested "dynamic" blocks.
				expandedBlock := *rawBlock // shallow copy
				expandedBlock.Body = b.expandChild(rawBlock.Body, b.iteration)
				blocks = append(blocks, &expandedBlock)
			}
		}
	}

	return blocks, diags
}
|
||||
|
||||
func (b *expandBody) expandChild(child hcl.Body, i *iteration) hcl.Body {
|
||||
chiCtx := i.EvalContext(b.forEachCtx)
|
||||
ret := Expand(child, chiCtx)
|
||||
ret.(*expandBody).iteration = i
|
||||
return ret
|
||||
}
|
||||
|
||||
// JustAttributes returns all of the attributes of the body, delegating
// directly to the original body.
//
// blocks aren't allowed in JustAttributes mode and this body can
// only produce blocks, so we'll just pass straight through to our
// underlying body here.
//
// NOTE(review): unlike Content/PartialContent, this path applies neither
// hiddenAttrs filtering nor exprWrap iteration wrapping — presumably
// acceptable because JustAttributes is not used inside dynamic content;
// confirm against callers.
func (b *expandBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
	return b.original.JustAttributes()
}
|
||||
|
||||
// MissingItemRange returns the source range to report for absent required
// items, delegating to the original body.
func (b *expandBody) MissingItemRange() hcl.Range {
	return b.original.MissingItemRange()
}
|
||||
215
vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_spec.go
generated
vendored
Normal file
215
vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_spec.go
generated
vendored
Normal file
@@ -0,0 +1,215 @@
|
||||
package dynblock
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
"github.com/zclconf/go-cty/cty/convert"
|
||||
)
|
||||
|
||||
// expandSpec is the decoded form of a single "dynamic" block: the real
// block type it generates, the collection to iterate, the iterator name,
// and the label/content expressions used to build each generated block.
type expandSpec struct {
	blockType      string    // real block type to generate
	blockTypeRange hcl.Range // range of the "dynamic" block's label
	defRange       hcl.Range
	forEachVal     cty.Value // value of the for_each attribute
	iteratorName   string    // variable name exposing the current element
	labelExprs     []hcl.Expression
	contentBody    hcl.Body // body of the nested "content" block
	inherited      map[string]*iteration
}
|
||||
|
||||
// decodeSpec validates and decodes a raw "dynamic" block into an expandSpec,
// evaluating its for_each attribute against the expansion context and
// checking the iterator, labels, and content block. On any error it returns
// a nil spec along with the accumulated diagnostics.
func (b *expandBody) decodeSpec(blockS *hcl.BlockHeaderSchema, rawSpec *hcl.Block) (*expandSpec, hcl.Diagnostics) {
	var diags hcl.Diagnostics

	// The "labels" attribute is only allowed when the generated block
	// type actually takes labels.
	var schema *hcl.BodySchema
	if len(blockS.LabelNames) != 0 {
		schema = dynamicBlockBodySchemaLabels
	} else {
		schema = dynamicBlockBodySchemaNoLabels
	}

	specContent, specDiags := rawSpec.Body.Content(schema)
	diags = append(diags, specDiags...)
	if specDiags.HasErrors() {
		return nil, diags
	}

	//// for_each attribute

	eachAttr := specContent.Attributes["for_each"]
	eachVal, eachDiags := eachAttr.Expr.Value(b.forEachCtx)
	diags = append(diags, eachDiags...)

	if !eachVal.CanIterateElements() && eachVal.Type() != cty.DynamicPseudoType {
		// We skip this error for DynamicPseudoType because that means we either
		// have a null (which is checked immediately below) or an unknown
		// (which is handled in the expandBody Content methods).
		diags = append(diags, &hcl.Diagnostic{
			Severity:    hcl.DiagError,
			Summary:     "Invalid dynamic for_each value",
			Detail:      fmt.Sprintf("Cannot use a %s value in for_each. An iterable collection is required.", eachVal.Type().FriendlyName()),
			Subject:     eachAttr.Expr.Range().Ptr(),
			Expression:  eachAttr.Expr,
			EvalContext: b.forEachCtx,
		})
		return nil, diags
	}
	if eachVal.IsNull() {
		diags = append(diags, &hcl.Diagnostic{
			Severity:    hcl.DiagError,
			Summary:     "Invalid dynamic for_each value",
			Detail:      "Cannot use a null value in for_each.",
			Subject:     eachAttr.Expr.Range().Ptr(),
			Expression:  eachAttr.Expr,
			EvalContext: b.forEachCtx,
		})
		return nil, diags
	}

	//// iterator attribute

	// The iterator defaults to the generated block type's name unless
	// overridden by an explicit "iterator" attribute.
	iteratorName := blockS.Type
	if iteratorAttr := specContent.Attributes["iterator"]; iteratorAttr != nil {
		itTraversal, itDiags := hcl.AbsTraversalForExpr(iteratorAttr.Expr)
		diags = append(diags, itDiags...)
		if itDiags.HasErrors() {
			return nil, diags
		}

		if len(itTraversal) != 1 {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid dynamic iterator name",
				Detail:   "Dynamic iterator must be a single variable name.",
				Subject:  itTraversal.SourceRange().Ptr(),
			})
			return nil, diags
		}

		iteratorName = itTraversal.RootName()
	}

	//// labels attribute

	var labelExprs []hcl.Expression
	if labelsAttr := specContent.Attributes["labels"]; labelsAttr != nil {
		var labelDiags hcl.Diagnostics
		labelExprs, labelDiags = hcl.ExprList(labelsAttr.Expr)
		diags = append(diags, labelDiags...)
		if labelDiags.HasErrors() {
			return nil, diags
		}

		if len(labelExprs) > len(blockS.LabelNames) {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Extraneous dynamic block label",
				Detail:   fmt.Sprintf("Blocks of type %q require %d label(s).", blockS.Type, len(blockS.LabelNames)),
				Subject:  labelExprs[len(blockS.LabelNames)].Range().Ptr(),
			})
			return nil, diags
		} else if len(labelExprs) < len(blockS.LabelNames) {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Insufficient dynamic block labels",
				Detail:   fmt.Sprintf("Blocks of type %q require %d label(s).", blockS.Type, len(blockS.LabelNames)),
				Subject:  labelsAttr.Expr.Range().Ptr(),
			})
			return nil, diags
		}
	}

	// Since our schema requests only blocks of type "content", we can assume
	// that all entries in specContent.Blocks are content blocks.
	if len(specContent.Blocks) == 0 {
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Missing dynamic content block",
			Detail:   "A dynamic block must have a nested block of type \"content\" to describe the body of each generated block.",
			Subject:  &specContent.MissingItemRange,
		})
		return nil, diags
	}
	if len(specContent.Blocks) > 1 {
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Extraneous dynamic content block",
			Detail:   "Only one nested content block is allowed for each dynamic block.",
			Subject:  &specContent.Blocks[1].DefRange,
		})
		return nil, diags
	}

	return &expandSpec{
		blockType:      blockS.Type,
		blockTypeRange: rawSpec.LabelRanges[0],
		defRange:       rawSpec.DefRange,
		forEachVal:     eachVal,
		iteratorName:   iteratorName,
		labelExprs:     labelExprs,
		contentBody:    specContent.Blocks[0].Body,
	}, diags
}
|
||||
|
||||
// newBlock constructs one generated block for the given iteration,
// evaluating the spec's label expressions in a context that includes the
// iterator variables. Labels must convert to known, non-null strings;
// any violation yields a nil block and diagnostics.
func (s *expandSpec) newBlock(i *iteration, ctx *hcl.EvalContext) (*hcl.Block, hcl.Diagnostics) {
	var diags hcl.Diagnostics
	var labels []string
	var labelRanges []hcl.Range
	// Labels may refer to the iterator, so evaluate them in the
	// iteration's child context.
	lCtx := i.EvalContext(ctx)
	for _, labelExpr := range s.labelExprs {
		labelVal, labelDiags := labelExpr.Value(lCtx)
		diags = append(diags, labelDiags...)
		if labelDiags.HasErrors() {
			return nil, diags
		}

		var convErr error
		labelVal, convErr = convert.Convert(labelVal, cty.String)
		if convErr != nil {
			diags = append(diags, &hcl.Diagnostic{
				Severity:    hcl.DiagError,
				Summary:     "Invalid dynamic block label",
				Detail:      fmt.Sprintf("Cannot use this value as a dynamic block label: %s.", convErr),
				Subject:     labelExpr.Range().Ptr(),
				Expression:  labelExpr,
				EvalContext: lCtx,
			})
			return nil, diags
		}
		if labelVal.IsNull() {
			diags = append(diags, &hcl.Diagnostic{
				Severity:    hcl.DiagError,
				Summary:     "Invalid dynamic block label",
				Detail:      "Cannot use a null value as a dynamic block label.",
				Subject:     labelExpr.Range().Ptr(),
				Expression:  labelExpr,
				EvalContext: lCtx,
			})
			return nil, diags
		}
		if !labelVal.IsKnown() {
			diags = append(diags, &hcl.Diagnostic{
				Severity:    hcl.DiagError,
				Summary:     "Invalid dynamic block label",
				Detail:      "This value is not yet known. Dynamic block labels must be immediately-known values.",
				Subject:     labelExpr.Range().Ptr(),
				Expression:  labelExpr,
				EvalContext: lCtx,
			})
			return nil, diags
		}

		labels = append(labels, labelVal.AsString())
		labelRanges = append(labelRanges, labelExpr.Range())
	}

	block := &hcl.Block{
		Type:        s.blockType,
		TypeRange:   s.blockTypeRange,
		Labels:      labels,
		LabelRanges: labelRanges,
		DefRange:    s.defRange,
		Body:        s.contentBody,
	}

	return block, diags
}
|
||||
42
vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expr_wrap.go
generated
vendored
Normal file
42
vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expr_wrap.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
package dynblock
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// exprWrap wraps an expression from inside a dynamic block's content so
// that its evaluation context includes the block's iterator variables.
type exprWrap struct {
	hcl.Expression
	i *iteration // iteration whose variables must be in scope during Value
}
|
||||
|
||||
func (e exprWrap) Variables() []hcl.Traversal {
|
||||
raw := e.Expression.Variables()
|
||||
ret := make([]hcl.Traversal, 0, len(raw))
|
||||
|
||||
// Filter out traversals that refer to our iterator name or any
|
||||
// iterator we've inherited; we're going to provide those in
|
||||
// our Value wrapper, so the caller doesn't need to know about them.
|
||||
for _, traversal := range raw {
|
||||
rootName := traversal.RootName()
|
||||
if rootName == e.i.IteratorName {
|
||||
continue
|
||||
}
|
||||
if _, inherited := e.i.Inherited[rootName]; inherited {
|
||||
continue
|
||||
}
|
||||
ret = append(ret, traversal)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// Value evaluates the wrapped expression in a child of the given context
// that additionally exposes the iteration variables.
func (e exprWrap) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
	extCtx := e.i.EvalContext(ctx)
	return e.Expression.Value(extCtx)
}
|
||||
|
||||
// UnwrapExpression returns the expression being wrapped by this instance.
// This allows the original expression to be recovered by hcl.UnwrapExpression.
func (e exprWrap) UnwrapExpression() hcl.Expression {
	return e.Expression
}
|
||||
66
vendor/github.com/hashicorp/hcl/v2/ext/dynblock/iteration.go
generated
vendored
Normal file
66
vendor/github.com/hashicorp/hcl/v2/ext/dynblock/iteration.go
generated
vendored
Normal file
@@ -0,0 +1,66 @@
|
||||
package dynblock
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// iteration records the state of one element of a dynamic block's for_each
// collection, along with any outer iterations whose variables remain
// visible to nested expressions.
type iteration struct {
	IteratorName string    // variable name under which Key/Value are exposed
	Key          cty.Value // current element's key
	Value        cty.Value // current element's value
	Inherited    map[string]*iteration
}
|
||||
|
||||
// MakeIteration constructs the iteration state for one element of this
// spec's for_each collection, carrying forward the spec's inherited
// iterations.
func (s *expandSpec) MakeIteration(key, value cty.Value) *iteration {
	return &iteration{
		IteratorName: s.iteratorName,
		Key:          key,
		Value:        value,
		Inherited:    s.inherited,
	}
}
|
||||
|
||||
// Object returns this iteration's value as it is exposed to expressions:
// an object with "key" and "value" attributes.
func (i *iteration) Object() cty.Value {
	return cty.ObjectVal(map[string]cty.Value{
		"key":   i.Key,
		"value": i.Value,
	})
}
|
||||
|
||||
func (i *iteration) EvalContext(base *hcl.EvalContext) *hcl.EvalContext {
|
||||
new := base.NewChild()
|
||||
|
||||
if i != nil {
|
||||
new.Variables = map[string]cty.Value{}
|
||||
for name, otherIt := range i.Inherited {
|
||||
new.Variables[name] = otherIt.Object()
|
||||
}
|
||||
new.Variables[i.IteratorName] = i.Object()
|
||||
}
|
||||
|
||||
return new
|
||||
}
|
||||
|
||||
// MakeChild constructs the iteration state for a nested dynamic block,
// recording this iteration (and everything it inherited) so the child's
// expressions can still refer to the outer iterators by name.
func (i *iteration) MakeChild(iteratorName string, key, value cty.Value) *iteration {
	if i == nil {
		// No parent iteration, so create an entirely new root iteration.
		return &iteration{
			IteratorName: iteratorName,
			Key:          key,
			Value:        value,
		}
	}

	// Copy the inherited map rather than mutate the parent's, then add
	// the parent itself as an inherited iteration of the child.
	inherited := map[string]*iteration{}
	for name, otherIt := range i.Inherited {
		inherited[name] = otherIt
	}
	inherited[i.IteratorName] = i
	return &iteration{
		IteratorName: iteratorName,
		Key:          key,
		Value:        value,
		Inherited:    inherited,
	}
}
|
||||
47
vendor/github.com/hashicorp/hcl/v2/ext/dynblock/public.go
generated
vendored
Normal file
47
vendor/github.com/hashicorp/hcl/v2/ext/dynblock/public.go
generated
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
// Package dynblock provides an extension to HCL that allows dynamic
|
||||
// declaration of nested blocks in certain contexts via a special block type
|
||||
// named "dynamic".
|
||||
package dynblock
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
)
|
||||
|
||||
// Expand "dynamic" blocks in the given body, returning a new body that
|
||||
// has those blocks expanded.
|
||||
//
|
||||
// The given EvalContext is used when evaluating "for_each" and "labels"
|
||||
// attributes within dynamic blocks, allowing those expressions access to
|
||||
// variables and functions beyond the iterator variable created by the
|
||||
// iteration.
|
||||
//
|
||||
// Expand returns no diagnostics because no blocks are actually expanded
|
||||
// until a call to Content or PartialContent on the returned body, which
|
||||
// will then expand only the blocks selected by the schema.
|
||||
//
|
||||
// "dynamic" blocks are also expanded automatically within nested blocks
|
||||
// in the given body, including within other dynamic blocks, thus allowing
|
||||
// multi-dimensional iteration. However, it is not possible to
|
||||
// dynamically-generate the "dynamic" blocks themselves except through nesting.
|
||||
//
|
||||
// parent {
|
||||
// dynamic "child" {
|
||||
// for_each = child_objs
|
||||
// content {
|
||||
// dynamic "grandchild" {
|
||||
// for_each = child.value.children
|
||||
// labels = [grandchild.key]
|
||||
// content {
|
||||
// parent_key = child.key
|
||||
// value = grandchild.value
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
func Expand(body hcl.Body, ctx *hcl.EvalContext) hcl.Body {
|
||||
return &expandBody{
|
||||
original: body,
|
||||
forEachCtx: ctx,
|
||||
}
|
||||
}
|
||||
50
vendor/github.com/hashicorp/hcl/v2/ext/dynblock/schema.go
generated
vendored
Normal file
50
vendor/github.com/hashicorp/hcl/v2/ext/dynblock/schema.go
generated
vendored
Normal file
@@ -0,0 +1,50 @@
|
||||
package dynblock
|
||||
|
||||
import "github.com/hashicorp/hcl/v2"
|
||||
|
||||
// dynamicBlockHeaderSchema matches the header of the special "dynamic"
// block type: it takes exactly one label, which names the block type to be
// generated.
var dynamicBlockHeaderSchema = hcl.BlockHeaderSchema{
	Type:       "dynamic",
	LabelNames: []string{"type"},
}

// dynamicBlockBodySchemaLabels describes the body of a "dynamic" block in
// which the "labels" attribute is required alongside "for_each" --
// presumably the variant used when the generated block type expects labels.
// "iterator" optionally overrides the default iterator symbol name.
var dynamicBlockBodySchemaLabels = &hcl.BodySchema{
	Attributes: []hcl.AttributeSchema{
		{
			Name:     "for_each",
			Required: true,
		},
		{
			Name:     "iterator",
			Required: false,
		},
		{
			Name:     "labels",
			Required: true,
		},
	},
	Blocks: []hcl.BlockHeaderSchema{
		{
			Type:       "content",
			LabelNames: nil,
		},
	},
}

// dynamicBlockBodySchemaNoLabels is the same as the above except that it
// omits the "labels" attribute entirely, for generated block types that take
// no labels.
var dynamicBlockBodySchemaNoLabels = &hcl.BodySchema{
	Attributes: []hcl.AttributeSchema{
		{
			Name:     "for_each",
			Required: true,
		},
		{
			Name:     "iterator",
			Required: false,
		},
	},
	Blocks: []hcl.BlockHeaderSchema{
		{
			Type:       "content",
			LabelNames: nil,
		},
	},
}
|
||||
84
vendor/github.com/hashicorp/hcl/v2/ext/dynblock/unknown_body.go
generated
vendored
Normal file
84
vendor/github.com/hashicorp/hcl/v2/ext/dynblock/unknown_body.go
generated
vendored
Normal file
@@ -0,0 +1,84 @@
|
||||
package dynblock
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// unknownBody is a funny body that just reports everything inside it as
// unknown. It uses a given other body as a sort of template for what attributes
// and blocks are inside -- including source location information -- but
// substitutes unknown values of unknown type for all attributes.
//
// This rather odd process is used to handle expansion of dynamic blocks whose
// for_each expression is unknown. Since a block cannot itself be unknown,
// we instead arrange for everything _inside_ the block to be unknown instead,
// to give the best possible approximation.
type unknownBody struct {
	// template supplies the attribute/block structure and source ranges;
	// its attribute values are never exposed directly.
	template hcl.Body
}

// Compile-time check that unknownBody implements hcl.Body.
var _ hcl.Body = unknownBody{}
||||
|
||||
func (b unknownBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
|
||||
content, diags := b.template.Content(schema)
|
||||
content = b.fixupContent(content)
|
||||
|
||||
// We're intentionally preserving the diagnostics reported from the
|
||||
// inner body so that we can still report where the template body doesn't
|
||||
// match the requested schema.
|
||||
return content, diags
|
||||
}
|
||||
|
||||
func (b unknownBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
|
||||
content, remain, diags := b.template.PartialContent(schema)
|
||||
content = b.fixupContent(content)
|
||||
remain = unknownBody{remain} // remaining content must also be wrapped
|
||||
|
||||
// We're intentionally preserving the diagnostics reported from the
|
||||
// inner body so that we can still report where the template body doesn't
|
||||
// match the requested schema.
|
||||
return content, remain, diags
|
||||
}
|
||||
|
||||
func (b unknownBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
|
||||
attrs, diags := b.template.JustAttributes()
|
||||
attrs = b.fixupAttrs(attrs)
|
||||
|
||||
// We're intentionally preserving the diagnostics reported from the
|
||||
// inner body so that we can still report where the template body doesn't
|
||||
// match the requested schema.
|
||||
return attrs, diags
|
||||
}
|
||||
|
||||
// MissingItemRange implements hcl.Body by deferring to the template body,
// since the template carries all of the source location information.
func (b unknownBody) MissingItemRange() hcl.Range {
	return b.template.MissingItemRange()
}
|
||||
|
||||
func (b unknownBody) fixupContent(got *hcl.BodyContent) *hcl.BodyContent {
|
||||
ret := &hcl.BodyContent{}
|
||||
ret.Attributes = b.fixupAttrs(got.Attributes)
|
||||
if len(got.Blocks) > 0 {
|
||||
ret.Blocks = make(hcl.Blocks, 0, len(got.Blocks))
|
||||
for _, gotBlock := range got.Blocks {
|
||||
new := *gotBlock // shallow copy
|
||||
new.Body = unknownBody{gotBlock.Body} // nested content must also be marked unknown
|
||||
ret.Blocks = append(ret.Blocks, &new)
|
||||
}
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
func (b unknownBody) fixupAttrs(got hcl.Attributes) hcl.Attributes {
|
||||
if len(got) == 0 {
|
||||
return nil
|
||||
}
|
||||
ret := make(hcl.Attributes, len(got))
|
||||
for name, gotAttr := range got {
|
||||
new := *gotAttr // shallow copy
|
||||
new.Expr = hcl.StaticExpr(cty.DynamicVal, gotAttr.Expr.Range())
|
||||
ret[name] = &new
|
||||
}
|
||||
return ret
|
||||
}
|
||||
209
vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables.go
generated
vendored
Normal file
209
vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables.go
generated
vendored
Normal file
@@ -0,0 +1,209 @@
|
||||
package dynblock
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// WalkVariables begins the recursive process of walking all expressions and
|
||||
// nested blocks in the given body and its child bodies while taking into
|
||||
// account any "dynamic" blocks.
|
||||
//
|
||||
// This function requires that the caller walk through the nested block
|
||||
// structure in the given body level-by-level so that an appropriate schema
|
||||
// can be provided at each level to inform further processing. This workflow
|
||||
// is thus easiest to use for calling applications that have some higher-level
|
||||
// schema representation available with which to drive this multi-step
|
||||
// process. If your application uses the hcldec package, you may be able to
|
||||
// use VariablesHCLDec instead for a more automatic approach.
|
||||
func WalkVariables(body hcl.Body) WalkVariablesNode {
|
||||
return WalkVariablesNode{
|
||||
body: body,
|
||||
includeContent: true,
|
||||
}
|
||||
}
|
||||
|
||||
// WalkExpandVariables is like Variables but it includes only the variables
|
||||
// required for successful block expansion, ignoring any variables referenced
|
||||
// inside block contents. The result is the minimal set of all variables
|
||||
// required for a call to Expand, excluding variables that would only be
|
||||
// needed to subsequently call Content or PartialContent on the expanded
|
||||
// body.
|
||||
func WalkExpandVariables(body hcl.Body) WalkVariablesNode {
|
||||
return WalkVariablesNode{
|
||||
body: body,
|
||||
}
|
||||
}
|
||||
|
||||
// WalkVariablesNode represents one level of a variables walk started by
// WalkVariables or WalkExpandVariables.
type WalkVariablesNode struct {
	// body is the HCL body inspected at this level of the walk.
	body hcl.Body
	// it is the iteration state established by enclosing dynamic blocks,
	// or nil at the root.
	it *iteration

	// includeContent is true when variables referenced inside block
	// contents should be reported too, not just those needed for
	// dynamic block expansion.
	includeContent bool
}
|
||||
|
||||
// WalkVariablesChild pairs a child node of the walk with the name of the
// block type it was found under, which callers use to select the schema
// for the child's own Visit call.
type WalkVariablesChild struct {
	BlockTypeName string
	Node          WalkVariablesNode
}
|
||||
|
||||
// Body returns the HCL Body associated with the child node, in case the caller
// wants to do some sort of inspection of it in order to decide what schema
// to pass to Visit.
//
// Most callers should just fetch a fixed schema based on the BlockTypeName
// field and not access this. Deciding on a schema dynamically based on the
// body is a strange thing to do and generally necessary only if your caller
// is already doing other bizarre things with HCL bodies.
func (c WalkVariablesChild) Body() hcl.Body {
	return c.Node.body
}
|
||||
|
||||
// Visit returns the variable traversals required for any "dynamic" blocks
|
||||
// directly in the body associated with this node, and also returns any child
|
||||
// nodes that must be visited in order to continue the walk.
|
||||
//
|
||||
// Each child node has its associated block type name given in its BlockTypeName
|
||||
// field, which the calling application should use to determine the appropriate
|
||||
// schema for the content of each child node and pass it to the child node's
|
||||
// own Visit method to continue the walk recursively.
|
||||
func (n WalkVariablesNode) Visit(schema *hcl.BodySchema) (vars []hcl.Traversal, children []WalkVariablesChild) {
	// Ask for the caller's schema plus our special "dynamic" block type, so
	// dynamic blocks appear interleaved with the literal child blocks.
	extSchema := n.extendSchema(schema)
	container, _, _ := n.body.PartialContent(extSchema)
	if container == nil {
		// Nothing usable in this body; return what we have (possibly nil).
		return vars, children
	}

	children = make([]WalkVariablesChild, 0, len(container.Blocks))

	// Report variables used directly in this body's attributes, unless the
	// walk was started with WalkExpandVariables (expansion-only mode).
	if n.includeContent {
		for _, attr := range container.Attributes {
			for _, traversal := range attr.Expr.Variables() {
				// Skip references to our own iterator variable or to any
				// iterator inherited from enclosing dynamic blocks, since
				// those are supplied by the expansion itself.
				var ours, inherited bool
				if n.it != nil {
					ours = traversal.RootName() == n.it.IteratorName
					_, inherited = n.it.Inherited[traversal.RootName()]
				}

				if !(ours || inherited) {
					vars = append(vars, traversal)
				}
			}
		}
	}

	for _, block := range container.Blocks {
		switch block.Type {

		case "dynamic":
			blockTypeName := block.Labels[0]
			inner, _, _ := block.Body.PartialContent(variableDetectionInnerSchema)
			if inner == nil {
				continue
			}

			// The iterator variable defaults to the generated block type
			// name, but can be overridden via the "iterator" attribute.
			iteratorName := blockTypeName
			if attr, exists := inner.Attributes["iterator"]; exists {
				iterTraversal, _ := hcl.AbsTraversalForExpr(attr.Expr)
				if len(iterTraversal) == 0 {
					// Ignore this invalid dynamic block, since it'll produce
					// an error if someone tries to extract content from it
					// later anyway.
					continue
				}
				iteratorName = iterTraversal.RootName()
			}
			blockIt := n.it.MakeChild(iteratorName, cty.DynamicVal, cty.DynamicVal)

			if attr, exists := inner.Attributes["for_each"]; exists {
				// Filter out iterator names inherited from parent blocks
				for _, traversal := range attr.Expr.Variables() {
					if _, inherited := blockIt.Inherited[traversal.RootName()]; !inherited {
						vars = append(vars, traversal)
					}
				}
			}
			if attr, exists := inner.Attributes["labels"]; exists {
				// Filter out both our own iterator name _and_ those inherited
				// from parent blocks, since we provide _both_ of these to the
				// label expressions.
				for _, traversal := range attr.Expr.Variables() {
					ours := traversal.RootName() == iteratorName
					_, inherited := blockIt.Inherited[traversal.RootName()]

					if !(ours || inherited) {
						vars = append(vars, traversal)
					}
				}
			}

			for _, contentBlock := range inner.Blocks {
				// We only request "content" blocks in our schema, so we know
				// any blocks we find here will be content blocks. We require
				// exactly one content block for actual expansion, but we'll
				// be more liberal here so that callers can still collect
				// variables from erroneous "dynamic" blocks.
				children = append(children, WalkVariablesChild{
					BlockTypeName: blockTypeName,
					Node: WalkVariablesNode{
						body:           contentBlock.Body,
						it:             blockIt,
						includeContent: n.includeContent,
					},
				})
			}

		default:
			// A literal (non-dynamic) child block: descend with the current
			// iteration state unchanged.
			children = append(children, WalkVariablesChild{
				BlockTypeName: block.Type,
				Node: WalkVariablesNode{
					body:           block.Body,
					it:             n.it,
					includeContent: n.includeContent,
				},
			})

		}
	}

	return vars, children
}
|
||||
|
||||
func (n WalkVariablesNode) extendSchema(schema *hcl.BodySchema) *hcl.BodySchema {
|
||||
// We augment the requested schema to also include our special "dynamic"
|
||||
// block type, since then we'll get instances of it interleaved with
|
||||
// all of the literal child blocks we must also include.
|
||||
extSchema := &hcl.BodySchema{
|
||||
Attributes: schema.Attributes,
|
||||
Blocks: make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+1),
|
||||
}
|
||||
copy(extSchema.Blocks, schema.Blocks)
|
||||
extSchema.Blocks = append(extSchema.Blocks, dynamicBlockHeaderSchema)
|
||||
|
||||
return extSchema
|
||||
}
|
||||
|
||||
// variableDetectionInnerSchema is a more relaxed schema than what's in
// schema.go, since we want to maximize the amount of variables we can find
// even if there are erroneous blocks: every attribute is optional here.
var variableDetectionInnerSchema = &hcl.BodySchema{
	Attributes: []hcl.AttributeSchema{
		{
			Name:     "for_each",
			Required: false,
		},
		{
			Name:     "labels",
			Required: false,
		},
		{
			Name:     "iterator",
			Required: false,
		},
	},
	Blocks: []hcl.BlockHeaderSchema{
		{
			Type: "content",
		},
	},
}
|
||||
43
vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables_hcldec.go
generated
vendored
Normal file
43
vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables_hcldec.go
generated
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
package dynblock
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
"github.com/hashicorp/hcl/v2/hcldec"
|
||||
)
|
||||
|
||||
// VariablesHCLDec is a wrapper around WalkVariables that uses the given hcldec
|
||||
// specification to automatically drive the recursive walk through nested
|
||||
// blocks in the given body.
|
||||
//
|
||||
// This is a drop-in replacement for hcldec.Variables which is able to treat
|
||||
// blocks of type "dynamic" in the same special way that dynblock.Expand would,
|
||||
// exposing both the variables referenced in the "for_each" and "labels"
|
||||
// arguments and variables used in the nested "content" block.
|
||||
func VariablesHCLDec(body hcl.Body, spec hcldec.Spec) []hcl.Traversal {
|
||||
rootNode := WalkVariables(body)
|
||||
return walkVariablesWithHCLDec(rootNode, spec)
|
||||
}
|
||||
|
||||
// ExpandVariablesHCLDec is like VariablesHCLDec but it includes only the
|
||||
// minimal set of variables required to call Expand, ignoring variables that
|
||||
// are referenced only inside normal block contents. See WalkExpandVariables
|
||||
// for more information.
|
||||
func ExpandVariablesHCLDec(body hcl.Body, spec hcldec.Spec) []hcl.Traversal {
|
||||
rootNode := WalkExpandVariables(body)
|
||||
return walkVariablesWithHCLDec(rootNode, spec)
|
||||
}
|
||||
|
||||
func walkVariablesWithHCLDec(node WalkVariablesNode, spec hcldec.Spec) []hcl.Traversal {
|
||||
vars, children := node.Visit(hcldec.ImpliedSchema(spec))
|
||||
|
||||
if len(children) > 0 {
|
||||
childSpecs := hcldec.ChildBlockTypes(spec)
|
||||
for _, child := range children {
|
||||
if childSpec, exists := childSpecs[child.BlockTypeName]; exists {
|
||||
vars = append(vars, walkVariablesWithHCLDec(child.Node, childSpec)...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return vars
|
||||
}
|
||||
67
vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/README.md
generated
vendored
Normal file
67
vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/README.md
generated
vendored
Normal file
@@ -0,0 +1,67 @@
|
||||
# HCL Type Expressions Extension
|
||||
|
||||
This HCL extension defines a convention for describing HCL types using function
|
||||
call and variable reference syntax, allowing configuration formats to include
|
||||
type information provided by users.
|
||||
|
||||
The type syntax is processed statically from a hcl.Expression, so it cannot
|
||||
use any of the usual language operators. This is similar to type expressions
|
||||
in statically-typed programming languages.
|
||||
|
||||
```hcl
|
||||
variable "example" {
|
||||
type = list(string)
|
||||
}
|
||||
```
|
||||
|
||||
The extension is built using the `hcl.ExprAsKeyword` and `hcl.ExprCall`
|
||||
functions, and so it relies on the underlying syntax to define how "keyword"
|
||||
and "call" are interpreted. The above shows how they are interpreted in
|
||||
the HCL native syntax, while the following shows the same information
|
||||
expressed in JSON:
|
||||
|
||||
```json
|
||||
{
|
||||
"variable": {
|
||||
"example": {
|
||||
"type": "list(string)"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Notice that since we have additional contextual information that we intend
|
||||
to allow only calls and keywords, the JSON syntax is able to parse the given
|
||||
string directly as an expression, rather than as a template as would be
|
||||
the case for normal expression evaluation.
|
||||
|
||||
For more information, see [the godoc reference](http://godoc.org/github.com/hashicorp/hcl/v2/ext/typeexpr).
|
||||
|
||||
## Type Expression Syntax
|
||||
|
||||
When expressed in the native syntax, the following expressions are permitted
|
||||
in a type expression:
|
||||
|
||||
* `string` - string
|
||||
* `bool` - boolean
|
||||
* `number` - number
|
||||
* `any` - `cty.DynamicPseudoType` (in function `TypeConstraint` only)
|
||||
* `list(<type_expr>)` - list of the type given as an argument
|
||||
* `set(<type_expr>)` - set of the type given as an argument
|
||||
* `map(<type_expr>)` - map of the type given as an argument
|
||||
* `tuple([<type_exprs...>])` - tuple with the element types given in the single list argument
|
||||
* `object({<attr_name>=<type_expr>, ...})` - object with the attributes and corresponding types given in the single map argument
|
||||
|
||||
For example:
|
||||
|
||||
* `list(string)`
|
||||
* `object({name=string,age=number})`
|
||||
* `map(object({name=string,age=number}))`
|
||||
|
||||
Note that the object constructor syntax is not fully-general for all possible
|
||||
object types because it requires the attribute names to be valid identifiers.
|
||||
In practice it is expected that any time an object type is being fixed for
|
||||
type checking it will be one that has identifiers as its attributes; object
|
||||
types with weird attributes generally show up only from arbitrary object
|
||||
constructors in configuration files, which are usually treated either as maps
|
||||
or as the dynamic pseudo-type.
|
||||
11
vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/doc.go
generated
vendored
Normal file
11
vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/doc.go
generated
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
// Package typeexpr extends HCL with a convention for describing HCL types
|
||||
// within configuration files.
|
||||
//
|
||||
// The type syntax is processed statically from a hcl.Expression, so it cannot
|
||||
// use any of the usual language operators. This is similar to type expressions
|
||||
// in statically-typed programming languages.
|
||||
//
|
||||
// variable "example" {
|
||||
// type = list(string)
|
||||
// }
|
||||
package typeexpr
|
||||
196
vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/get_type.go
generated
vendored
Normal file
196
vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/get_type.go
generated
vendored
Normal file
@@ -0,0 +1,196 @@
|
||||
package typeexpr
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// invalidTypeSummary is the shared diagnostic summary for every error this
// file can produce, so callers see a consistent heading.
const invalidTypeSummary = "Invalid type specification"

// getType is the internal implementation of both Type and TypeConstraint,
// using the passed flag to distinguish. When constraint is false, the "any"
// keyword will produce an error.
func getType(expr hcl.Expression, constraint bool) (cty.Type, hcl.Diagnostics) {
	// First we'll try for one of our keywords
	kw := hcl.ExprAsKeyword(expr)
	switch kw {
	case "bool":
		return cty.Bool, nil
	case "string":
		return cty.String, nil
	case "number":
		return cty.Number, nil
	case "any":
		// "any" is only legal when we're parsing a type *constraint*.
		if constraint {
			return cty.DynamicPseudoType, nil
		}
		return cty.DynamicPseudoType, hcl.Diagnostics{{
			Severity: hcl.DiagError,
			Summary:  invalidTypeSummary,
			Detail:   fmt.Sprintf("The keyword %q cannot be used in this type specification: an exact type is required.", kw),
			Subject:  expr.Range().Ptr(),
		}}
	case "list", "map", "set":
		// Constructor keywords used bare (without arguments) are errors.
		return cty.DynamicPseudoType, hcl.Diagnostics{{
			Severity: hcl.DiagError,
			Summary:  invalidTypeSummary,
			Detail:   fmt.Sprintf("The %s type constructor requires one argument specifying the element type.", kw),
			Subject:  expr.Range().Ptr(),
		}}
	case "object":
		return cty.DynamicPseudoType, hcl.Diagnostics{{
			Severity: hcl.DiagError,
			Summary:  invalidTypeSummary,
			Detail:   "The object type constructor requires one argument specifying the attribute types and values as a map.",
			Subject:  expr.Range().Ptr(),
		}}
	case "tuple":
		return cty.DynamicPseudoType, hcl.Diagnostics{{
			Severity: hcl.DiagError,
			Summary:  invalidTypeSummary,
			Detail:   "The tuple type constructor requires one argument specifying the element types as a list.",
			Subject:  expr.Range().Ptr(),
		}}
	case "":
		// okay! we'll fall through and try processing as a call, then.
	default:
		return cty.DynamicPseudoType, hcl.Diagnostics{{
			Severity: hcl.DiagError,
			Summary:  invalidTypeSummary,
			Detail:   fmt.Sprintf("The keyword %q is not a valid type specification.", kw),
			Subject:  expr.Range().Ptr(),
		}}
	}

	// If we get down here then our expression isn't just a keyword, so we'll
	// try to process it as a call instead.
	call, diags := hcl.ExprCall(expr)
	if diags.HasErrors() {
		return cty.DynamicPseudoType, hcl.Diagnostics{{
			Severity: hcl.DiagError,
			Summary:  invalidTypeSummary,
			Detail:   "A type specification is either a primitive type keyword (bool, number, string) or a complex type constructor call, like list(string).",
			Subject:  expr.Range().Ptr(),
		}}
	}

	// Primitive keywords written as calls, e.g. string(), are rejected.
	switch call.Name {
	case "bool", "string", "number", "any":
		return cty.DynamicPseudoType, hcl.Diagnostics{{
			Severity: hcl.DiagError,
			Summary:  invalidTypeSummary,
			Detail:   fmt.Sprintf("Primitive type keyword %q does not expect arguments.", call.Name),
			Subject:  &call.ArgsRange,
		}}
	}

	// Every known constructor takes exactly one argument; report a targeted
	// diagnostic otherwise.
	if len(call.Arguments) != 1 {
		contextRange := call.ArgsRange
		subjectRange := call.ArgsRange
		if len(call.Arguments) > 1 {
			// If we have too many arguments (as opposed to too _few_) then
			// we'll highlight the extraneous arguments as the diagnostic
			// subject.
			subjectRange = hcl.RangeBetween(call.Arguments[1].Range(), call.Arguments[len(call.Arguments)-1].Range())
		}

		switch call.Name {
		case "list", "set", "map":
			return cty.DynamicPseudoType, hcl.Diagnostics{{
				Severity: hcl.DiagError,
				Summary:  invalidTypeSummary,
				Detail:   fmt.Sprintf("The %s type constructor requires one argument specifying the element type.", call.Name),
				Subject:  &subjectRange,
				Context:  &contextRange,
			}}
		case "object":
			return cty.DynamicPseudoType, hcl.Diagnostics{{
				Severity: hcl.DiagError,
				Summary:  invalidTypeSummary,
				Detail:   "The object type constructor requires one argument specifying the attribute types and values as a map.",
				Subject:  &subjectRange,
				Context:  &contextRange,
			}}
		case "tuple":
			return cty.DynamicPseudoType, hcl.Diagnostics{{
				Severity: hcl.DiagError,
				Summary:  invalidTypeSummary,
				Detail:   "The tuple type constructor requires one argument specifying the element types as a list.",
				Subject:  &subjectRange,
				Context:  &contextRange,
			}}
		}
	}

	switch call.Name {

	case "list":
		// Element types recurse with the same constraint flag, so e.g.
		// list(any) is accepted only in constraint mode.
		ety, diags := getType(call.Arguments[0], constraint)
		return cty.List(ety), diags
	case "set":
		ety, diags := getType(call.Arguments[0], constraint)
		return cty.Set(ety), diags
	case "map":
		ety, diags := getType(call.Arguments[0], constraint)
		return cty.Map(ety), diags
	case "object":
		attrDefs, diags := hcl.ExprMap(call.Arguments[0])
		if diags.HasErrors() {
			return cty.DynamicPseudoType, hcl.Diagnostics{{
				Severity: hcl.DiagError,
				Summary:  invalidTypeSummary,
				Detail:   "Object type constructor requires a map whose keys are attribute names and whose values are the corresponding attribute types.",
				Subject:  call.Arguments[0].Range().Ptr(),
				Context:  expr.Range().Ptr(),
			}}
		}

		atys := make(map[string]cty.Type)
		for _, attrDef := range attrDefs {
			attrName := hcl.ExprAsKeyword(attrDef.Key)
			if attrName == "" {
				// Invalid keys are reported but don't abort the loop, so we
				// can accumulate as many diagnostics as possible.
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  invalidTypeSummary,
					Detail:   "Object constructor map keys must be attribute names.",
					Subject:  attrDef.Key.Range().Ptr(),
					Context:  expr.Range().Ptr(),
				})
				continue
			}
			aty, attrDiags := getType(attrDef.Value, constraint)
			diags = append(diags, attrDiags...)
			atys[attrName] = aty
		}
		return cty.Object(atys), diags
	case "tuple":
		elemDefs, diags := hcl.ExprList(call.Arguments[0])
		if diags.HasErrors() {
			return cty.DynamicPseudoType, hcl.Diagnostics{{
				Severity: hcl.DiagError,
				Summary:  invalidTypeSummary,
				Detail:   "Tuple type constructor requires a list of element types.",
				Subject:  call.Arguments[0].Range().Ptr(),
				Context:  expr.Range().Ptr(),
			}}
		}
		etys := make([]cty.Type, len(elemDefs))
		for i, defExpr := range elemDefs {
			ety, elemDiags := getType(defExpr, constraint)
			diags = append(diags, elemDiags...)
			etys[i] = ety
		}
		return cty.Tuple(etys), diags
	default:
		// Can't access call.Arguments in this path because we've not validated
		// that it contains exactly one expression here.
		return cty.DynamicPseudoType, hcl.Diagnostics{{
			Severity: hcl.DiagError,
			Summary:  invalidTypeSummary,
			Detail:   fmt.Sprintf("Keyword %q is not a valid type constructor.", call.Name),
			Subject:  expr.Range().Ptr(),
		}}
	}
}
|
||||
129
vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/public.go
generated
vendored
Normal file
129
vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/public.go
generated
vendored
Normal file
@@ -0,0 +1,129 @@
|
||||
package typeexpr
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/hashicorp/hcl/v2/hclsyntax"
|
||||
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// Type attempts to process the given expression as a type expression and, if
// successful, returns the resulting type. If unsuccessful, error diagnostics
// are returned.
//
// Unlike TypeConstraint, the "any" keyword is rejected here because an
// exact type is required.
func Type(expr hcl.Expression) (cty.Type, hcl.Diagnostics) {
	return getType(expr, false)
}
|
||||
|
||||
// TypeConstraint attempts to parse the given expression as a type constraint
// and, if successful, returns the resulting type. If unsuccessful, error
// diagnostics are returned.
//
// A type constraint has the same structure as a type, but it additionally
// allows the keyword "any" to represent cty.DynamicPseudoType, which is often
// used as a wildcard in type checking and type conversion operations.
func TypeConstraint(expr hcl.Expression) (cty.Type, hcl.Diagnostics) {
	return getType(expr, true)
}
|
||||
|
||||
// TypeString returns a string rendering of the given type as it would be
// expected to appear in the HCL native syntax.
//
// This is primarily intended for showing types to the user in an application
// that uses typeexpr, where the user can be assumed to be familiar with the
// type expression syntax. In applications that do not use typeexpr these
// results may be confusing to the user and so type.FriendlyName may be
// preferable, even though it's less precise.
//
// TypeString produces reasonable results only for types like what would be
// produced by the Type and TypeConstraint functions. In particular, it cannot
// support capsule types.
func TypeString(ty cty.Type) string {
	// Easy cases first
	switch ty {
	case cty.String:
		return "string"
	case cty.Bool:
		return "bool"
	case cty.Number:
		return "number"
	case cty.DynamicPseudoType:
		return "any"
	}

	if ty.IsCapsuleType() {
		panic("TypeString does not support capsule types")
	}

	if ty.IsCollectionType() {
		// list, set and map all render as constructor calls around their
		// (recursively rendered) element type.
		ety := ty.ElementType()
		etyString := TypeString(ety)
		switch {
		case ty.IsListType():
			return fmt.Sprintf("list(%s)", etyString)
		case ty.IsSetType():
			return fmt.Sprintf("set(%s)", etyString)
		case ty.IsMapType():
			return fmt.Sprintf("map(%s)", etyString)
		default:
			// Should never happen because the above is exhaustive
			panic("unsupported collection type")
		}
	}

	if ty.IsObjectType() {
		var buf bytes.Buffer
		buf.WriteString("object({")
		atys := ty.AttributeTypes()
		// Attribute names are sorted so the rendering is deterministic.
		names := make([]string, 0, len(atys))
		for name := range atys {
			names = append(names, name)
		}
		sort.Strings(names)
		first := true
		for _, name := range names {
			aty := atys[name]
			if !first {
				buf.WriteByte(',')
			}
			if !hclsyntax.ValidIdentifier(name) {
				// Should never happen for any type produced by this package,
				// but we'll do something reasonable here just so we don't
				// produce garbage if someone gives us a hand-assembled object
				// type that has weird attribute names.
				// Using Go-style quoting here isn't perfect, since it doesn't
				// exactly match HCL syntax, but it's fine for an edge-case.
				buf.WriteString(fmt.Sprintf("%q", name))
			} else {
				buf.WriteString(name)
			}
			buf.WriteByte('=')
			buf.WriteString(TypeString(aty))
			first = false
		}
		buf.WriteString("})")
		return buf.String()
	}

	if ty.IsTupleType() {
		var buf bytes.Buffer
		buf.WriteString("tuple([")
		etys := ty.TupleElementTypes()
		first := true
		for _, ety := range etys {
			if !first {
				buf.WriteByte(',')
			}
			buf.WriteString(TypeString(ety))
			first = false
		}
		buf.WriteString("])")
		return buf.String()
	}

	// Should never happen because we covered all cases above.
	panic(fmt.Errorf("unsupported type %#v", ty))
}
|
||||
304
vendor/github.com/hashicorp/hcl/v2/gohcl/decode.go
generated
vendored
Normal file
304
vendor/github.com/hashicorp/hcl/v2/gohcl/decode.go
generated
vendored
Normal file
@@ -0,0 +1,304 @@
|
||||
package gohcl
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
"github.com/zclconf/go-cty/cty/convert"
|
||||
"github.com/zclconf/go-cty/cty/gocty"
|
||||
)
|
||||
|
||||
// DecodeBody extracts the configuration within the given body into the given
|
||||
// value. This value must be a non-nil pointer to either a struct or
|
||||
// a map, where in the former case the configuration will be decoded using
|
||||
// struct tags and in the latter case only attributes are allowed and their
|
||||
// values are decoded into the map.
|
||||
//
|
||||
// The given EvalContext is used to resolve any variables or functions in
|
||||
// expressions encountered while decoding. This may be nil to require only
|
||||
// constant values, for simple applications that do not support variables or
|
||||
// functions.
|
||||
//
|
||||
// The returned diagnostics should be inspected with its HasErrors method to
|
||||
// determine if the populated value is valid and complete. If error diagnostics
|
||||
// are returned then the given value may have been partially-populated but
|
||||
// may still be accessed by a careful caller for static analysis and editor
|
||||
// integration use-cases.
|
||||
func DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
|
||||
rv := reflect.ValueOf(val)
|
||||
if rv.Kind() != reflect.Ptr {
|
||||
panic(fmt.Sprintf("target value must be a pointer, not %s", rv.Type().String()))
|
||||
}
|
||||
|
||||
return decodeBodyToValue(body, ctx, rv.Elem())
|
||||
}
|
||||
|
||||
func decodeBodyToValue(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics {
|
||||
et := val.Type()
|
||||
switch et.Kind() {
|
||||
case reflect.Struct:
|
||||
return decodeBodyToStruct(body, ctx, val)
|
||||
case reflect.Map:
|
||||
return decodeBodyToMap(body, ctx, val)
|
||||
default:
|
||||
panic(fmt.Sprintf("target value must be pointer to struct or map, not %s", et.String()))
|
||||
}
|
||||
}
|
||||
|
||||
// decodeBodyToStruct decodes the content of body into the struct value val,
// using the hcl struct tags on val's type to derive the schema. A "remain"
// field (if present) captures leftover body content, "attr" fields are
// populated from attributes, and "block" fields from nested blocks.
func decodeBodyToStruct(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics {
	schema, partial := ImpliedBodySchema(val.Interface())

	var content *hcl.BodyContent
	var leftovers hcl.Body
	var diags hcl.Diagnostics
	if partial {
		// A "remain" field exists, so unmatched content is allowed and
		// captured in leftovers rather than producing errors.
		content, leftovers, diags = body.PartialContent(schema)
	} else {
		content, diags = body.Content(schema)
	}
	if content == nil {
		return diags
	}

	tags := getFieldTags(val.Type())

	if tags.Remain != nil {
		fieldIdx := *tags.Remain
		field := val.Type().Field(fieldIdx)
		fieldV := val.Field(fieldIdx)
		switch {
		case bodyType.AssignableTo(field.Type):
			// Store the leftover body as-is for delayed processing.
			fieldV.Set(reflect.ValueOf(leftovers))
		case attrsType.AssignableTo(field.Type):
			attrs, attrsDiags := leftovers.JustAttributes()
			if len(attrsDiags) > 0 {
				diags = append(diags, attrsDiags...)
			}
			fieldV.Set(reflect.ValueOf(attrs))
		default:
			// Recursively decode the leftovers into a nested struct or map.
			diags = append(diags, decodeBodyToValue(leftovers, ctx, fieldV)...)
		}
	}

	for name, fieldIdx := range tags.Attributes {
		attr := content.Attributes[name]
		field := val.Type().Field(fieldIdx)
		fieldV := val.Field(fieldIdx)

		if attr == nil {
			if !exprType.AssignableTo(field.Type) {
				// Absent optional attribute; leave the field untouched.
				continue
			}

			// As a special case, if the target is of type hcl.Expression then
			// we'll assign an actual expression that evaluates to a cty null,
			// so the caller can deal with it within the cty realm rather
			// than within the Go realm.
			synthExpr := hcl.StaticExpr(cty.NullVal(cty.DynamicPseudoType), body.MissingItemRange())
			fieldV.Set(reflect.ValueOf(synthExpr))
			continue
		}

		switch {
		case attrType.AssignableTo(field.Type):
			// Target wants the raw *hcl.Attribute.
			fieldV.Set(reflect.ValueOf(attr))
		case exprType.AssignableTo(field.Type):
			// Target wants the unevaluated expression.
			fieldV.Set(reflect.ValueOf(attr.Expr))
		default:
			// Evaluate the expression and convert into the field's Go type.
			diags = append(diags, DecodeExpression(
				attr.Expr, ctx, fieldV.Addr().Interface(),
			)...)
		}
	}

	blocksByType := content.Blocks.ByType()

	for typeName, fieldIdx := range tags.Blocks {
		blocks := blocksByType[typeName]
		field := val.Type().Field(fieldIdx)

		// Unwrap slice and pointer layers to find the element struct type.
		ty := field.Type
		isSlice := false
		isPtr := false
		if ty.Kind() == reflect.Slice {
			isSlice = true
			ty = ty.Elem()
		}
		if ty.Kind() == reflect.Ptr {
			isPtr = true
			ty = ty.Elem()
		}

		if len(blocks) > 1 && !isSlice {
			// Multiple blocks of this type are only allowed when decoding
			// into a slice field.
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  fmt.Sprintf("Duplicate %s block", typeName),
				Detail: fmt.Sprintf(
					"Only one %s block is allowed. Another was defined at %s.",
					typeName, blocks[0].DefRange.String(),
				),
				Subject: &blocks[1].DefRange,
			})
			continue
		}

		if len(blocks) == 0 {
			if isSlice || isPtr {
				// Slice/pointer targets make the block optional; reset to
				// the zero value so stale data isn't left behind.
				val.Field(fieldIdx).Set(reflect.Zero(field.Type))
			} else {
				// A plain struct target makes the block required.
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  fmt.Sprintf("Missing %s block", typeName),
					Detail:   fmt.Sprintf("A %s block is required.", typeName),
					Subject:  body.MissingItemRange().Ptr(),
				})
			}
			continue
		}

		switch {

		case isSlice:
			elemType := ty
			if isPtr {
				elemType = reflect.PtrTo(ty)
			}
			sli := reflect.MakeSlice(reflect.SliceOf(elemType), len(blocks), len(blocks))

			for i, block := range blocks {
				if isPtr {
					v := reflect.New(ty)
					diags = append(diags, decodeBlockToValue(block, ctx, v.Elem())...)
					sli.Index(i).Set(v)
				} else {
					diags = append(diags, decodeBlockToValue(block, ctx, sli.Index(i))...)
				}
			}

			val.Field(fieldIdx).Set(sli)

		default:
			// Exactly one block, decoded into a struct or pointer-to-struct.
			block := blocks[0]
			if isPtr {
				v := reflect.New(ty)
				diags = append(diags, decodeBlockToValue(block, ctx, v.Elem())...)
				val.Field(fieldIdx).Set(v)
			} else {
				diags = append(diags, decodeBlockToValue(block, ctx, val.Field(fieldIdx))...)
			}

		}

	}

	return diags
}
|
||||
|
||||
func decodeBodyToMap(body hcl.Body, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics {
|
||||
attrs, diags := body.JustAttributes()
|
||||
if attrs == nil {
|
||||
return diags
|
||||
}
|
||||
|
||||
mv := reflect.MakeMap(v.Type())
|
||||
|
||||
for k, attr := range attrs {
|
||||
switch {
|
||||
case attrType.AssignableTo(v.Type().Elem()):
|
||||
mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr))
|
||||
case exprType.AssignableTo(v.Type().Elem()):
|
||||
mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr.Expr))
|
||||
default:
|
||||
ev := reflect.New(v.Type().Elem())
|
||||
diags = append(diags, DecodeExpression(attr.Expr, ctx, ev.Interface())...)
|
||||
mv.SetMapIndex(reflect.ValueOf(k), ev.Elem())
|
||||
}
|
||||
}
|
||||
|
||||
v.Set(mv)
|
||||
|
||||
return diags
|
||||
}
|
||||
|
||||
// decodeBlockToValue decodes a single block into the value v. The target
// may accept the raw *hcl.Block, the block's hcl.Body, its attributes, or
// (the common case) be a struct that is decoded recursively via hcl tags,
// with any label-tagged fields populated from the block's labels.
func decodeBlockToValue(block *hcl.Block, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics {
	var diags hcl.Diagnostics

	ty := v.Type()

	switch {
	case blockType.AssignableTo(ty):
		// NOTE(review): these v.Elem() calls assume v wraps a further
		// dereferenceable value when ty accepts *hcl.Block / hcl.Body /
		// hcl.Attributes directly — confirm against the callers in
		// decodeBodyToStruct, which pass struct values or slice elements.
		v.Elem().Set(reflect.ValueOf(block))
	case bodyType.AssignableTo(ty):
		v.Elem().Set(reflect.ValueOf(block.Body))
	case attrsType.AssignableTo(ty):
		attrs, attrsDiags := block.Body.JustAttributes()
		if len(attrsDiags) > 0 {
			diags = append(diags, attrsDiags...)
		}
		v.Elem().Set(reflect.ValueOf(attrs))
	default:
		// Decode the block body into the struct, then populate any
		// label-tagged fields from the block's labels in order.
		diags = append(diags, decodeBodyToValue(block.Body, ctx, v)...)

		if len(block.Labels) > 0 {
			blockTags := getFieldTags(ty)
			for li, lv := range block.Labels {
				lfieldIdx := blockTags.Labels[li].FieldIndex
				v.Field(lfieldIdx).Set(reflect.ValueOf(lv))
			}
		}

	}

	return diags
}
|
||||
|
||||
// DecodeExpression extracts the value of the given expression into the given
|
||||
// value. This value must be something that gocty is able to decode into,
|
||||
// since the final decoding is delegated to that package.
|
||||
//
|
||||
// The given EvalContext is used to resolve any variables or functions in
|
||||
// expressions encountered while decoding. This may be nil to require only
|
||||
// constant values, for simple applications that do not support variables or
|
||||
// functions.
|
||||
//
|
||||
// The returned diagnostics should be inspected with its HasErrors method to
|
||||
// determine if the populated value is valid and complete. If error diagnostics
|
||||
// are returned then the given value may have been partially-populated but
|
||||
// may still be accessed by a careful caller for static analysis and editor
|
||||
// integration use-cases.
|
||||
func DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
|
||||
srcVal, diags := expr.Value(ctx)
|
||||
|
||||
convTy, err := gocty.ImpliedType(val)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("unsuitable DecodeExpression target: %s", err))
|
||||
}
|
||||
|
||||
srcVal, err = convert.Convert(srcVal, convTy)
|
||||
if err != nil {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Unsuitable value type",
|
||||
Detail: fmt.Sprintf("Unsuitable value: %s", err.Error()),
|
||||
Subject: expr.StartRange().Ptr(),
|
||||
Context: expr.Range().Ptr(),
|
||||
})
|
||||
return diags
|
||||
}
|
||||
|
||||
err = gocty.FromCtyValue(srcVal, val)
|
||||
if err != nil {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Unsuitable value type",
|
||||
Detail: fmt.Sprintf("Unsuitable value: %s", err.Error()),
|
||||
Subject: expr.StartRange().Ptr(),
|
||||
Context: expr.Range().Ptr(),
|
||||
})
|
||||
}
|
||||
|
||||
return diags
|
||||
}
|
||||
53
vendor/github.com/hashicorp/hcl/v2/gohcl/doc.go
generated
vendored
Normal file
53
vendor/github.com/hashicorp/hcl/v2/gohcl/doc.go
generated
vendored
Normal file
@@ -0,0 +1,53 @@
|
||||
// Package gohcl allows decoding HCL configurations into Go data structures.
|
||||
//
|
||||
// It provides a convenient and concise way of describing the schema for
|
||||
// configuration and then accessing the resulting data via native Go
|
||||
// types.
|
||||
//
|
||||
// A struct field tag scheme is used, similar to other decoding and
|
||||
// unmarshalling libraries. The tags are formatted as in the following example:
|
||||
//
|
||||
// ThingType string `hcl:"thing_type,attr"`
|
||||
//
|
||||
// Within each tag there are two comma-separated tokens. The first is the
|
||||
// name of the corresponding construct in configuration, while the second
|
||||
// is a keyword giving the kind of construct expected. The following
|
||||
// kind keywords are supported:
|
||||
//
|
||||
// attr (the default) indicates that the value is to be populated from an attribute
|
||||
// block indicates that the value is to be populated from a block
|
||||
// label indicates that the value is to be populated from a block label
|
||||
// remain indicates that the value is to be populated from the remaining body after populating other fields
|
||||
//
|
||||
// "attr" fields may either be of type *hcl.Expression, in which case the raw
|
||||
// expression is assigned, or of any type accepted by gocty, in which case
|
||||
// gocty will be used to assign the value to a native Go type.
|
||||
//
|
||||
// "block" fields may be of type *hcl.Block or hcl.Body, in which case the
|
||||
// corresponding raw value is assigned, or may be a struct that recursively
|
||||
// uses the same tags. Block fields may also be slices of any of these types,
|
||||
// in which case multiple blocks of the corresponding type are decoded into
|
||||
// the slice.
|
||||
//
|
||||
// "label" fields are considered only in a struct used as the type of a field
|
||||
// marked as "block", and are used sequentially to capture the labels of
|
||||
// the blocks being decoded. In this case, the name token is used only as
|
||||
// an identifier for the label in diagnostic messages.
|
||||
//
|
||||
// "remain" can be placed on a single field that may be either of type
|
||||
// hcl.Body or hcl.Attributes, in which case any remaining body content is
|
||||
// placed into this field for delayed processing. If no "remain" field is
|
||||
// present then any attributes or blocks not matched by another valid tag
|
||||
// will cause an error diagnostic.
|
||||
//
|
||||
// Only a subset of this tagging/typing vocabulary is supported for the
|
||||
// "Encode" family of functions. See the EncodeIntoBody docs for full details
|
||||
// on the constraints there.
|
||||
//
|
||||
// Broadly-speaking this package deals with two types of error. The first is
|
||||
// errors in the configuration itself, which are returned as diagnostics
|
||||
// written with the configuration author as the target audience. The second
|
||||
// is bugs in the calling program, such as invalid struct tags, which are
|
||||
// surfaced via panics since there can be no useful runtime handling of such
|
||||
// errors and they should certainly not be returned to the user as diagnostics.
|
||||
package gohcl
|
||||
191
vendor/github.com/hashicorp/hcl/v2/gohcl/encode.go
generated
vendored
Normal file
191
vendor/github.com/hashicorp/hcl/v2/gohcl/encode.go
generated
vendored
Normal file
@@ -0,0 +1,191 @@
|
||||
package gohcl
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
|
||||
"github.com/hashicorp/hcl/v2/hclwrite"
|
||||
"github.com/zclconf/go-cty/cty/gocty"
|
||||
)
|
||||
|
||||
// EncodeIntoBody replaces the contents of the given hclwrite Body with
|
||||
// attributes and blocks derived from the given value, which must be a
|
||||
// struct value or a pointer to a struct value with the struct tags defined
|
||||
// in this package.
|
||||
//
|
||||
// This function can work only with fully-decoded data. It will ignore any
|
||||
// fields tagged as "remain", any fields that decode attributes into either
|
||||
// hcl.Attribute or hcl.Expression values, and any fields that decode blocks
|
||||
// into hcl.Attributes values. This function does not have enough information
|
||||
// to complete the decoding of these types.
|
||||
//
|
||||
// Any fields tagged as "label" are ignored by this function. Use EncodeAsBlock
|
||||
// to produce a whole hclwrite.Block including block labels.
|
||||
//
|
||||
// As long as a suitable value is given to encode and the destination body
|
||||
// is non-nil, this function will always complete. It will panic in case of
|
||||
// any errors in the calling program, such as passing an inappropriate type
|
||||
// or a nil body.
|
||||
//
|
||||
// The layout of the resulting HCL source is derived from the ordering of
|
||||
// the struct fields, with blank lines around nested blocks of different types.
|
||||
// Fields representing attributes should usually precede those representing
|
||||
// blocks so that the attributes can group togather in the result. For more
|
||||
// control, use the hclwrite API directly.
|
||||
func EncodeIntoBody(val interface{}, dst *hclwrite.Body) {
|
||||
rv := reflect.ValueOf(val)
|
||||
ty := rv.Type()
|
||||
if ty.Kind() == reflect.Ptr {
|
||||
rv = rv.Elem()
|
||||
ty = rv.Type()
|
||||
}
|
||||
if ty.Kind() != reflect.Struct {
|
||||
panic(fmt.Sprintf("value is %s, not struct", ty.Kind()))
|
||||
}
|
||||
|
||||
tags := getFieldTags(ty)
|
||||
populateBody(rv, ty, tags, dst)
|
||||
}
|
||||
|
||||
// EncodeAsBlock creates a new hclwrite.Block populated with the data from
|
||||
// the given value, which must be a struct or pointer to struct with the
|
||||
// struct tags defined in this package.
|
||||
//
|
||||
// If the given struct type has fields tagged with "label" tags then they
|
||||
// will be used in order to annotate the created block with labels.
|
||||
//
|
||||
// This function has the same constraints as EncodeIntoBody and will panic
|
||||
// if they are violated.
|
||||
func EncodeAsBlock(val interface{}, blockType string) *hclwrite.Block {
|
||||
rv := reflect.ValueOf(val)
|
||||
ty := rv.Type()
|
||||
if ty.Kind() == reflect.Ptr {
|
||||
rv = rv.Elem()
|
||||
ty = rv.Type()
|
||||
}
|
||||
if ty.Kind() != reflect.Struct {
|
||||
panic(fmt.Sprintf("value is %s, not struct", ty.Kind()))
|
||||
}
|
||||
|
||||
tags := getFieldTags(ty)
|
||||
labels := make([]string, len(tags.Labels))
|
||||
for i, lf := range tags.Labels {
|
||||
lv := rv.Field(lf.FieldIndex)
|
||||
// We just stringify whatever we find. It should always be a string
|
||||
// but if not then we'll still do something reasonable.
|
||||
labels[i] = fmt.Sprintf("%s", lv.Interface())
|
||||
}
|
||||
|
||||
block := hclwrite.NewBlock(blockType, labels)
|
||||
populateBody(rv, ty, tags, block.Body())
|
||||
return block
|
||||
}
|
||||
|
||||
// populateBody writes the tagged fields of the struct value rv into dst as
// attributes and blocks, in struct-field declaration order. Undecodable
// field types (raw hcl.Expression/hcl.Attribute/hcl.Body/hcl.Attributes)
// and nil pointers are skipped; blank lines are inserted between runs of
// blocks and the attributes that follow them.
func populateBody(rv reflect.Value, ty reflect.Type, tags *fieldTags, dst *hclwrite.Body) {
	// Build a single name list covering both attributes and blocks, then
	// sort it by field index so output follows struct declaration order.
	nameIdxs := make(map[string]int, len(tags.Attributes)+len(tags.Blocks))
	namesOrder := make([]string, 0, len(tags.Attributes)+len(tags.Blocks))
	for n, i := range tags.Attributes {
		nameIdxs[n] = i
		namesOrder = append(namesOrder, n)
	}
	for n, i := range tags.Blocks {
		nameIdxs[n] = i
		namesOrder = append(namesOrder, n)
	}
	sort.SliceStable(namesOrder, func(i, j int) bool {
		ni, nj := namesOrder[i], namesOrder[j]
		return nameIdxs[ni] < nameIdxs[nj]
	})

	// Replace, rather than append to, any existing body content.
	dst.Clear()

	// Tracks whether the previous emitted item was a block, so we can
	// insert blank lines between blocks and the attributes after them.
	prevWasBlock := false
	for _, name := range namesOrder {
		fieldIdx := nameIdxs[name]
		field := ty.Field(fieldIdx)
		fieldTy := field.Type
		fieldVal := rv.Field(fieldIdx)

		if fieldTy.Kind() == reflect.Ptr {
			fieldTy = fieldTy.Elem()
			fieldVal = fieldVal.Elem()
		}

		if _, isAttr := tags.Attributes[name]; isAttr {

			if exprType.AssignableTo(fieldTy) || attrType.AssignableTo(fieldTy) {
				continue // ignore undecoded fields
			}
			if !fieldVal.IsValid() {
				continue // ignore (field value is nil pointer)
			}
			if fieldTy.Kind() == reflect.Ptr && fieldVal.IsNil() {
				continue // ignore
			}
			if prevWasBlock {
				dst.AppendNewline()
				prevWasBlock = false
			}

			valTy, err := gocty.ImpliedType(fieldVal.Interface())
			if err != nil {
				panic(fmt.Sprintf("cannot encode %T as HCL expression: %s", fieldVal.Interface(), err))
			}

			val, err := gocty.ToCtyValue(fieldVal.Interface(), valTy)
			if err != nil {
				// This should never happen, since we should always be able
				// to decode into the implied type.
				panic(fmt.Sprintf("failed to encode %T as %#v: %s", fieldVal.Interface(), valTy, err))
			}

			dst.SetAttributeValue(name, val)

		} else { // must be a block, then
			elemTy := fieldTy
			isSeq := false
			if elemTy.Kind() == reflect.Slice || elemTy.Kind() == reflect.Array {
				isSeq = true
				elemTy = elemTy.Elem()
			}

			if bodyType.AssignableTo(elemTy) || attrsType.AssignableTo(elemTy) {
				continue // ignore undecoded fields
			}
			prevWasBlock = false

			if isSeq {
				// Emit one block per element, skipping invalid/nil entries.
				l := fieldVal.Len()
				for i := 0; i < l; i++ {
					elemVal := fieldVal.Index(i)
					if !elemVal.IsValid() {
						continue // ignore (elem value is nil pointer)
					}
					if elemTy.Kind() == reflect.Ptr && elemVal.IsNil() {
						continue // ignore
					}
					block := EncodeAsBlock(elemVal.Interface(), name)
					if !prevWasBlock {
						dst.AppendNewline()
						prevWasBlock = true
					}
					dst.AppendBlock(block)
				}
			} else {
				if !fieldVal.IsValid() {
					continue // ignore (field value is nil pointer)
				}
				if elemTy.Kind() == reflect.Ptr && fieldVal.IsNil() {
					continue // ignore
				}
				block := EncodeAsBlock(fieldVal.Interface(), name)
				if !prevWasBlock {
					dst.AppendNewline()
					prevWasBlock = true
				}
				dst.AppendBlock(block)
			}
		}
	}
}
|
||||
174
vendor/github.com/hashicorp/hcl/v2/gohcl/schema.go
generated
vendored
Normal file
174
vendor/github.com/hashicorp/hcl/v2/gohcl/schema.go
generated
vendored
Normal file
@@ -0,0 +1,174 @@
|
||||
package gohcl
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
)
|
||||
|
||||
// ImpliedBodySchema produces a hcl.BodySchema derived from the type of the
// given value, which must be a struct value or a pointer to one. If an
// inappropriate value is passed, this function will panic.
//
// The second return argument indicates whether the given struct includes
// a "remain" field, and thus the returned schema is non-exhaustive.
//
// This uses the tags on the fields of the struct to discover how each
// field's value should be expressed within configuration. If an invalid
// mapping is attempted, this function will panic.
func ImpliedBodySchema(val interface{}) (schema *hcl.BodySchema, partial bool) {
	ty := reflect.TypeOf(val)

	if ty.Kind() == reflect.Ptr {
		ty = ty.Elem()
	}

	if ty.Kind() != reflect.Struct {
		panic(fmt.Sprintf("given value must be struct, not %T", val))
	}

	var attrSchemas []hcl.AttributeSchema
	var blockSchemas []hcl.BlockHeaderSchema

	tags := getFieldTags(ty)

	// Sort attribute names so the schema is deterministic regardless of
	// map iteration order.
	attrNames := make([]string, 0, len(tags.Attributes))
	for n := range tags.Attributes {
		attrNames = append(attrNames, n)
	}
	sort.Strings(attrNames)
	for _, n := range attrNames {
		idx := tags.Attributes[n]
		optional := tags.Optional[n]
		field := ty.Field(idx)

		var required bool

		switch {
		case field.Type.AssignableTo(exprType):
			// If we're decoding to hcl.Expression then absence can be
			// indicated via a null value, so we don't specify that
			// the field is required during decoding.
			required = false
		case field.Type.Kind() != reflect.Ptr && !optional:
			// Non-pointer fields without ",optional" must be present.
			required = true
		default:
			required = false
		}

		attrSchemas = append(attrSchemas, hcl.AttributeSchema{
			Name:     n,
			Required: required,
		})
	}

	// Sort block type names for the same determinism reason as above.
	blockNames := make([]string, 0, len(tags.Blocks))
	for n := range tags.Blocks {
		blockNames = append(blockNames, n)
	}
	sort.Strings(blockNames)
	for _, n := range blockNames {
		idx := tags.Blocks[n]
		field := ty.Field(idx)
		fty := field.Type
		// Unwrap slice and pointer layers to reach the block struct type.
		if fty.Kind() == reflect.Slice {
			fty = fty.Elem()
		}
		if fty.Kind() == reflect.Ptr {
			fty = fty.Elem()
		}
		if fty.Kind() != reflect.Struct {
			panic(fmt.Sprintf(
				"hcl 'block' tag kind cannot be applied to %s field %s: struct required", field.Type.String(), field.Name,
			))
		}
		// The block struct's own "label" tags determine the label names
		// expected in the schema.
		ftags := getFieldTags(fty)
		var labelNames []string
		if len(ftags.Labels) > 0 {
			labelNames = make([]string, len(ftags.Labels))
			for i, l := range ftags.Labels {
				labelNames[i] = l.Name
			}
		}

		blockSchemas = append(blockSchemas, hcl.BlockHeaderSchema{
			Type:       n,
			LabelNames: labelNames,
		})
	}

	partial = tags.Remain != nil
	schema = &hcl.BodySchema{
		Attributes: attrSchemas,
		Blocks:     blockSchemas,
	}
	return schema, partial
}
|
||||
|
||||
// fieldTags summarizes the hcl struct tags found on a struct type, mapping
// configuration names to the indices of the fields that carry them.
type fieldTags struct {
	Attributes map[string]int  // attr-tagged fields, keyed by attribute name
	Blocks     map[string]int  // block-tagged fields, keyed by block type name
	Labels     []labelField    // label-tagged fields, in declaration order
	Remain     *int            // index of the single "remain" field, if any
	Optional   map[string]bool // attribute names tagged with ",optional"
}
|
||||
|
||||
// labelField records a single label-tagged struct field along with the
// label name used for it in diagnostics.
type labelField struct {
	FieldIndex int    // index of the field within the enclosing struct
	Name       string // label name, used as an identifier in diagnostics
}
|
||||
|
||||
func getFieldTags(ty reflect.Type) *fieldTags {
|
||||
ret := &fieldTags{
|
||||
Attributes: map[string]int{},
|
||||
Blocks: map[string]int{},
|
||||
Optional: map[string]bool{},
|
||||
}
|
||||
|
||||
ct := ty.NumField()
|
||||
for i := 0; i < ct; i++ {
|
||||
field := ty.Field(i)
|
||||
tag := field.Tag.Get("hcl")
|
||||
if tag == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
comma := strings.Index(tag, ",")
|
||||
var name, kind string
|
||||
if comma != -1 {
|
||||
name = tag[:comma]
|
||||
kind = tag[comma+1:]
|
||||
} else {
|
||||
name = tag
|
||||
kind = "attr"
|
||||
}
|
||||
|
||||
switch kind {
|
||||
case "attr":
|
||||
ret.Attributes[name] = i
|
||||
case "block":
|
||||
ret.Blocks[name] = i
|
||||
case "label":
|
||||
ret.Labels = append(ret.Labels, labelField{
|
||||
FieldIndex: i,
|
||||
Name: name,
|
||||
})
|
||||
case "remain":
|
||||
if ret.Remain != nil {
|
||||
panic("only one 'remain' tag is permitted")
|
||||
}
|
||||
idx := i // copy, because this loop will continue assigning to i
|
||||
ret.Remain = &idx
|
||||
case "optional":
|
||||
ret.Attributes[name] = i
|
||||
ret.Optional[name] = true
|
||||
default:
|
||||
panic(fmt.Sprintf("invalid hcl field tag kind %q on %s %q", kind, field.Type.String(), field.Name))
|
||||
}
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
||||
16
vendor/github.com/hashicorp/hcl/v2/gohcl/types.go
generated
vendored
Normal file
16
vendor/github.com/hashicorp/hcl/v2/gohcl/types.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
package gohcl
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
)
|
||||
|
||||
// victimExpr and victimBody exist only so that we can obtain the
// reflect.Type of the hcl.Expression and hcl.Body interfaces below.
var victimExpr hcl.Expression
var victimBody hcl.Body

// Reflection types for the hcl values that this package can assign directly
// into struct fields or map elements without further decoding.
var exprType = reflect.TypeOf(&victimExpr).Elem()
var bodyType = reflect.TypeOf(&victimBody).Elem()
var blockType = reflect.TypeOf((*hcl.Block)(nil))
var attrType = reflect.TypeOf((*hcl.Attribute)(nil))
var attrsType = reflect.TypeOf(hcl.Attributes(nil))
|
||||
21
vendor/github.com/hashicorp/hcl/v2/hcldec/block_labels.go
generated
vendored
Normal file
21
vendor/github.com/hashicorp/hcl/v2/hcldec/block_labels.go
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
package hcldec
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
)
|
||||
|
||||
// blockLabel pairs a single block label value with its source range, for
// use in diagnostics.
type blockLabel struct {
	Value string    // the label text as written in configuration
	Range hcl.Range // where the label appeared in source
}
|
||||
|
||||
func labelsForBlock(block *hcl.Block) []blockLabel {
|
||||
ret := make([]blockLabel, len(block.Labels))
|
||||
for i := range block.Labels {
|
||||
ret[i] = blockLabel{
|
||||
Value: block.Labels[i],
|
||||
Range: block.LabelRanges[i],
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
36
vendor/github.com/hashicorp/hcl/v2/hcldec/decode.go
generated
vendored
Normal file
36
vendor/github.com/hashicorp/hcl/v2/hcldec/decode.go
generated
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
package hcldec
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
func decode(body hcl.Body, blockLabels []blockLabel, ctx *hcl.EvalContext, spec Spec, partial bool) (cty.Value, hcl.Body, hcl.Diagnostics) {
|
||||
schema := ImpliedSchema(spec)
|
||||
|
||||
var content *hcl.BodyContent
|
||||
var diags hcl.Diagnostics
|
||||
var leftovers hcl.Body
|
||||
|
||||
if partial {
|
||||
content, leftovers, diags = body.PartialContent(schema)
|
||||
} else {
|
||||
content, diags = body.Content(schema)
|
||||
}
|
||||
|
||||
val, valDiags := spec.decode(content, blockLabels, ctx)
|
||||
diags = append(diags, valDiags...)
|
||||
|
||||
return val, leftovers, diags
|
||||
}
|
||||
|
||||
// impliedType returns the cty type that decoding with the given spec would
// produce, delegating entirely to the spec itself.
func impliedType(spec Spec) cty.Type {
	return spec.impliedType()
}
|
||||
|
||||
// sourceRange returns the source range of the body content that would be
// used to satisfy spec. Diagnostics from PartialContent are deliberately
// discarded here, since this is a best-effort lookup for reporting.
func sourceRange(body hcl.Body, blockLabels []blockLabel, spec Spec) hcl.Range {
	schema := ImpliedSchema(spec)
	content, _, _ := body.PartialContent(schema)

	return spec.sourceRange(content, blockLabels)
}
|
||||
12
vendor/github.com/hashicorp/hcl/v2/hcldec/doc.go
generated
vendored
Normal file
12
vendor/github.com/hashicorp/hcl/v2/hcldec/doc.go
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
// Package hcldec provides a higher-level API for unpacking the content of
|
||||
// HCL bodies, implemented in terms of the low-level "Content" API exposed
|
||||
// by the bodies themselves.
|
||||
//
|
||||
// It allows decoding an entire nested configuration in a single operation
|
||||
// by providing a description of the intended structure.
|
||||
//
|
||||
// For some applications it may be more convenient to use the "gohcl"
|
||||
// package, which has a similar purpose but decodes directly into native
|
||||
// Go data types. hcldec instead targets the cty type system, and thus allows
|
||||
// a cty-driven application to remain within that type system.
|
||||
package hcldec
|
||||
23
vendor/github.com/hashicorp/hcl/v2/hcldec/gob.go
generated
vendored
Normal file
23
vendor/github.com/hashicorp/hcl/v2/hcldec/gob.go
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
package hcldec
|
||||
|
||||
import (
|
||||
"encoding/gob"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Every Spec implementation should be registered with gob, so that
|
||||
// specs can be sent over gob channels, such as using
|
||||
// github.com/hashicorp/go-plugin with plugins that need to describe
|
||||
// what shape of configuration they are expecting.
|
||||
gob.Register(ObjectSpec(nil))
|
||||
gob.Register(TupleSpec(nil))
|
||||
gob.Register((*AttrSpec)(nil))
|
||||
gob.Register((*LiteralSpec)(nil))
|
||||
gob.Register((*ExprSpec)(nil))
|
||||
gob.Register((*BlockSpec)(nil))
|
||||
gob.Register((*BlockListSpec)(nil))
|
||||
gob.Register((*BlockSetSpec)(nil))
|
||||
gob.Register((*BlockMapSpec)(nil))
|
||||
gob.Register((*BlockLabelSpec)(nil))
|
||||
gob.Register((*DefaultSpec)(nil))
|
||||
}
|
||||
81
vendor/github.com/hashicorp/hcl/v2/hcldec/public.go
generated
vendored
Normal file
81
vendor/github.com/hashicorp/hcl/v2/hcldec/public.go
generated
vendored
Normal file
@@ -0,0 +1,81 @@
|
||||
package hcldec
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// Decode interprets the given body using the given specification and returns
|
||||
// the resulting value. If the given body is not valid per the spec, error
|
||||
// diagnostics are returned and the returned value is likely to be incomplete.
|
||||
//
|
||||
// The ctx argument may be nil, in which case any references to variables or
|
||||
// functions will produce error diagnostics.
|
||||
func Decode(body hcl.Body, spec Spec, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
|
||||
val, _, diags := decode(body, nil, ctx, spec, false)
|
||||
return val, diags
|
||||
}
|
||||
|
||||
// PartialDecode is like Decode except that it permits "leftover" items in
|
||||
// the top-level body, which are returned as a new body to allow for
|
||||
// further processing.
|
||||
//
|
||||
// Any descendent block bodies are _not_ decoded partially and thus must
|
||||
// be fully described by the given specification.
|
||||
func PartialDecode(body hcl.Body, spec Spec, ctx *hcl.EvalContext) (cty.Value, hcl.Body, hcl.Diagnostics) {
|
||||
return decode(body, nil, ctx, spec, true)
|
||||
}
|
||||
|
||||
// ImpliedType returns the value type that should result from decoding the
|
||||
// given spec.
|
||||
func ImpliedType(spec Spec) cty.Type {
|
||||
return impliedType(spec)
|
||||
}
|
||||
|
||||
// SourceRange interprets the given body using the given specification and
|
||||
// then returns the source range of the value that would be used to
|
||||
// fulfill the spec.
|
||||
//
|
||||
// This can be used if application-level validation detects value errors, to
|
||||
// obtain a reasonable SourceRange to use for generated diagnostics. It works
|
||||
// best when applied to specific body items (e.g. using AttrSpec, BlockSpec, ...)
|
||||
// as opposed to entire bodies using ObjectSpec, TupleSpec. The result will
|
||||
// be less useful the broader the specification, so e.g. a spec that returns
|
||||
// the entirety of all of the blocks of a given type is likely to be
|
||||
// _particularly_ arbitrary and useless.
|
||||
//
|
||||
// If the given body is not valid per the given spec, the result is best-effort
|
||||
// and may not actually be something ideal. It's expected that an application
|
||||
// will already have used Decode or PartialDecode earlier and thus had an
|
||||
// opportunity to detect and report spec violations.
|
||||
func SourceRange(body hcl.Body, spec Spec) hcl.Range {
|
||||
return sourceRange(body, nil, spec)
|
||||
}
|
||||
|
||||
// ChildBlockTypes returns a map of all of the child block types declared
|
||||
// by the given spec, with block type names as keys and the associated
|
||||
// nested body specs as values.
|
||||
func ChildBlockTypes(spec Spec) map[string]Spec {
|
||||
ret := map[string]Spec{}
|
||||
|
||||
// visitSameBodyChildren walks through the spec structure, calling
|
||||
// the given callback for each descendent spec encountered. We are
|
||||
// interested in the specs that reference attributes and blocks.
|
||||
var visit visitFunc
|
||||
visit = func(s Spec) {
|
||||
if bs, ok := s.(blockSpec); ok {
|
||||
for _, blockS := range bs.blockHeaderSchemata() {
|
||||
nested := bs.nestedSpec()
|
||||
if nested != nil { // nil can be returned to dynamically opt out of this interface
|
||||
ret[blockS.Type] = nested
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
s.visitSameBodyChildren(visit)
|
||||
}
|
||||
|
||||
visit(spec)
|
||||
|
||||
return ret
|
||||
}
|
||||
36
vendor/github.com/hashicorp/hcl/v2/hcldec/schema.go
generated
vendored
Normal file
36
vendor/github.com/hashicorp/hcl/v2/hcldec/schema.go
generated
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
package hcldec
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
)
|
||||
|
||||
// ImpliedSchema returns the *hcl.BodySchema implied by the given specification.
|
||||
// This is the schema that the Decode function will use internally to
|
||||
// access the content of a given body.
|
||||
func ImpliedSchema(spec Spec) *hcl.BodySchema {
|
||||
var attrs []hcl.AttributeSchema
|
||||
var blocks []hcl.BlockHeaderSchema
|
||||
|
||||
// visitSameBodyChildren walks through the spec structure, calling
|
||||
// the given callback for each descendent spec encountered. We are
|
||||
// interested in the specs that reference attributes and blocks.
|
||||
var visit visitFunc
|
||||
visit = func(s Spec) {
|
||||
if as, ok := s.(attrSpec); ok {
|
||||
attrs = append(attrs, as.attrSchemata()...)
|
||||
}
|
||||
|
||||
if bs, ok := s.(blockSpec); ok {
|
||||
blocks = append(blocks, bs.blockHeaderSchemata()...)
|
||||
}
|
||||
|
||||
s.visitSameBodyChildren(visit)
|
||||
}
|
||||
|
||||
visit(spec)
|
||||
|
||||
return &hcl.BodySchema{
|
||||
Attributes: attrs,
|
||||
Blocks: blocks,
|
||||
}
|
||||
}
|
||||
1567
vendor/github.com/hashicorp/hcl/v2/hcldec/spec.go
generated
vendored
Normal file
1567
vendor/github.com/hashicorp/hcl/v2/hcldec/spec.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
36
vendor/github.com/hashicorp/hcl/v2/hcldec/variables.go
generated
vendored
Normal file
36
vendor/github.com/hashicorp/hcl/v2/hcldec/variables.go
generated
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
package hcldec
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
)
|
||||
|
||||
// Variables processes the given body with the given spec and returns a
|
||||
// list of the variable traversals that would be required to decode
|
||||
// the same pairing of body and spec.
|
||||
//
|
||||
// This can be used to conditionally populate the variables in the EvalContext
|
||||
// passed to Decode, for applications where a static scope is insufficient.
|
||||
//
|
||||
// If the given body is not compliant with the given schema, the result may
|
||||
// be incomplete, but that's assumed to be okay because the eventual call
|
||||
// to Decode will produce error diagnostics anyway.
|
||||
func Variables(body hcl.Body, spec Spec) []hcl.Traversal {
|
||||
var vars []hcl.Traversal
|
||||
schema := ImpliedSchema(spec)
|
||||
content, _, _ := body.PartialContent(schema)
|
||||
|
||||
if vs, ok := spec.(specNeedingVariables); ok {
|
||||
vars = append(vars, vs.variablesNeeded(content)...)
|
||||
}
|
||||
|
||||
var visitFn visitFunc
|
||||
visitFn = func(s Spec) {
|
||||
if vs, ok := s.(specNeedingVariables); ok {
|
||||
vars = append(vars, vs.variablesNeeded(content)...)
|
||||
}
|
||||
s.visitSameBodyChildren(visitFn)
|
||||
}
|
||||
spec.visitSameBodyChildren(visitFn)
|
||||
|
||||
return vars
|
||||
}
|
||||
4
vendor/github.com/hashicorp/hcl/v2/hcled/doc.go
generated
vendored
Normal file
4
vendor/github.com/hashicorp/hcl/v2/hcled/doc.go
generated
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
// Package hcled provides functionality intended to help an application
|
||||
// that embeds HCL to deliver relevant information to a text editor or IDE
|
||||
// for navigating around and analyzing configuration files.
|
||||
package hcled
|
||||
34
vendor/github.com/hashicorp/hcl/v2/hcled/navigation.go
generated
vendored
Normal file
34
vendor/github.com/hashicorp/hcl/v2/hcled/navigation.go
generated
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
package hcled
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
)
|
||||
|
||||
type contextStringer interface {
|
||||
ContextString(offset int) string
|
||||
}
|
||||
|
||||
// ContextString returns a string describing the context of the given byte
|
||||
// offset, if available. An empty string is returned if no such information
|
||||
// is available, or otherwise the returned string is in a form that depends
|
||||
// on the language used to write the referenced file.
|
||||
func ContextString(file *hcl.File, offset int) string {
|
||||
if cser, ok := file.Nav.(contextStringer); ok {
|
||||
return cser.ContextString(offset)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type contextDefRanger interface {
|
||||
ContextDefRange(offset int) hcl.Range
|
||||
}
|
||||
|
||||
func ContextDefRange(file *hcl.File, offset int) hcl.Range {
|
||||
if cser, ok := file.Nav.(contextDefRanger); ok {
|
||||
defRange := cser.ContextDefRange(offset)
|
||||
if !defRange.Empty() {
|
||||
return defRange
|
||||
}
|
||||
}
|
||||
return file.Body.MissingItemRange()
|
||||
}
|
||||
135
vendor/github.com/hashicorp/hcl/v2/hclparse/parser.go
generated
vendored
Normal file
135
vendor/github.com/hashicorp/hcl/v2/hclparse/parser.go
generated
vendored
Normal file
@@ -0,0 +1,135 @@
|
||||
// Package hclparse has the main API entry point for parsing both HCL native
|
||||
// syntax and HCL JSON.
|
||||
//
|
||||
// The main HCL package also includes SimpleParse and SimpleParseFile which
|
||||
// can be a simpler interface for the common case where an application just
|
||||
// needs to parse a single file. The gohcl package simplifies that further
|
||||
// in its SimpleDecode function, which combines hcl.SimpleParse with decoding
|
||||
// into Go struct values
|
||||
//
|
||||
// Package hclparse, then, is useful for applications that require more fine
|
||||
// control over parsing or which need to load many separate files and keep
|
||||
// track of them for possible error reporting or other analysis.
|
||||
package hclparse
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
"github.com/hashicorp/hcl/v2/hclsyntax"
|
||||
"github.com/hashicorp/hcl/v2/json"
|
||||
)
|
||||
|
||||
// NOTE: This is the public interface for parsing. The actual parsers are
|
||||
// in other packages alongside this one, with this package just wrapping them
|
||||
// to provide a unified interface for the caller across all supported formats.
|
||||
|
||||
// Parser is the main interface for parsing configuration files. As well as
|
||||
// parsing files, a parser also retains a registry of all of the files it
|
||||
// has parsed so that multiple attempts to parse the same file will return
|
||||
// the same object and so the collected files can be used when printing
|
||||
// diagnostics.
|
||||
//
|
||||
// Any diagnostics for parsing a file are only returned once on the first
|
||||
// call to parse that file. Callers are expected to collect up diagnostics
|
||||
// and present them together, so returning diagnostics for the same file
|
||||
// multiple times would create a confusing result.
|
||||
type Parser struct {
|
||||
files map[string]*hcl.File
|
||||
}
|
||||
|
||||
// NewParser creates a new parser, ready to parse configuration files.
|
||||
func NewParser() *Parser {
|
||||
return &Parser{
|
||||
files: map[string]*hcl.File{},
|
||||
}
|
||||
}
|
||||
|
||||
// ParseHCL parses the given buffer (which is assumed to have been loaded from
|
||||
// the given filename) as a native-syntax configuration file and returns the
|
||||
// hcl.File object representing it.
|
||||
func (p *Parser) ParseHCL(src []byte, filename string) (*hcl.File, hcl.Diagnostics) {
|
||||
if existing := p.files[filename]; existing != nil {
|
||||
return existing, nil
|
||||
}
|
||||
|
||||
file, diags := hclsyntax.ParseConfig(src, filename, hcl.Pos{Byte: 0, Line: 1, Column: 1})
|
||||
p.files[filename] = file
|
||||
return file, diags
|
||||
}
|
||||
|
||||
// ParseHCLFile reads the given filename and parses it as a native-syntax HCL
|
||||
// configuration file. An error diagnostic is returned if the given file
|
||||
// cannot be read.
|
||||
func (p *Parser) ParseHCLFile(filename string) (*hcl.File, hcl.Diagnostics) {
|
||||
if existing := p.files[filename]; existing != nil {
|
||||
return existing, nil
|
||||
}
|
||||
|
||||
src, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, hcl.Diagnostics{
|
||||
{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Failed to read file",
|
||||
Detail: fmt.Sprintf("The configuration file %q could not be read.", filename),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
return p.ParseHCL(src, filename)
|
||||
}
|
||||
|
||||
// ParseJSON parses the given JSON buffer (which is assumed to have been loaded
|
||||
// from the given filename) and returns the hcl.File object representing it.
|
||||
func (p *Parser) ParseJSON(src []byte, filename string) (*hcl.File, hcl.Diagnostics) {
|
||||
if existing := p.files[filename]; existing != nil {
|
||||
return existing, nil
|
||||
}
|
||||
|
||||
file, diags := json.Parse(src, filename)
|
||||
p.files[filename] = file
|
||||
return file, diags
|
||||
}
|
||||
|
||||
// ParseJSONFile reads the given filename and parses it as JSON, similarly to
|
||||
// ParseJSON. An error diagnostic is returned if the given file cannot be read.
|
||||
func (p *Parser) ParseJSONFile(filename string) (*hcl.File, hcl.Diagnostics) {
|
||||
if existing := p.files[filename]; existing != nil {
|
||||
return existing, nil
|
||||
}
|
||||
|
||||
file, diags := json.ParseFile(filename)
|
||||
p.files[filename] = file
|
||||
return file, diags
|
||||
}
|
||||
|
||||
// AddFile allows a caller to record in a parser a file that was parsed some
|
||||
// other way, thus allowing it to be included in the registry of sources.
|
||||
func (p *Parser) AddFile(filename string, file *hcl.File) {
|
||||
p.files[filename] = file
|
||||
}
|
||||
|
||||
// Sources returns a map from filenames to the raw source code that was
|
||||
// read from them. This is intended to be used, for example, to print
|
||||
// diagnostics with contextual information.
|
||||
//
|
||||
// The arrays underlying the returned slices should not be modified.
|
||||
func (p *Parser) Sources() map[string][]byte {
|
||||
ret := make(map[string][]byte)
|
||||
for fn, f := range p.files {
|
||||
ret[fn] = f.Bytes
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// Files returns a map from filenames to the File objects produced from them.
|
||||
// This is intended to be used, for example, to print diagnostics with
|
||||
// contextual information.
|
||||
//
|
||||
// The returned map and all of the objects it refers to directly or indirectly
|
||||
// must not be modified.
|
||||
func (p *Parser) Files() map[string]*hcl.File {
|
||||
return p.files
|
||||
}
|
||||
23
vendor/github.com/hashicorp/hcl/v2/hclsyntax/diagnostics.go
generated
vendored
Normal file
23
vendor/github.com/hashicorp/hcl/v2/hclsyntax/diagnostics.go
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
package hclsyntax
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
)
|
||||
|
||||
// setDiagEvalContext is an internal helper that will impose a particular
|
||||
// EvalContext on a set of diagnostics in-place, for any diagnostic that
|
||||
// does not already have an EvalContext set.
|
||||
//
|
||||
// We generally expect diagnostics to be immutable, but this is safe to use
|
||||
// on any Diagnostics where none of the contained Diagnostic objects have yet
|
||||
// been seen by a caller. Its purpose is to apply additional context to a
|
||||
// set of diagnostics produced by a "deeper" component as the stack unwinds
|
||||
// during expression evaluation.
|
||||
func setDiagEvalContext(diags hcl.Diagnostics, expr hcl.Expression, ctx *hcl.EvalContext) {
|
||||
for _, diag := range diags {
|
||||
if diag.Expression == nil {
|
||||
diag.Expression = expr
|
||||
diag.EvalContext = ctx
|
||||
}
|
||||
}
|
||||
}
|
||||
24
vendor/github.com/hashicorp/hcl/v2/hclsyntax/didyoumean.go
generated
vendored
Normal file
24
vendor/github.com/hashicorp/hcl/v2/hclsyntax/didyoumean.go
generated
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
package hclsyntax
|
||||
|
||||
import (
|
||||
"github.com/agext/levenshtein"
|
||||
)
|
||||
|
||||
// nameSuggestion tries to find a name from the given slice of suggested names
|
||||
// that is close to the given name and returns it if found. If no suggestion
|
||||
// is close enough, returns the empty string.
|
||||
//
|
||||
// The suggestions are tried in order, so earlier suggestions take precedence
|
||||
// if the given string is similar to two or more suggestions.
|
||||
//
|
||||
// This function is intended to be used with a relatively-small number of
|
||||
// suggestions. It's not optimized for hundreds or thousands of them.
|
||||
func nameSuggestion(given string, suggestions []string) string {
|
||||
for _, suggestion := range suggestions {
|
||||
dist := levenshtein.Distance(given, suggestion, nil)
|
||||
if dist < 3 { // threshold determined experimentally
|
||||
return suggestion
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
7
vendor/github.com/hashicorp/hcl/v2/hclsyntax/doc.go
generated
vendored
Normal file
7
vendor/github.com/hashicorp/hcl/v2/hclsyntax/doc.go
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
// Package hclsyntax contains the parser, AST, etc for HCL's native language,
|
||||
// as opposed to the JSON variant.
|
||||
//
|
||||
// In normal use applications should rarely depend on this package directly,
|
||||
// instead preferring the higher-level interface of the main hcl package and
|
||||
// its companion package hclparse.
|
||||
package hclsyntax
|
||||
1477
vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go
generated
vendored
Normal file
1477
vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
268
vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_ops.go
generated
vendored
Normal file
268
vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_ops.go
generated
vendored
Normal file
@@ -0,0 +1,268 @@
|
||||
package hclsyntax
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
"github.com/zclconf/go-cty/cty/convert"
|
||||
"github.com/zclconf/go-cty/cty/function"
|
||||
"github.com/zclconf/go-cty/cty/function/stdlib"
|
||||
)
|
||||
|
||||
type Operation struct {
|
||||
Impl function.Function
|
||||
Type cty.Type
|
||||
}
|
||||
|
||||
var (
|
||||
OpLogicalOr = &Operation{
|
||||
Impl: stdlib.OrFunc,
|
||||
Type: cty.Bool,
|
||||
}
|
||||
OpLogicalAnd = &Operation{
|
||||
Impl: stdlib.AndFunc,
|
||||
Type: cty.Bool,
|
||||
}
|
||||
OpLogicalNot = &Operation{
|
||||
Impl: stdlib.NotFunc,
|
||||
Type: cty.Bool,
|
||||
}
|
||||
|
||||
OpEqual = &Operation{
|
||||
Impl: stdlib.EqualFunc,
|
||||
Type: cty.Bool,
|
||||
}
|
||||
OpNotEqual = &Operation{
|
||||
Impl: stdlib.NotEqualFunc,
|
||||
Type: cty.Bool,
|
||||
}
|
||||
|
||||
OpGreaterThan = &Operation{
|
||||
Impl: stdlib.GreaterThanFunc,
|
||||
Type: cty.Bool,
|
||||
}
|
||||
OpGreaterThanOrEqual = &Operation{
|
||||
Impl: stdlib.GreaterThanOrEqualToFunc,
|
||||
Type: cty.Bool,
|
||||
}
|
||||
OpLessThan = &Operation{
|
||||
Impl: stdlib.LessThanFunc,
|
||||
Type: cty.Bool,
|
||||
}
|
||||
OpLessThanOrEqual = &Operation{
|
||||
Impl: stdlib.LessThanOrEqualToFunc,
|
||||
Type: cty.Bool,
|
||||
}
|
||||
|
||||
OpAdd = &Operation{
|
||||
Impl: stdlib.AddFunc,
|
||||
Type: cty.Number,
|
||||
}
|
||||
OpSubtract = &Operation{
|
||||
Impl: stdlib.SubtractFunc,
|
||||
Type: cty.Number,
|
||||
}
|
||||
OpMultiply = &Operation{
|
||||
Impl: stdlib.MultiplyFunc,
|
||||
Type: cty.Number,
|
||||
}
|
||||
OpDivide = &Operation{
|
||||
Impl: stdlib.DivideFunc,
|
||||
Type: cty.Number,
|
||||
}
|
||||
OpModulo = &Operation{
|
||||
Impl: stdlib.ModuloFunc,
|
||||
Type: cty.Number,
|
||||
}
|
||||
OpNegate = &Operation{
|
||||
Impl: stdlib.NegateFunc,
|
||||
Type: cty.Number,
|
||||
}
|
||||
)
|
||||
|
||||
var binaryOps []map[TokenType]*Operation
|
||||
|
||||
func init() {
|
||||
// This operation table maps from the operator's token type
|
||||
// to the AST operation type. All expressions produced from
|
||||
// binary operators are BinaryOp nodes.
|
||||
//
|
||||
// Binary operator groups are listed in order of precedence, with
|
||||
// the *lowest* precedence first. Operators within the same group
|
||||
// have left-to-right associativity.
|
||||
binaryOps = []map[TokenType]*Operation{
|
||||
{
|
||||
TokenOr: OpLogicalOr,
|
||||
},
|
||||
{
|
||||
TokenAnd: OpLogicalAnd,
|
||||
},
|
||||
{
|
||||
TokenEqualOp: OpEqual,
|
||||
TokenNotEqual: OpNotEqual,
|
||||
},
|
||||
{
|
||||
TokenGreaterThan: OpGreaterThan,
|
||||
TokenGreaterThanEq: OpGreaterThanOrEqual,
|
||||
TokenLessThan: OpLessThan,
|
||||
TokenLessThanEq: OpLessThanOrEqual,
|
||||
},
|
||||
{
|
||||
TokenPlus: OpAdd,
|
||||
TokenMinus: OpSubtract,
|
||||
},
|
||||
{
|
||||
TokenStar: OpMultiply,
|
||||
TokenSlash: OpDivide,
|
||||
TokenPercent: OpModulo,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type BinaryOpExpr struct {
|
||||
LHS Expression
|
||||
Op *Operation
|
||||
RHS Expression
|
||||
|
||||
SrcRange hcl.Range
|
||||
}
|
||||
|
||||
func (e *BinaryOpExpr) walkChildNodes(w internalWalkFunc) {
|
||||
w(e.LHS)
|
||||
w(e.RHS)
|
||||
}
|
||||
|
||||
func (e *BinaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
|
||||
impl := e.Op.Impl // assumed to be a function taking exactly two arguments
|
||||
params := impl.Params()
|
||||
lhsParam := params[0]
|
||||
rhsParam := params[1]
|
||||
|
||||
var diags hcl.Diagnostics
|
||||
|
||||
givenLHSVal, lhsDiags := e.LHS.Value(ctx)
|
||||
givenRHSVal, rhsDiags := e.RHS.Value(ctx)
|
||||
diags = append(diags, lhsDiags...)
|
||||
diags = append(diags, rhsDiags...)
|
||||
|
||||
lhsVal, err := convert.Convert(givenLHSVal, lhsParam.Type)
|
||||
if err != nil {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid operand",
|
||||
Detail: fmt.Sprintf("Unsuitable value for left operand: %s.", err),
|
||||
Subject: e.LHS.Range().Ptr(),
|
||||
Context: &e.SrcRange,
|
||||
Expression: e.LHS,
|
||||
EvalContext: ctx,
|
||||
})
|
||||
}
|
||||
rhsVal, err := convert.Convert(givenRHSVal, rhsParam.Type)
|
||||
if err != nil {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid operand",
|
||||
Detail: fmt.Sprintf("Unsuitable value for right operand: %s.", err),
|
||||
Subject: e.RHS.Range().Ptr(),
|
||||
Context: &e.SrcRange,
|
||||
Expression: e.RHS,
|
||||
EvalContext: ctx,
|
||||
})
|
||||
}
|
||||
|
||||
if diags.HasErrors() {
|
||||
// Don't actually try the call if we have errors already, since the
|
||||
// this will probably just produce a confusing duplicative diagnostic.
|
||||
return cty.UnknownVal(e.Op.Type), diags
|
||||
}
|
||||
|
||||
args := []cty.Value{lhsVal, rhsVal}
|
||||
result, err := impl.Call(args)
|
||||
if err != nil {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
// FIXME: This diagnostic is useless.
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Operation failed",
|
||||
Detail: fmt.Sprintf("Error during operation: %s.", err),
|
||||
Subject: &e.SrcRange,
|
||||
Expression: e,
|
||||
EvalContext: ctx,
|
||||
})
|
||||
return cty.UnknownVal(e.Op.Type), diags
|
||||
}
|
||||
|
||||
return result, diags
|
||||
}
|
||||
|
||||
func (e *BinaryOpExpr) Range() hcl.Range {
|
||||
return e.SrcRange
|
||||
}
|
||||
|
||||
func (e *BinaryOpExpr) StartRange() hcl.Range {
|
||||
return e.LHS.StartRange()
|
||||
}
|
||||
|
||||
type UnaryOpExpr struct {
|
||||
Op *Operation
|
||||
Val Expression
|
||||
|
||||
SrcRange hcl.Range
|
||||
SymbolRange hcl.Range
|
||||
}
|
||||
|
||||
func (e *UnaryOpExpr) walkChildNodes(w internalWalkFunc) {
|
||||
w(e.Val)
|
||||
}
|
||||
|
||||
func (e *UnaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
|
||||
impl := e.Op.Impl // assumed to be a function taking exactly one argument
|
||||
params := impl.Params()
|
||||
param := params[0]
|
||||
|
||||
givenVal, diags := e.Val.Value(ctx)
|
||||
|
||||
val, err := convert.Convert(givenVal, param.Type)
|
||||
if err != nil {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid operand",
|
||||
Detail: fmt.Sprintf("Unsuitable value for unary operand: %s.", err),
|
||||
Subject: e.Val.Range().Ptr(),
|
||||
Context: &e.SrcRange,
|
||||
Expression: e.Val,
|
||||
EvalContext: ctx,
|
||||
})
|
||||
}
|
||||
|
||||
if diags.HasErrors() {
|
||||
// Don't actually try the call if we have errors already, since the
|
||||
// this will probably just produce a confusing duplicative diagnostic.
|
||||
return cty.UnknownVal(e.Op.Type), diags
|
||||
}
|
||||
|
||||
args := []cty.Value{val}
|
||||
result, err := impl.Call(args)
|
||||
if err != nil {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
// FIXME: This diagnostic is useless.
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Operation failed",
|
||||
Detail: fmt.Sprintf("Error during operation: %s.", err),
|
||||
Subject: &e.SrcRange,
|
||||
Expression: e,
|
||||
EvalContext: ctx,
|
||||
})
|
||||
return cty.UnknownVal(e.Op.Type), diags
|
||||
}
|
||||
|
||||
return result, diags
|
||||
}
|
||||
|
||||
func (e *UnaryOpExpr) Range() hcl.Range {
|
||||
return e.SrcRange
|
||||
}
|
||||
|
||||
func (e *UnaryOpExpr) StartRange() hcl.Range {
|
||||
return e.SymbolRange
|
||||
}
|
||||
220
vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_template.go
generated
vendored
Normal file
220
vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_template.go
generated
vendored
Normal file
@@ -0,0 +1,220 @@
|
||||
package hclsyntax
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
"github.com/zclconf/go-cty/cty/convert"
|
||||
)
|
||||
|
||||
type TemplateExpr struct {
|
||||
Parts []Expression
|
||||
|
||||
SrcRange hcl.Range
|
||||
}
|
||||
|
||||
func (e *TemplateExpr) walkChildNodes(w internalWalkFunc) {
|
||||
for _, part := range e.Parts {
|
||||
w(part)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *TemplateExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
|
||||
buf := &bytes.Buffer{}
|
||||
var diags hcl.Diagnostics
|
||||
isKnown := true
|
||||
|
||||
for _, part := range e.Parts {
|
||||
partVal, partDiags := part.Value(ctx)
|
||||
diags = append(diags, partDiags...)
|
||||
|
||||
if partVal.IsNull() {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid template interpolation value",
|
||||
Detail: fmt.Sprintf(
|
||||
"The expression result is null. Cannot include a null value in a string template.",
|
||||
),
|
||||
Subject: part.Range().Ptr(),
|
||||
Context: &e.SrcRange,
|
||||
Expression: part,
|
||||
EvalContext: ctx,
|
||||
})
|
||||
continue
|
||||
}
|
||||
|
||||
if !partVal.IsKnown() {
|
||||
// If any part is unknown then the result as a whole must be
|
||||
// unknown too. We'll keep on processing the rest of the parts
|
||||
// anyway, because we want to still emit any diagnostics resulting
|
||||
// from evaluating those.
|
||||
isKnown = false
|
||||
continue
|
||||
}
|
||||
|
||||
strVal, err := convert.Convert(partVal, cty.String)
|
||||
if err != nil {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid template interpolation value",
|
||||
Detail: fmt.Sprintf(
|
||||
"Cannot include the given value in a string template: %s.",
|
||||
err.Error(),
|
||||
),
|
||||
Subject: part.Range().Ptr(),
|
||||
Context: &e.SrcRange,
|
||||
Expression: part,
|
||||
EvalContext: ctx,
|
||||
})
|
||||
continue
|
||||
}
|
||||
|
||||
buf.WriteString(strVal.AsString())
|
||||
}
|
||||
|
||||
if !isKnown {
|
||||
return cty.UnknownVal(cty.String), diags
|
||||
}
|
||||
|
||||
return cty.StringVal(buf.String()), diags
|
||||
}
|
||||
|
||||
func (e *TemplateExpr) Range() hcl.Range {
|
||||
return e.SrcRange
|
||||
}
|
||||
|
||||
func (e *TemplateExpr) StartRange() hcl.Range {
|
||||
return e.Parts[0].StartRange()
|
||||
}
|
||||
|
||||
// IsStringLiteral returns true if and only if the template consists only of
|
||||
// single string literal, as would be created for a simple quoted string like
|
||||
// "foo".
|
||||
//
|
||||
// If this function returns true, then calling Value on the same expression
|
||||
// with a nil EvalContext will return the literal value.
|
||||
//
|
||||
// Note that "${"foo"}", "${1}", etc aren't considered literal values for the
|
||||
// purposes of this method, because the intent of this method is to identify
|
||||
// situations where the user seems to be explicitly intending literal string
|
||||
// interpretation, not situations that result in literals as a technicality
|
||||
// of the template expression unwrapping behavior.
|
||||
func (e *TemplateExpr) IsStringLiteral() bool {
|
||||
if len(e.Parts) != 1 {
|
||||
return false
|
||||
}
|
||||
_, ok := e.Parts[0].(*LiteralValueExpr)
|
||||
return ok
|
||||
}
|
||||
|
||||
// TemplateJoinExpr is used to convert tuples of strings produced by template
|
||||
// constructs (i.e. for loops) into flat strings, by converting the values
|
||||
// tos strings and joining them. This AST node is not used directly; it's
|
||||
// produced as part of the AST of a "for" loop in a template.
|
||||
type TemplateJoinExpr struct {
|
||||
Tuple Expression
|
||||
}
|
||||
|
||||
func (e *TemplateJoinExpr) walkChildNodes(w internalWalkFunc) {
|
||||
w(e.Tuple)
|
||||
}
|
||||
|
||||
func (e *TemplateJoinExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
|
||||
tuple, diags := e.Tuple.Value(ctx)
|
||||
|
||||
if tuple.IsNull() {
|
||||
// This indicates a bug in the code that constructed the AST.
|
||||
panic("TemplateJoinExpr got null tuple")
|
||||
}
|
||||
if tuple.Type() == cty.DynamicPseudoType {
|
||||
return cty.UnknownVal(cty.String), diags
|
||||
}
|
||||
if !tuple.Type().IsTupleType() {
|
||||
// This indicates a bug in the code that constructed the AST.
|
||||
panic("TemplateJoinExpr got non-tuple tuple")
|
||||
}
|
||||
if !tuple.IsKnown() {
|
||||
return cty.UnknownVal(cty.String), diags
|
||||
}
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
it := tuple.ElementIterator()
|
||||
for it.Next() {
|
||||
_, val := it.Element()
|
||||
|
||||
if val.IsNull() {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid template interpolation value",
|
||||
Detail: fmt.Sprintf(
|
||||
"An iteration result is null. Cannot include a null value in a string template.",
|
||||
),
|
||||
Subject: e.Range().Ptr(),
|
||||
Expression: e,
|
||||
EvalContext: ctx,
|
||||
})
|
||||
continue
|
||||
}
|
||||
if val.Type() == cty.DynamicPseudoType {
|
||||
return cty.UnknownVal(cty.String), diags
|
||||
}
|
||||
strVal, err := convert.Convert(val, cty.String)
|
||||
if err != nil {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid template interpolation value",
|
||||
Detail: fmt.Sprintf(
|
||||
"Cannot include one of the interpolation results into the string template: %s.",
|
||||
err.Error(),
|
||||
),
|
||||
Subject: e.Range().Ptr(),
|
||||
Expression: e,
|
||||
EvalContext: ctx,
|
||||
})
|
||||
continue
|
||||
}
|
||||
if !val.IsKnown() {
|
||||
return cty.UnknownVal(cty.String), diags
|
||||
}
|
||||
|
||||
buf.WriteString(strVal.AsString())
|
||||
}
|
||||
|
||||
return cty.StringVal(buf.String()), diags
|
||||
}
|
||||
|
||||
func (e *TemplateJoinExpr) Range() hcl.Range {
|
||||
return e.Tuple.Range()
|
||||
}
|
||||
|
||||
func (e *TemplateJoinExpr) StartRange() hcl.Range {
|
||||
return e.Tuple.StartRange()
|
||||
}
|
||||
|
||||
// TemplateWrapExpr is used instead of a TemplateExpr when a template
|
||||
// consists _only_ of a single interpolation sequence. In that case, the
|
||||
// template's result is the single interpolation's result, verbatim with
|
||||
// no type conversions.
|
||||
type TemplateWrapExpr struct {
|
||||
Wrapped Expression
|
||||
|
||||
SrcRange hcl.Range
|
||||
}
|
||||
|
||||
func (e *TemplateWrapExpr) walkChildNodes(w internalWalkFunc) {
|
||||
w(e.Wrapped)
|
||||
}
|
||||
|
||||
func (e *TemplateWrapExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
|
||||
return e.Wrapped.Value(ctx)
|
||||
}
|
||||
|
||||
func (e *TemplateWrapExpr) Range() hcl.Range {
|
||||
return e.SrcRange
|
||||
}
|
||||
|
||||
func (e *TemplateWrapExpr) StartRange() hcl.Range {
|
||||
return e.SrcRange
|
||||
}
|
||||
76
vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_vars.go
generated
vendored
Normal file
76
vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_vars.go
generated
vendored
Normal file
@@ -0,0 +1,76 @@
|
||||
package hclsyntax
|
||||
|
||||
// Generated by expression_vars_get.go. DO NOT EDIT.
|
||||
// Run 'go generate' on this package to update the set of functions here.
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
)
|
||||
|
||||
func (e *AnonSymbolExpr) Variables() []hcl.Traversal {
|
||||
return Variables(e)
|
||||
}
|
||||
|
||||
func (e *BinaryOpExpr) Variables() []hcl.Traversal {
|
||||
return Variables(e)
|
||||
}
|
||||
|
||||
func (e *ConditionalExpr) Variables() []hcl.Traversal {
|
||||
return Variables(e)
|
||||
}
|
||||
|
||||
func (e *ForExpr) Variables() []hcl.Traversal {
|
||||
return Variables(e)
|
||||
}
|
||||
|
||||
func (e *FunctionCallExpr) Variables() []hcl.Traversal {
|
||||
return Variables(e)
|
||||
}
|
||||
|
||||
func (e *IndexExpr) Variables() []hcl.Traversal {
|
||||
return Variables(e)
|
||||
}
|
||||
|
||||
func (e *LiteralValueExpr) Variables() []hcl.Traversal {
|
||||
return Variables(e)
|
||||
}
|
||||
|
||||
func (e *ObjectConsExpr) Variables() []hcl.Traversal {
|
||||
return Variables(e)
|
||||
}
|
||||
|
||||
func (e *ObjectConsKeyExpr) Variables() []hcl.Traversal {
|
||||
return Variables(e)
|
||||
}
|
||||
|
||||
func (e *RelativeTraversalExpr) Variables() []hcl.Traversal {
|
||||
return Variables(e)
|
||||
}
|
||||
|
||||
func (e *ScopeTraversalExpr) Variables() []hcl.Traversal {
|
||||
return Variables(e)
|
||||
}
|
||||
|
||||
func (e *SplatExpr) Variables() []hcl.Traversal {
|
||||
return Variables(e)
|
||||
}
|
||||
|
||||
func (e *TemplateExpr) Variables() []hcl.Traversal {
|
||||
return Variables(e)
|
||||
}
|
||||
|
||||
func (e *TemplateJoinExpr) Variables() []hcl.Traversal {
|
||||
return Variables(e)
|
||||
}
|
||||
|
||||
func (e *TemplateWrapExpr) Variables() []hcl.Traversal {
|
||||
return Variables(e)
|
||||
}
|
||||
|
||||
func (e *TupleConsExpr) Variables() []hcl.Traversal {
|
||||
return Variables(e)
|
||||
}
|
||||
|
||||
func (e *UnaryOpExpr) Variables() []hcl.Traversal {
|
||||
return Variables(e)
|
||||
}
|
||||
20
vendor/github.com/hashicorp/hcl/v2/hclsyntax/file.go
generated
vendored
Normal file
20
vendor/github.com/hashicorp/hcl/v2/hclsyntax/file.go
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
package hclsyntax
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
)
|
||||
|
||||
// File is the top-level object resulting from parsing a configuration file.
|
||||
type File struct {
|
||||
Body *Body
|
||||
Bytes []byte
|
||||
}
|
||||
|
||||
func (f *File) AsHCLFile() *hcl.File {
|
||||
return &hcl.File{
|
||||
Body: f.Body,
|
||||
Bytes: f.Bytes,
|
||||
|
||||
// TODO: The Nav object, once we have an implementation of it
|
||||
}
|
||||
}
|
||||
9
vendor/github.com/hashicorp/hcl/v2/hclsyntax/generate.go
generated
vendored
Normal file
9
vendor/github.com/hashicorp/hcl/v2/hclsyntax/generate.go
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
package hclsyntax
|
||||
|
||||
//go:generate go run expression_vars_gen.go
|
||||
//go:generate ruby unicode2ragel.rb --url=http://www.unicode.org/Public/9.0.0/ucd/DerivedCoreProperties.txt -m UnicodeDerived -p ID_Start,ID_Continue -o unicode_derived.rl
|
||||
//go:generate ragel -Z scan_tokens.rl
|
||||
//go:generate gofmt -w scan_tokens.go
|
||||
//go:generate ragel -Z scan_string_lit.rl
|
||||
//go:generate gofmt -w scan_string_lit.go
|
||||
//go:generate stringer -type TokenType -output token_type_string.go
|
||||
21
vendor/github.com/hashicorp/hcl/v2/hclsyntax/keywords.go
generated
vendored
Normal file
21
vendor/github.com/hashicorp/hcl/v2/hclsyntax/keywords.go
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
package hclsyntax
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
)
|
||||
|
||||
type Keyword []byte
|
||||
|
||||
var forKeyword = Keyword([]byte{'f', 'o', 'r'})
|
||||
var inKeyword = Keyword([]byte{'i', 'n'})
|
||||
var ifKeyword = Keyword([]byte{'i', 'f'})
|
||||
var elseKeyword = Keyword([]byte{'e', 'l', 's', 'e'})
|
||||
var endifKeyword = Keyword([]byte{'e', 'n', 'd', 'i', 'f'})
|
||||
var endforKeyword = Keyword([]byte{'e', 'n', 'd', 'f', 'o', 'r'})
|
||||
|
||||
func (kw Keyword) TokenMatches(token Token) bool {
|
||||
if token.Type != TokenIdent {
|
||||
return false
|
||||
}
|
||||
return bytes.Equal([]byte(kw), token.Bytes)
|
||||
}
|
||||
59
vendor/github.com/hashicorp/hcl/v2/hclsyntax/navigation.go
generated
vendored
Normal file
59
vendor/github.com/hashicorp/hcl/v2/hclsyntax/navigation.go
generated
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
package hclsyntax
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
)
|
||||
|
||||
type navigation struct {
|
||||
root *Body
|
||||
}
|
||||
|
||||
// Implementation of hcled.ContextString
|
||||
func (n navigation) ContextString(offset int) string {
|
||||
// We will walk our top-level blocks until we find one that contains
|
||||
// the given offset, and then construct a representation of the header
|
||||
// of the block.
|
||||
|
||||
var block *Block
|
||||
for _, candidate := range n.root.Blocks {
|
||||
if candidate.Range().ContainsOffset(offset) {
|
||||
block = candidate
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if block == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
if len(block.Labels) == 0 {
|
||||
// Easy case!
|
||||
return block.Type
|
||||
}
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
buf.WriteString(block.Type)
|
||||
for _, label := range block.Labels {
|
||||
fmt.Fprintf(buf, " %q", label)
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func (n navigation) ContextDefRange(offset int) hcl.Range {
|
||||
var block *Block
|
||||
for _, candidate := range n.root.Blocks {
|
||||
if candidate.Range().ContainsOffset(offset) {
|
||||
block = candidate
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if block == nil {
|
||||
return hcl.Range{}
|
||||
}
|
||||
|
||||
return block.DefRange()
|
||||
}
|
||||
22
vendor/github.com/hashicorp/hcl/v2/hclsyntax/node.go
generated
vendored
Normal file
22
vendor/github.com/hashicorp/hcl/v2/hclsyntax/node.go
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
package hclsyntax
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
)
|
||||
|
||||
// Node is the abstract type that every AST node implements.
|
||||
//
|
||||
// This is a closed interface, so it cannot be implemented from outside of
|
||||
// this package.
|
||||
type Node interface {
|
||||
// This is the mechanism by which the public-facing walk functions
|
||||
// are implemented. Implementations should call the given function
|
||||
// for each child node and then replace that node with its return value.
|
||||
// The return value might just be the same node, for non-transforming
|
||||
// walks.
|
||||
walkChildNodes(w internalWalkFunc)
|
||||
|
||||
Range() hcl.Range
|
||||
}
|
||||
|
||||
type internalWalkFunc func(Node)
|
||||
2054
vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser.go
generated
vendored
Normal file
2054
vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
799
vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_template.go
generated
vendored
Normal file
799
vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_template.go
generated
vendored
Normal file
@@ -0,0 +1,799 @@
|
||||
package hclsyntax
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
"github.com/apparentlymart/go-textseg/textseg"
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
func (p *parser) ParseTemplate() (Expression, hcl.Diagnostics) {
|
||||
return p.parseTemplate(TokenEOF, false)
|
||||
}
|
||||
|
||||
func (p *parser) parseTemplate(end TokenType, flushHeredoc bool) (Expression, hcl.Diagnostics) {
|
||||
exprs, passthru, rng, diags := p.parseTemplateInner(end, flushHeredoc)
|
||||
|
||||
if passthru {
|
||||
if len(exprs) != 1 {
|
||||
panic("passthru set with len(exprs) != 1")
|
||||
}
|
||||
return &TemplateWrapExpr{
|
||||
Wrapped: exprs[0],
|
||||
SrcRange: rng,
|
||||
}, diags
|
||||
}
|
||||
|
||||
return &TemplateExpr{
|
||||
Parts: exprs,
|
||||
SrcRange: rng,
|
||||
}, diags
|
||||
}
|
||||
|
||||
func (p *parser) parseTemplateInner(end TokenType, flushHeredoc bool) ([]Expression, bool, hcl.Range, hcl.Diagnostics) {
|
||||
parts, diags := p.parseTemplateParts(end)
|
||||
if flushHeredoc {
|
||||
flushHeredocTemplateParts(parts) // Trim off leading spaces on lines per the flush heredoc spec
|
||||
}
|
||||
tp := templateParser{
|
||||
Tokens: parts.Tokens,
|
||||
SrcRange: parts.SrcRange,
|
||||
}
|
||||
exprs, exprsDiags := tp.parseRoot()
|
||||
diags = append(diags, exprsDiags...)
|
||||
|
||||
passthru := false
|
||||
if len(parts.Tokens) == 2 { // one real token and one synthetic "end" token
|
||||
if _, isInterp := parts.Tokens[0].(*templateInterpToken); isInterp {
|
||||
passthru = true
|
||||
}
|
||||
}
|
||||
|
||||
return exprs, passthru, parts.SrcRange, diags
|
||||
}
|
||||
|
||||
type templateParser struct {
|
||||
Tokens []templateToken
|
||||
SrcRange hcl.Range
|
||||
|
||||
pos int
|
||||
}
|
||||
|
||||
func (p *templateParser) parseRoot() ([]Expression, hcl.Diagnostics) {
|
||||
var exprs []Expression
|
||||
var diags hcl.Diagnostics
|
||||
|
||||
for {
|
||||
next := p.Peek()
|
||||
if _, isEnd := next.(*templateEndToken); isEnd {
|
||||
break
|
||||
}
|
||||
|
||||
expr, exprDiags := p.parseExpr()
|
||||
diags = append(diags, exprDiags...)
|
||||
exprs = append(exprs, expr)
|
||||
}
|
||||
|
||||
return exprs, diags
|
||||
}
|
||||
|
||||
func (p *templateParser) parseExpr() (Expression, hcl.Diagnostics) {
|
||||
next := p.Peek()
|
||||
switch tok := next.(type) {
|
||||
|
||||
case *templateLiteralToken:
|
||||
p.Read() // eat literal
|
||||
return &LiteralValueExpr{
|
||||
Val: cty.StringVal(tok.Val),
|
||||
SrcRange: tok.SrcRange,
|
||||
}, nil
|
||||
|
||||
case *templateInterpToken:
|
||||
p.Read() // eat interp
|
||||
return tok.Expr, nil
|
||||
|
||||
case *templateIfToken:
|
||||
return p.parseIf()
|
||||
|
||||
case *templateForToken:
|
||||
return p.parseFor()
|
||||
|
||||
case *templateEndToken:
|
||||
p.Read() // eat erroneous token
|
||||
return errPlaceholderExpr(tok.SrcRange), hcl.Diagnostics{
|
||||
{
|
||||
// This is a particularly unhelpful diagnostic, so callers
|
||||
// should attempt to pre-empt it and produce a more helpful
|
||||
// diagnostic that is context-aware.
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Unexpected end of template",
|
||||
Detail: "The control directives within this template are unbalanced.",
|
||||
Subject: &tok.SrcRange,
|
||||
},
|
||||
}
|
||||
|
||||
case *templateEndCtrlToken:
|
||||
p.Read() // eat erroneous token
|
||||
return errPlaceholderExpr(tok.SrcRange), hcl.Diagnostics{
|
||||
{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: fmt.Sprintf("Unexpected %s directive", tok.Name()),
|
||||
Detail: "The control directives within this template are unbalanced.",
|
||||
Subject: &tok.SrcRange,
|
||||
},
|
||||
}
|
||||
|
||||
default:
|
||||
// should never happen, because above should be exhaustive
|
||||
panic(fmt.Sprintf("unhandled template token type %T", next))
|
||||
}
|
||||
}
|
||||
|
||||
func (p *templateParser) parseIf() (Expression, hcl.Diagnostics) {
|
||||
open := p.Read()
|
||||
openIf, isIf := open.(*templateIfToken)
|
||||
if !isIf {
|
||||
// should never happen if caller is behaving
|
||||
panic("parseIf called with peeker not pointing at if token")
|
||||
}
|
||||
|
||||
var ifExprs, elseExprs []Expression
|
||||
var diags hcl.Diagnostics
|
||||
var endifRange hcl.Range
|
||||
|
||||
currentExprs := &ifExprs
|
||||
Token:
|
||||
for {
|
||||
next := p.Peek()
|
||||
if end, isEnd := next.(*templateEndToken); isEnd {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Unexpected end of template",
|
||||
Detail: fmt.Sprintf(
|
||||
"The if directive at %s is missing its corresponding endif directive.",
|
||||
openIf.SrcRange,
|
||||
),
|
||||
Subject: &end.SrcRange,
|
||||
})
|
||||
return errPlaceholderExpr(end.SrcRange), diags
|
||||
}
|
||||
if end, isCtrlEnd := next.(*templateEndCtrlToken); isCtrlEnd {
|
||||
p.Read() // eat end directive
|
||||
|
||||
switch end.Type {
|
||||
|
||||
case templateElse:
|
||||
if currentExprs == &ifExprs {
|
||||
currentExprs = &elseExprs
|
||||
continue Token
|
||||
}
|
||||
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Unexpected else directive",
|
||||
Detail: fmt.Sprintf(
|
||||
"Already in the else clause for the if started at %s.",
|
||||
openIf.SrcRange,
|
||||
),
|
||||
Subject: &end.SrcRange,
|
||||
})
|
||||
|
||||
case templateEndIf:
|
||||
endifRange = end.SrcRange
|
||||
break Token
|
||||
|
||||
default:
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: fmt.Sprintf("Unexpected %s directive", end.Name()),
|
||||
Detail: fmt.Sprintf(
|
||||
"Expecting an endif directive for the if started at %s.",
|
||||
openIf.SrcRange,
|
||||
),
|
||||
Subject: &end.SrcRange,
|
||||
})
|
||||
}
|
||||
|
||||
return errPlaceholderExpr(end.SrcRange), diags
|
||||
}
|
||||
|
||||
expr, exprDiags := p.parseExpr()
|
||||
diags = append(diags, exprDiags...)
|
||||
*currentExprs = append(*currentExprs, expr)
|
||||
}
|
||||
|
||||
if len(ifExprs) == 0 {
|
||||
ifExprs = append(ifExprs, &LiteralValueExpr{
|
||||
Val: cty.StringVal(""),
|
||||
SrcRange: hcl.Range{
|
||||
Filename: openIf.SrcRange.Filename,
|
||||
Start: openIf.SrcRange.End,
|
||||
End: openIf.SrcRange.End,
|
||||
},
|
||||
})
|
||||
}
|
||||
if len(elseExprs) == 0 {
|
||||
elseExprs = append(elseExprs, &LiteralValueExpr{
|
||||
Val: cty.StringVal(""),
|
||||
SrcRange: hcl.Range{
|
||||
Filename: endifRange.Filename,
|
||||
Start: endifRange.Start,
|
||||
End: endifRange.Start,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
trueExpr := &TemplateExpr{
|
||||
Parts: ifExprs,
|
||||
SrcRange: hcl.RangeBetween(ifExprs[0].Range(), ifExprs[len(ifExprs)-1].Range()),
|
||||
}
|
||||
falseExpr := &TemplateExpr{
|
||||
Parts: elseExprs,
|
||||
SrcRange: hcl.RangeBetween(elseExprs[0].Range(), elseExprs[len(elseExprs)-1].Range()),
|
||||
}
|
||||
|
||||
return &ConditionalExpr{
|
||||
Condition: openIf.CondExpr,
|
||||
TrueResult: trueExpr,
|
||||
FalseResult: falseExpr,
|
||||
|
||||
SrcRange: hcl.RangeBetween(openIf.SrcRange, endifRange),
|
||||
}, diags
|
||||
}
|
||||
|
||||
func (p *templateParser) parseFor() (Expression, hcl.Diagnostics) {
|
||||
open := p.Read()
|
||||
openFor, isFor := open.(*templateForToken)
|
||||
if !isFor {
|
||||
// should never happen if caller is behaving
|
||||
panic("parseFor called with peeker not pointing at for token")
|
||||
}
|
||||
|
||||
var contentExprs []Expression
|
||||
var diags hcl.Diagnostics
|
||||
var endforRange hcl.Range
|
||||
|
||||
Token:
|
||||
for {
|
||||
next := p.Peek()
|
||||
if end, isEnd := next.(*templateEndToken); isEnd {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Unexpected end of template",
|
||||
Detail: fmt.Sprintf(
|
||||
"The for directive at %s is missing its corresponding endfor directive.",
|
||||
openFor.SrcRange,
|
||||
),
|
||||
Subject: &end.SrcRange,
|
||||
})
|
||||
return errPlaceholderExpr(end.SrcRange), diags
|
||||
}
|
||||
if end, isCtrlEnd := next.(*templateEndCtrlToken); isCtrlEnd {
|
||||
p.Read() // eat end directive
|
||||
|
||||
switch end.Type {
|
||||
|
||||
case templateElse:
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Unexpected else directive",
|
||||
Detail: "An else clause is not expected for a for directive.",
|
||||
Subject: &end.SrcRange,
|
||||
})
|
||||
|
||||
case templateEndFor:
|
||||
endforRange = end.SrcRange
|
||||
break Token
|
||||
|
||||
default:
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: fmt.Sprintf("Unexpected %s directive", end.Name()),
|
||||
Detail: fmt.Sprintf(
|
||||
"Expecting an endfor directive corresponding to the for directive at %s.",
|
||||
openFor.SrcRange,
|
||||
),
|
||||
Subject: &end.SrcRange,
|
||||
})
|
||||
}
|
||||
|
||||
return errPlaceholderExpr(end.SrcRange), diags
|
||||
}
|
||||
|
||||
expr, exprDiags := p.parseExpr()
|
||||
diags = append(diags, exprDiags...)
|
||||
contentExprs = append(contentExprs, expr)
|
||||
}
|
||||
|
||||
if len(contentExprs) == 0 {
|
||||
contentExprs = append(contentExprs, &LiteralValueExpr{
|
||||
Val: cty.StringVal(""),
|
||||
SrcRange: hcl.Range{
|
||||
Filename: openFor.SrcRange.Filename,
|
||||
Start: openFor.SrcRange.End,
|
||||
End: openFor.SrcRange.End,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
contentExpr := &TemplateExpr{
|
||||
Parts: contentExprs,
|
||||
SrcRange: hcl.RangeBetween(contentExprs[0].Range(), contentExprs[len(contentExprs)-1].Range()),
|
||||
}
|
||||
|
||||
forExpr := &ForExpr{
|
||||
KeyVar: openFor.KeyVar,
|
||||
ValVar: openFor.ValVar,
|
||||
|
||||
CollExpr: openFor.CollExpr,
|
||||
ValExpr: contentExpr,
|
||||
|
||||
SrcRange: hcl.RangeBetween(openFor.SrcRange, endforRange),
|
||||
OpenRange: openFor.SrcRange,
|
||||
CloseRange: endforRange,
|
||||
}
|
||||
|
||||
return &TemplateJoinExpr{
|
||||
Tuple: forExpr,
|
||||
}, diags
|
||||
}
|
||||
|
||||
func (p *templateParser) Peek() templateToken {
|
||||
return p.Tokens[p.pos]
|
||||
}
|
||||
|
||||
func (p *templateParser) Read() templateToken {
|
||||
ret := p.Peek()
|
||||
if _, end := ret.(*templateEndToken); !end {
|
||||
p.pos++
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// parseTemplateParts produces a flat sequence of "template tokens", which are
|
||||
// either literal values (with any "trimming" already applied), interpolation
|
||||
// sequences, or control flow markers.
|
||||
//
|
||||
// A further pass is required on the result to turn it into an AST.
|
||||
func (p *parser) parseTemplateParts(end TokenType) (*templateParts, hcl.Diagnostics) {
|
||||
var parts []templateToken
|
||||
var diags hcl.Diagnostics
|
||||
|
||||
startRange := p.NextRange()
|
||||
ltrimNext := false
|
||||
nextCanTrimPrev := false
|
||||
var endRange hcl.Range
|
||||
|
||||
Token:
|
||||
for {
|
||||
next := p.Read()
|
||||
if next.Type == end {
|
||||
// all done!
|
||||
endRange = next.Range
|
||||
break
|
||||
}
|
||||
|
||||
ltrim := ltrimNext
|
||||
ltrimNext = false
|
||||
canTrimPrev := nextCanTrimPrev
|
||||
nextCanTrimPrev = false
|
||||
|
||||
switch next.Type {
|
||||
case TokenStringLit, TokenQuotedLit:
|
||||
str, strDiags := ParseStringLiteralToken(next)
|
||||
diags = append(diags, strDiags...)
|
||||
|
||||
if ltrim {
|
||||
str = strings.TrimLeftFunc(str, unicode.IsSpace)
|
||||
}
|
||||
|
||||
parts = append(parts, &templateLiteralToken{
|
||||
Val: str,
|
||||
SrcRange: next.Range,
|
||||
})
|
||||
nextCanTrimPrev = true
|
||||
|
||||
case TokenTemplateInterp:
|
||||
// if the opener is ${~ then we want to eat any trailing whitespace
|
||||
// in the preceding literal token, assuming it is indeed a literal
|
||||
// token.
|
||||
if canTrimPrev && len(next.Bytes) == 3 && next.Bytes[2] == '~' && len(parts) > 0 {
|
||||
prevExpr := parts[len(parts)-1]
|
||||
if lexpr, ok := prevExpr.(*templateLiteralToken); ok {
|
||||
lexpr.Val = strings.TrimRightFunc(lexpr.Val, unicode.IsSpace)
|
||||
}
|
||||
}
|
||||
|
||||
p.PushIncludeNewlines(false)
|
||||
expr, exprDiags := p.ParseExpression()
|
||||
diags = append(diags, exprDiags...)
|
||||
close := p.Peek()
|
||||
if close.Type != TokenTemplateSeqEnd {
|
||||
if !p.recovery {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Extra characters after interpolation expression",
|
||||
Detail: "Expected a closing brace to end the interpolation expression, but found extra characters.",
|
||||
Subject: &close.Range,
|
||||
Context: hcl.RangeBetween(startRange, close.Range).Ptr(),
|
||||
})
|
||||
}
|
||||
p.recover(TokenTemplateSeqEnd)
|
||||
} else {
|
||||
p.Read() // eat closing brace
|
||||
|
||||
// If the closer is ~} then we want to eat any leading
|
||||
// whitespace on the next token, if it turns out to be a
|
||||
// literal token.
|
||||
if len(close.Bytes) == 2 && close.Bytes[0] == '~' {
|
||||
ltrimNext = true
|
||||
}
|
||||
}
|
||||
p.PopIncludeNewlines()
|
||||
parts = append(parts, &templateInterpToken{
|
||||
Expr: expr,
|
||||
SrcRange: hcl.RangeBetween(next.Range, close.Range),
|
||||
})
|
||||
|
||||
case TokenTemplateControl:
|
||||
// if the opener is %{~ then we want to eat any trailing whitespace
|
||||
// in the preceding literal token, assuming it is indeed a literal
|
||||
// token.
|
||||
if canTrimPrev && len(next.Bytes) == 3 && next.Bytes[2] == '~' && len(parts) > 0 {
|
||||
prevExpr := parts[len(parts)-1]
|
||||
if lexpr, ok := prevExpr.(*templateLiteralToken); ok {
|
||||
lexpr.Val = strings.TrimRightFunc(lexpr.Val, unicode.IsSpace)
|
||||
}
|
||||
}
|
||||
p.PushIncludeNewlines(false)
|
||||
|
||||
kw := p.Peek()
|
||||
if kw.Type != TokenIdent {
|
||||
if !p.recovery {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid template directive",
|
||||
Detail: "A template directive keyword (\"if\", \"for\", etc) is expected at the beginning of a %{ sequence.",
|
||||
Subject: &kw.Range,
|
||||
Context: hcl.RangeBetween(next.Range, kw.Range).Ptr(),
|
||||
})
|
||||
}
|
||||
p.recover(TokenTemplateSeqEnd)
|
||||
p.PopIncludeNewlines()
|
||||
continue Token
|
||||
}
|
||||
p.Read() // eat keyword token
|
||||
|
||||
switch {
|
||||
|
||||
case ifKeyword.TokenMatches(kw):
|
||||
condExpr, exprDiags := p.ParseExpression()
|
||||
diags = append(diags, exprDiags...)
|
||||
parts = append(parts, &templateIfToken{
|
||||
CondExpr: condExpr,
|
||||
SrcRange: hcl.RangeBetween(next.Range, p.NextRange()),
|
||||
})
|
||||
|
||||
case elseKeyword.TokenMatches(kw):
|
||||
parts = append(parts, &templateEndCtrlToken{
|
||||
Type: templateElse,
|
||||
SrcRange: hcl.RangeBetween(next.Range, p.NextRange()),
|
||||
})
|
||||
|
||||
case endifKeyword.TokenMatches(kw):
|
||||
parts = append(parts, &templateEndCtrlToken{
|
||||
Type: templateEndIf,
|
||||
SrcRange: hcl.RangeBetween(next.Range, p.NextRange()),
|
||||
})
|
||||
|
||||
case forKeyword.TokenMatches(kw):
|
||||
var keyName, valName string
|
||||
if p.Peek().Type != TokenIdent {
|
||||
if !p.recovery {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid 'for' directive",
|
||||
Detail: "For directive requires variable name after 'for'.",
|
||||
Subject: p.Peek().Range.Ptr(),
|
||||
})
|
||||
}
|
||||
p.recover(TokenTemplateSeqEnd)
|
||||
p.PopIncludeNewlines()
|
||||
continue Token
|
||||
}
|
||||
|
||||
valName = string(p.Read().Bytes)
|
||||
|
||||
if p.Peek().Type == TokenComma {
|
||||
// What we just read was actually the key, then.
|
||||
keyName = valName
|
||||
p.Read() // eat comma
|
||||
|
||||
if p.Peek().Type != TokenIdent {
|
||||
if !p.recovery {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid 'for' directive",
|
||||
Detail: "For directive requires value variable name after comma.",
|
||||
Subject: p.Peek().Range.Ptr(),
|
||||
})
|
||||
}
|
||||
p.recover(TokenTemplateSeqEnd)
|
||||
p.PopIncludeNewlines()
|
||||
continue Token
|
||||
}
|
||||
|
||||
valName = string(p.Read().Bytes)
|
||||
}
|
||||
|
||||
if !inKeyword.TokenMatches(p.Peek()) {
|
||||
if !p.recovery {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid 'for' directive",
|
||||
Detail: "For directive requires 'in' keyword after names.",
|
||||
Subject: p.Peek().Range.Ptr(),
|
||||
})
|
||||
}
|
||||
p.recover(TokenTemplateSeqEnd)
|
||||
p.PopIncludeNewlines()
|
||||
continue Token
|
||||
}
|
||||
p.Read() // eat 'in' keyword
|
||||
|
||||
collExpr, collDiags := p.ParseExpression()
|
||||
diags = append(diags, collDiags...)
|
||||
parts = append(parts, &templateForToken{
|
||||
KeyVar: keyName,
|
||||
ValVar: valName,
|
||||
CollExpr: collExpr,
|
||||
|
||||
SrcRange: hcl.RangeBetween(next.Range, p.NextRange()),
|
||||
})
|
||||
|
||||
case endforKeyword.TokenMatches(kw):
|
||||
parts = append(parts, &templateEndCtrlToken{
|
||||
Type: templateEndFor,
|
||||
SrcRange: hcl.RangeBetween(next.Range, p.NextRange()),
|
||||
})
|
||||
|
||||
default:
|
||||
if !p.recovery {
|
||||
suggestions := []string{"if", "for", "else", "endif", "endfor"}
|
||||
given := string(kw.Bytes)
|
||||
suggestion := nameSuggestion(given, suggestions)
|
||||
if suggestion != "" {
|
||||
suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
|
||||
}
|
||||
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid template control keyword",
|
||||
Detail: fmt.Sprintf("%q is not a valid template control keyword.%s", given, suggestion),
|
||||
Subject: &kw.Range,
|
||||
Context: hcl.RangeBetween(next.Range, kw.Range).Ptr(),
|
||||
})
|
||||
}
|
||||
p.recover(TokenTemplateSeqEnd)
|
||||
p.PopIncludeNewlines()
|
||||
continue Token
|
||||
|
||||
}
|
||||
|
||||
close := p.Peek()
|
||||
if close.Type != TokenTemplateSeqEnd {
|
||||
if !p.recovery {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: fmt.Sprintf("Extra characters in %s marker", kw.Bytes),
|
||||
Detail: "Expected a closing brace to end the sequence, but found extra characters.",
|
||||
Subject: &close.Range,
|
||||
Context: hcl.RangeBetween(startRange, close.Range).Ptr(),
|
||||
})
|
||||
}
|
||||
p.recover(TokenTemplateSeqEnd)
|
||||
} else {
|
||||
p.Read() // eat closing brace
|
||||
|
||||
// If the closer is ~} then we want to eat any leading
|
||||
// whitespace on the next token, if it turns out to be a
|
||||
// literal token.
|
||||
if len(close.Bytes) == 2 && close.Bytes[0] == '~' {
|
||||
ltrimNext = true
|
||||
}
|
||||
}
|
||||
p.PopIncludeNewlines()
|
||||
|
||||
default:
|
||||
if !p.recovery {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Unterminated template string",
|
||||
Detail: "No closing marker was found for the string.",
|
||||
Subject: &next.Range,
|
||||
Context: hcl.RangeBetween(startRange, next.Range).Ptr(),
|
||||
})
|
||||
}
|
||||
final := p.recover(end)
|
||||
endRange = final.Range
|
||||
break Token
|
||||
}
|
||||
}
|
||||
|
||||
if len(parts) == 0 {
|
||||
// If a sequence has no content, we'll treat it as if it had an
|
||||
// empty string in it because that's what the user probably means
|
||||
// if they write "" in configuration.
|
||||
parts = append(parts, &templateLiteralToken{
|
||||
Val: "",
|
||||
SrcRange: hcl.Range{
|
||||
// Range is the zero-character span immediately after the
|
||||
// opening quote.
|
||||
Filename: startRange.Filename,
|
||||
Start: startRange.End,
|
||||
End: startRange.End,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// Always end with an end token, so the parser can produce diagnostics
|
||||
// about unclosed items with proper position information.
|
||||
parts = append(parts, &templateEndToken{
|
||||
SrcRange: endRange,
|
||||
})
|
||||
|
||||
ret := &templateParts{
|
||||
Tokens: parts,
|
||||
SrcRange: hcl.RangeBetween(startRange, endRange),
|
||||
}
|
||||
|
||||
return ret, diags
|
||||
}
|
||||
|
||||
// flushHeredocTemplateParts modifies in-place the line-leading literal strings
|
||||
// to apply the flush heredoc processing rule: find the line with the smallest
|
||||
// number of whitespace characters as prefix and then trim that number of
|
||||
// characters from all of the lines.
|
||||
//
|
||||
// This rule is applied to static tokens rather than to the rendered result,
|
||||
// so interpolating a string with leading whitespace cannot affect the chosen
|
||||
// prefix length.
|
||||
func flushHeredocTemplateParts(parts *templateParts) {
|
||||
if len(parts.Tokens) == 0 {
|
||||
// Nothing to do
|
||||
return
|
||||
}
|
||||
|
||||
const maxInt = int((^uint(0)) >> 1)
|
||||
|
||||
minSpaces := maxInt
|
||||
newline := true
|
||||
var adjust []*templateLiteralToken
|
||||
for _, ttok := range parts.Tokens {
|
||||
if newline {
|
||||
newline = false
|
||||
var spaces int
|
||||
if lit, ok := ttok.(*templateLiteralToken); ok {
|
||||
orig := lit.Val
|
||||
trimmed := strings.TrimLeftFunc(orig, unicode.IsSpace)
|
||||
// If a token is entirely spaces and ends with a newline
|
||||
// then it's a "blank line" and thus not considered for
|
||||
// space-prefix-counting purposes.
|
||||
if len(trimmed) == 0 && strings.HasSuffix(orig, "\n") {
|
||||
spaces = maxInt
|
||||
} else {
|
||||
spaceBytes := len(lit.Val) - len(trimmed)
|
||||
spaces, _ = textseg.TokenCount([]byte(orig[:spaceBytes]), textseg.ScanGraphemeClusters)
|
||||
adjust = append(adjust, lit)
|
||||
}
|
||||
} else if _, ok := ttok.(*templateEndToken); ok {
|
||||
break // don't process the end token since it never has spaces before it
|
||||
}
|
||||
if spaces < minSpaces {
|
||||
minSpaces = spaces
|
||||
}
|
||||
}
|
||||
if lit, ok := ttok.(*templateLiteralToken); ok {
|
||||
if strings.HasSuffix(lit.Val, "\n") {
|
||||
newline = true // The following token, if any, begins a new line
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, lit := range adjust {
|
||||
// Since we want to count space _characters_ rather than space _bytes_,
|
||||
// we can't just do a straightforward slice operation here and instead
|
||||
// need to hunt for the split point with a scanner.
|
||||
valBytes := []byte(lit.Val)
|
||||
spaceByteCount := 0
|
||||
for i := 0; i < minSpaces; i++ {
|
||||
adv, _, _ := textseg.ScanGraphemeClusters(valBytes, true)
|
||||
spaceByteCount += adv
|
||||
valBytes = valBytes[adv:]
|
||||
}
|
||||
lit.Val = lit.Val[spaceByteCount:]
|
||||
lit.SrcRange.Start.Column += minSpaces
|
||||
lit.SrcRange.Start.Byte += spaceByteCount
|
||||
}
|
||||
}
|
||||
|
||||
type templateParts struct {
|
||||
Tokens []templateToken
|
||||
SrcRange hcl.Range
|
||||
}
|
||||
|
||||
// templateToken is a higher-level token that represents a single atom within
|
||||
// the template language. Our template parsing first raises the raw token
|
||||
// stream to a sequence of templateToken, and then transforms the result into
|
||||
// an expression tree.
|
||||
type templateToken interface {
|
||||
templateToken() templateToken
|
||||
}
|
||||
|
||||
type templateLiteralToken struct {
|
||||
Val string
|
||||
SrcRange hcl.Range
|
||||
isTemplateToken
|
||||
}
|
||||
|
||||
type templateInterpToken struct {
|
||||
Expr Expression
|
||||
SrcRange hcl.Range
|
||||
isTemplateToken
|
||||
}
|
||||
|
||||
type templateIfToken struct {
|
||||
CondExpr Expression
|
||||
SrcRange hcl.Range
|
||||
isTemplateToken
|
||||
}
|
||||
|
||||
type templateForToken struct {
|
||||
KeyVar string // empty if ignoring key
|
||||
ValVar string
|
||||
CollExpr Expression
|
||||
SrcRange hcl.Range
|
||||
isTemplateToken
|
||||
}
|
||||
|
||||
type templateEndCtrlType int
|
||||
|
||||
const (
|
||||
templateEndIf templateEndCtrlType = iota
|
||||
templateElse
|
||||
templateEndFor
|
||||
)
|
||||
|
||||
type templateEndCtrlToken struct {
|
||||
Type templateEndCtrlType
|
||||
SrcRange hcl.Range
|
||||
isTemplateToken
|
||||
}
|
||||
|
||||
func (t *templateEndCtrlToken) Name() string {
|
||||
switch t.Type {
|
||||
case templateEndIf:
|
||||
return "endif"
|
||||
case templateElse:
|
||||
return "else"
|
||||
case templateEndFor:
|
||||
return "endfor"
|
||||
default:
|
||||
// should never happen
|
||||
panic("invalid templateEndCtrlType")
|
||||
}
|
||||
}
|
||||
|
||||
type templateEndToken struct {
|
||||
SrcRange hcl.Range
|
||||
isTemplateToken
|
||||
}
|
||||
|
||||
type isTemplateToken [0]int
|
||||
|
||||
func (t isTemplateToken) templateToken() templateToken {
|
||||
return t
|
||||
}
|
||||
159
vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_traversal.go
generated
vendored
Normal file
159
vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_traversal.go
generated
vendored
Normal file
@@ -0,0 +1,159 @@
|
||||
package hclsyntax
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// ParseTraversalAbs parses an absolute traversal that is assumed to consume
|
||||
// all of the remaining tokens in the peeker. The usual parser recovery
|
||||
// behavior is not supported here because traversals are not expected to
|
||||
// be parsed as part of a larger program.
|
||||
func (p *parser) ParseTraversalAbs() (hcl.Traversal, hcl.Diagnostics) {
|
||||
var ret hcl.Traversal
|
||||
var diags hcl.Diagnostics
|
||||
|
||||
// Absolute traversal must always begin with a variable name
|
||||
varTok := p.Read()
|
||||
if varTok.Type != TokenIdent {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Variable name required",
|
||||
Detail: "Must begin with a variable name.",
|
||||
Subject: &varTok.Range,
|
||||
})
|
||||
return ret, diags
|
||||
}
|
||||
|
||||
varName := string(varTok.Bytes)
|
||||
ret = append(ret, hcl.TraverseRoot{
|
||||
Name: varName,
|
||||
SrcRange: varTok.Range,
|
||||
})
|
||||
|
||||
for {
|
||||
next := p.Peek()
|
||||
|
||||
if next.Type == TokenEOF {
|
||||
return ret, diags
|
||||
}
|
||||
|
||||
switch next.Type {
|
||||
case TokenDot:
|
||||
// Attribute access
|
||||
dot := p.Read() // eat dot
|
||||
nameTok := p.Read()
|
||||
if nameTok.Type != TokenIdent {
|
||||
if nameTok.Type == TokenStar {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Attribute name required",
|
||||
Detail: "Splat expressions (.*) may not be used here.",
|
||||
Subject: &nameTok.Range,
|
||||
Context: hcl.RangeBetween(varTok.Range, nameTok.Range).Ptr(),
|
||||
})
|
||||
} else {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Attribute name required",
|
||||
Detail: "Dot must be followed by attribute name.",
|
||||
Subject: &nameTok.Range,
|
||||
Context: hcl.RangeBetween(varTok.Range, nameTok.Range).Ptr(),
|
||||
})
|
||||
}
|
||||
return ret, diags
|
||||
}
|
||||
|
||||
attrName := string(nameTok.Bytes)
|
||||
ret = append(ret, hcl.TraverseAttr{
|
||||
Name: attrName,
|
||||
SrcRange: hcl.RangeBetween(dot.Range, nameTok.Range),
|
||||
})
|
||||
case TokenOBrack:
|
||||
// Index
|
||||
open := p.Read() // eat open bracket
|
||||
next := p.Peek()
|
||||
|
||||
switch next.Type {
|
||||
case TokenNumberLit:
|
||||
tok := p.Read() // eat number
|
||||
numVal, numDiags := p.numberLitValue(tok)
|
||||
diags = append(diags, numDiags...)
|
||||
|
||||
close := p.Read()
|
||||
if close.Type != TokenCBrack {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Unclosed index brackets",
|
||||
Detail: "Index key must be followed by a closing bracket.",
|
||||
Subject: &close.Range,
|
||||
Context: hcl.RangeBetween(open.Range, close.Range).Ptr(),
|
||||
})
|
||||
}
|
||||
|
||||
ret = append(ret, hcl.TraverseIndex{
|
||||
Key: numVal,
|
||||
SrcRange: hcl.RangeBetween(open.Range, close.Range),
|
||||
})
|
||||
|
||||
if diags.HasErrors() {
|
||||
return ret, diags
|
||||
}
|
||||
|
||||
case TokenOQuote:
|
||||
str, _, strDiags := p.parseQuotedStringLiteral()
|
||||
diags = append(diags, strDiags...)
|
||||
|
||||
close := p.Read()
|
||||
if close.Type != TokenCBrack {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Unclosed index brackets",
|
||||
Detail: "Index key must be followed by a closing bracket.",
|
||||
Subject: &close.Range,
|
||||
Context: hcl.RangeBetween(open.Range, close.Range).Ptr(),
|
||||
})
|
||||
}
|
||||
|
||||
ret = append(ret, hcl.TraverseIndex{
|
||||
Key: cty.StringVal(str),
|
||||
SrcRange: hcl.RangeBetween(open.Range, close.Range),
|
||||
})
|
||||
|
||||
if diags.HasErrors() {
|
||||
return ret, diags
|
||||
}
|
||||
|
||||
default:
|
||||
if next.Type == TokenStar {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Attribute name required",
|
||||
Detail: "Splat expressions ([*]) may not be used here.",
|
||||
Subject: &next.Range,
|
||||
Context: hcl.RangeBetween(varTok.Range, next.Range).Ptr(),
|
||||
})
|
||||
} else {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Index value required",
|
||||
Detail: "Index brackets must contain either a literal number or a literal string.",
|
||||
Subject: &next.Range,
|
||||
Context: hcl.RangeBetween(varTok.Range, next.Range).Ptr(),
|
||||
})
|
||||
}
|
||||
return ret, diags
|
||||
}
|
||||
|
||||
default:
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid character",
|
||||
Detail: "Expected an attribute access or an index operator.",
|
||||
Subject: &next.Range,
|
||||
Context: hcl.RangeBetween(varTok.Range, next.Range).Ptr(),
|
||||
})
|
||||
return ret, diags
|
||||
}
|
||||
}
|
||||
}
|
||||
212
vendor/github.com/hashicorp/hcl/v2/hclsyntax/peeker.go
generated
vendored
Normal file
212
vendor/github.com/hashicorp/hcl/v2/hclsyntax/peeker.go
generated
vendored
Normal file
@@ -0,0 +1,212 @@
|
||||
package hclsyntax
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
)
|
||||
|
||||
// This is set to true at init() time in tests, to enable more useful output
|
||||
// if a stack discipline error is detected. It should not be enabled in
|
||||
// normal mode since there is a performance penalty from accessing the
|
||||
// runtime stack to produce the traces, but could be temporarily set to
|
||||
// true for debugging if desired.
|
||||
var tracePeekerNewlinesStack = false
|
||||
|
||||
type peeker struct {
|
||||
Tokens Tokens
|
||||
NextIndex int
|
||||
|
||||
IncludeComments bool
|
||||
IncludeNewlinesStack []bool
|
||||
|
||||
// used only when tracePeekerNewlinesStack is set
|
||||
newlineStackChanges []peekerNewlineStackChange
|
||||
}
|
||||
|
||||
// for use in debugging the stack usage only
|
||||
type peekerNewlineStackChange struct {
|
||||
Pushing bool // if false, then popping
|
||||
Frame runtime.Frame
|
||||
Include bool
|
||||
}
|
||||
|
||||
func newPeeker(tokens Tokens, includeComments bool) *peeker {
|
||||
return &peeker{
|
||||
Tokens: tokens,
|
||||
IncludeComments: includeComments,
|
||||
|
||||
IncludeNewlinesStack: []bool{true},
|
||||
}
|
||||
}
|
||||
|
||||
func (p *peeker) Peek() Token {
|
||||
ret, _ := p.nextToken()
|
||||
return ret
|
||||
}
|
||||
|
||||
func (p *peeker) Read() Token {
|
||||
ret, nextIdx := p.nextToken()
|
||||
p.NextIndex = nextIdx
|
||||
return ret
|
||||
}
|
||||
|
||||
func (p *peeker) NextRange() hcl.Range {
|
||||
return p.Peek().Range
|
||||
}
|
||||
|
||||
func (p *peeker) PrevRange() hcl.Range {
|
||||
if p.NextIndex == 0 {
|
||||
return p.NextRange()
|
||||
}
|
||||
|
||||
return p.Tokens[p.NextIndex-1].Range
|
||||
}
|
||||
|
||||
func (p *peeker) nextToken() (Token, int) {
|
||||
for i := p.NextIndex; i < len(p.Tokens); i++ {
|
||||
tok := p.Tokens[i]
|
||||
switch tok.Type {
|
||||
case TokenComment:
|
||||
if !p.IncludeComments {
|
||||
// Single-line comment tokens, starting with # or //, absorb
|
||||
// the trailing newline that terminates them as part of their
|
||||
// bytes. When we're filtering out comments, we must as a
|
||||
// special case transform these to newline tokens in order
|
||||
// to properly parse newline-terminated block items.
|
||||
|
||||
if p.includingNewlines() {
|
||||
if len(tok.Bytes) > 0 && tok.Bytes[len(tok.Bytes)-1] == '\n' {
|
||||
fakeNewline := Token{
|
||||
Type: TokenNewline,
|
||||
Bytes: tok.Bytes[len(tok.Bytes)-1 : len(tok.Bytes)],
|
||||
|
||||
// We use the whole token range as the newline
|
||||
// range, even though that's a little... weird,
|
||||
// because otherwise we'd need to go count
|
||||
// characters again in order to figure out the
|
||||
// column of the newline, and that complexity
|
||||
// isn't justified when ranges of newlines are
|
||||
// so rarely printed anyway.
|
||||
Range: tok.Range,
|
||||
}
|
||||
return fakeNewline, i + 1
|
||||
}
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
case TokenNewline:
|
||||
if !p.includingNewlines() {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
return tok, i + 1
|
||||
}
|
||||
|
||||
// if we fall out here then we'll return the EOF token, and leave
|
||||
// our index pointed off the end of the array so we'll keep
|
||||
// returning EOF in future too.
|
||||
return p.Tokens[len(p.Tokens)-1], len(p.Tokens)
|
||||
}
|
||||
|
||||
func (p *peeker) includingNewlines() bool {
|
||||
return p.IncludeNewlinesStack[len(p.IncludeNewlinesStack)-1]
|
||||
}
|
||||
|
||||
func (p *peeker) PushIncludeNewlines(include bool) {
|
||||
if tracePeekerNewlinesStack {
|
||||
// Record who called us so that we can more easily track down any
|
||||
// mismanagement of the stack in the parser.
|
||||
callers := []uintptr{0}
|
||||
runtime.Callers(2, callers)
|
||||
frames := runtime.CallersFrames(callers)
|
||||
frame, _ := frames.Next()
|
||||
p.newlineStackChanges = append(p.newlineStackChanges, peekerNewlineStackChange{
|
||||
true, frame, include,
|
||||
})
|
||||
}
|
||||
|
||||
p.IncludeNewlinesStack = append(p.IncludeNewlinesStack, include)
|
||||
}
|
||||
|
||||
func (p *peeker) PopIncludeNewlines() bool {
|
||||
stack := p.IncludeNewlinesStack
|
||||
remain, ret := stack[:len(stack)-1], stack[len(stack)-1]
|
||||
p.IncludeNewlinesStack = remain
|
||||
|
||||
if tracePeekerNewlinesStack {
|
||||
// Record who called us so that we can more easily track down any
|
||||
// mismanagement of the stack in the parser.
|
||||
callers := []uintptr{0}
|
||||
runtime.Callers(2, callers)
|
||||
frames := runtime.CallersFrames(callers)
|
||||
frame, _ := frames.Next()
|
||||
p.newlineStackChanges = append(p.newlineStackChanges, peekerNewlineStackChange{
|
||||
false, frame, ret,
|
||||
})
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
// AssertEmptyNewlinesStack checks if the IncludeNewlinesStack is empty, doing
|
||||
// panicking if it is not. This can be used to catch stack mismanagement that
|
||||
// might otherwise just cause confusing downstream errors.
|
||||
//
|
||||
// This function is a no-op if the stack is empty when called.
|
||||
//
|
||||
// If newlines stack tracing is enabled by setting the global variable
|
||||
// tracePeekerNewlinesStack at init time, a full log of all of the push/pop
|
||||
// calls will be produced to help identify which caller in the parser is
|
||||
// misbehaving.
|
||||
func (p *peeker) AssertEmptyIncludeNewlinesStack() {
|
||||
if len(p.IncludeNewlinesStack) != 1 {
|
||||
// Should never happen; indicates mismanagement of the stack inside
|
||||
// the parser.
|
||||
if p.newlineStackChanges != nil { // only if traceNewlinesStack is enabled above
|
||||
panic(fmt.Errorf(
|
||||
"non-empty IncludeNewlinesStack after parse with %d calls unaccounted for:\n%s",
|
||||
len(p.IncludeNewlinesStack)-1,
|
||||
formatPeekerNewlineStackChanges(p.newlineStackChanges),
|
||||
))
|
||||
} else {
|
||||
panic(fmt.Errorf("non-empty IncludeNewlinesStack after parse: %#v", p.IncludeNewlinesStack))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func formatPeekerNewlineStackChanges(changes []peekerNewlineStackChange) string {
|
||||
indent := 0
|
||||
var buf bytes.Buffer
|
||||
for _, change := range changes {
|
||||
funcName := change.Frame.Function
|
||||
if idx := strings.LastIndexByte(funcName, '.'); idx != -1 {
|
||||
funcName = funcName[idx+1:]
|
||||
}
|
||||
filename := change.Frame.File
|
||||
if idx := strings.LastIndexByte(filename, filepath.Separator); idx != -1 {
|
||||
filename = filename[idx+1:]
|
||||
}
|
||||
|
||||
switch change.Pushing {
|
||||
|
||||
case true:
|
||||
buf.WriteString(strings.Repeat(" ", indent))
|
||||
fmt.Fprintf(&buf, "PUSH %#v (%s at %s:%d)\n", change.Include, funcName, filename, change.Frame.Line)
|
||||
indent++
|
||||
|
||||
case false:
|
||||
indent--
|
||||
buf.WriteString(strings.Repeat(" ", indent))
|
||||
fmt.Fprintf(&buf, "POP %#v (%s at %s:%d)\n", change.Include, funcName, filename, change.Frame.Line)
|
||||
|
||||
}
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
171
vendor/github.com/hashicorp/hcl/v2/hclsyntax/public.go
generated
vendored
Normal file
171
vendor/github.com/hashicorp/hcl/v2/hclsyntax/public.go
generated
vendored
Normal file
@@ -0,0 +1,171 @@
|
||||
package hclsyntax
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
)
|
||||
|
||||
// ParseConfig parses the given buffer as a whole HCL config file, returning
|
||||
// a *hcl.File representing its contents. If HasErrors called on the returned
|
||||
// diagnostics returns true, the returned body is likely to be incomplete
|
||||
// and should therefore be used with care.
|
||||
//
|
||||
// The body in the returned file has dynamic type *hclsyntax.Body, so callers
|
||||
// may freely type-assert this to get access to the full hclsyntax API in
|
||||
// situations where detailed access is required. However, most common use-cases
|
||||
// should be served using the hcl.Body interface to ensure compatibility with
|
||||
// other configurationg syntaxes, such as JSON.
|
||||
func ParseConfig(src []byte, filename string, start hcl.Pos) (*hcl.File, hcl.Diagnostics) {
|
||||
tokens, diags := LexConfig(src, filename, start)
|
||||
peeker := newPeeker(tokens, false)
|
||||
parser := &parser{peeker: peeker}
|
||||
body, parseDiags := parser.ParseBody(TokenEOF)
|
||||
diags = append(diags, parseDiags...)
|
||||
|
||||
// Panic if the parser uses incorrect stack discipline with the peeker's
|
||||
// newlines stack, since otherwise it will produce confusing downstream
|
||||
// errors.
|
||||
peeker.AssertEmptyIncludeNewlinesStack()
|
||||
|
||||
return &hcl.File{
|
||||
Body: body,
|
||||
Bytes: src,
|
||||
|
||||
Nav: navigation{
|
||||
root: body,
|
||||
},
|
||||
}, diags
|
||||
}
|
||||
|
||||
// ParseExpression parses the given buffer as a standalone HCL expression,
|
||||
// returning it as an instance of Expression.
|
||||
func ParseExpression(src []byte, filename string, start hcl.Pos) (Expression, hcl.Diagnostics) {
|
||||
tokens, diags := LexExpression(src, filename, start)
|
||||
peeker := newPeeker(tokens, false)
|
||||
parser := &parser{peeker: peeker}
|
||||
|
||||
// Bare expressions are always parsed in "ignore newlines" mode, as if
|
||||
// they were wrapped in parentheses.
|
||||
parser.PushIncludeNewlines(false)
|
||||
|
||||
expr, parseDiags := parser.ParseExpression()
|
||||
diags = append(diags, parseDiags...)
|
||||
|
||||
next := parser.Peek()
|
||||
if next.Type != TokenEOF && !parser.recovery {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Extra characters after expression",
|
||||
Detail: "An expression was successfully parsed, but extra characters were found after it.",
|
||||
Subject: &next.Range,
|
||||
})
|
||||
}
|
||||
|
||||
parser.PopIncludeNewlines()
|
||||
|
||||
// Panic if the parser uses incorrect stack discipline with the peeker's
|
||||
// newlines stack, since otherwise it will produce confusing downstream
|
||||
// errors.
|
||||
peeker.AssertEmptyIncludeNewlinesStack()
|
||||
|
||||
return expr, diags
|
||||
}
|
||||
|
||||
// ParseTemplate parses the given buffer as a standalone HCL template,
|
||||
// returning it as an instance of Expression.
|
||||
func ParseTemplate(src []byte, filename string, start hcl.Pos) (Expression, hcl.Diagnostics) {
|
||||
tokens, diags := LexTemplate(src, filename, start)
|
||||
peeker := newPeeker(tokens, false)
|
||||
parser := &parser{peeker: peeker}
|
||||
expr, parseDiags := parser.ParseTemplate()
|
||||
diags = append(diags, parseDiags...)
|
||||
|
||||
// Panic if the parser uses incorrect stack discipline with the peeker's
|
||||
// newlines stack, since otherwise it will produce confusing downstream
|
||||
// errors.
|
||||
peeker.AssertEmptyIncludeNewlinesStack()
|
||||
|
||||
return expr, diags
|
||||
}
|
||||
|
||||
// ParseTraversalAbs parses the given buffer as a standalone absolute traversal.
|
||||
//
|
||||
// Parsing as a traversal is more limited than parsing as an expession since
|
||||
// it allows only attribute and indexing operations on variables. Traverals
|
||||
// are useful as a syntax for referring to objects without necessarily
|
||||
// evaluating them.
|
||||
func ParseTraversalAbs(src []byte, filename string, start hcl.Pos) (hcl.Traversal, hcl.Diagnostics) {
|
||||
tokens, diags := LexExpression(src, filename, start)
|
||||
peeker := newPeeker(tokens, false)
|
||||
parser := &parser{peeker: peeker}
|
||||
|
||||
// Bare traverals are always parsed in "ignore newlines" mode, as if
|
||||
// they were wrapped in parentheses.
|
||||
parser.PushIncludeNewlines(false)
|
||||
|
||||
expr, parseDiags := parser.ParseTraversalAbs()
|
||||
diags = append(diags, parseDiags...)
|
||||
|
||||
parser.PopIncludeNewlines()
|
||||
|
||||
// Panic if the parser uses incorrect stack discipline with the peeker's
|
||||
// newlines stack, since otherwise it will produce confusing downstream
|
||||
// errors.
|
||||
peeker.AssertEmptyIncludeNewlinesStack()
|
||||
|
||||
return expr, diags
|
||||
}
|
||||
|
||||
// LexConfig performs lexical analysis on the given buffer, treating it as a
|
||||
// whole HCL config file, and returns the resulting tokens.
|
||||
//
|
||||
// Only minimal validation is done during lexical analysis, so the returned
|
||||
// diagnostics may include errors about lexical issues such as bad character
|
||||
// encodings or unrecognized characters, but full parsing is required to
|
||||
// detect _all_ syntax errors.
|
||||
func LexConfig(src []byte, filename string, start hcl.Pos) (Tokens, hcl.Diagnostics) {
|
||||
tokens := scanTokens(src, filename, start, scanNormal)
|
||||
diags := checkInvalidTokens(tokens)
|
||||
return tokens, diags
|
||||
}
|
||||
|
||||
// LexExpression performs lexical analysis on the given buffer, treating it as
|
||||
// a standalone HCL expression, and returns the resulting tokens.
|
||||
//
|
||||
// Only minimal validation is done during lexical analysis, so the returned
|
||||
// diagnostics may include errors about lexical issues such as bad character
|
||||
// encodings or unrecognized characters, but full parsing is required to
|
||||
// detect _all_ syntax errors.
|
||||
func LexExpression(src []byte, filename string, start hcl.Pos) (Tokens, hcl.Diagnostics) {
|
||||
// This is actually just the same thing as LexConfig, since configs
|
||||
// and expressions lex in the same way.
|
||||
tokens := scanTokens(src, filename, start, scanNormal)
|
||||
diags := checkInvalidTokens(tokens)
|
||||
return tokens, diags
|
||||
}
|
||||
|
||||
// LexTemplate performs lexical analysis on the given buffer, treating it as a
|
||||
// standalone HCL template, and returns the resulting tokens.
|
||||
//
|
||||
// Only minimal validation is done during lexical analysis, so the returned
|
||||
// diagnostics may include errors about lexical issues such as bad character
|
||||
// encodings or unrecognized characters, but full parsing is required to
|
||||
// detect _all_ syntax errors.
|
||||
func LexTemplate(src []byte, filename string, start hcl.Pos) (Tokens, hcl.Diagnostics) {
|
||||
tokens := scanTokens(src, filename, start, scanTemplate)
|
||||
diags := checkInvalidTokens(tokens)
|
||||
return tokens, diags
|
||||
}
|
||||
|
||||
// ValidIdentifier tests if the given string could be a valid identifier in
|
||||
// a native syntax expression.
|
||||
//
|
||||
// This is useful when accepting names from the user that will be used as
|
||||
// variable or attribute names in the scope, to ensure that any name chosen
|
||||
// will be traversable using the variable or attribute traversal syntax.
|
||||
func ValidIdentifier(s string) bool {
|
||||
// This is a kinda-expensive way to do something pretty simple, but it
|
||||
// is easiest to do with our existing scanner-related infrastructure here
|
||||
// and nobody should be validating identifiers in a tight loop.
|
||||
tokens := scanTokens([]byte(s), "", hcl.Pos{}, scanIdentOnly)
|
||||
return len(tokens) == 2 && tokens[0].Type == TokenIdent && tokens[1].Type == TokenEOF
|
||||
}
|
||||
301
vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.go
generated
vendored
Normal file
301
vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.go
generated
vendored
Normal file
@@ -0,0 +1,301 @@
|
||||
//line scan_string_lit.rl:1
|
||||
|
||||
package hclsyntax
|
||||
|
||||
// This file is generated from scan_string_lit.rl. DO NOT EDIT.
|
||||
|
||||
//line scan_string_lit.go:9
|
||||
var _hclstrtok_actions []byte = []byte{
|
||||
0, 1, 0, 1, 1, 2, 1, 0,
|
||||
}
|
||||
|
||||
var _hclstrtok_key_offsets []byte = []byte{
|
||||
0, 0, 2, 4, 6, 10, 14, 18,
|
||||
22, 27, 31, 36, 41, 46, 51, 57,
|
||||
62, 74, 85, 96, 107, 118, 129, 140,
|
||||
151,
|
||||
}
|
||||
|
||||
var _hclstrtok_trans_keys []byte = []byte{
|
||||
128, 191, 128, 191, 128, 191, 10, 13,
|
||||
36, 37, 10, 13, 36, 37, 10, 13,
|
||||
36, 37, 10, 13, 36, 37, 10, 13,
|
||||
36, 37, 123, 10, 13, 36, 37, 10,
|
||||
13, 36, 37, 92, 10, 13, 36, 37,
|
||||
92, 10, 13, 36, 37, 92, 10, 13,
|
||||
36, 37, 92, 10, 13, 36, 37, 92,
|
||||
123, 10, 13, 36, 37, 92, 85, 117,
|
||||
128, 191, 192, 223, 224, 239, 240, 247,
|
||||
248, 255, 10, 13, 36, 37, 92, 48,
|
||||
57, 65, 70, 97, 102, 10, 13, 36,
|
||||
37, 92, 48, 57, 65, 70, 97, 102,
|
||||
10, 13, 36, 37, 92, 48, 57, 65,
|
||||
70, 97, 102, 10, 13, 36, 37, 92,
|
||||
48, 57, 65, 70, 97, 102, 10, 13,
|
||||
36, 37, 92, 48, 57, 65, 70, 97,
|
||||
102, 10, 13, 36, 37, 92, 48, 57,
|
||||
65, 70, 97, 102, 10, 13, 36, 37,
|
||||
92, 48, 57, 65, 70, 97, 102, 10,
|
||||
13, 36, 37, 92, 48, 57, 65, 70,
|
||||
97, 102,
|
||||
}
|
||||
|
||||
var _hclstrtok_single_lengths []byte = []byte{
|
||||
0, 0, 0, 0, 4, 4, 4, 4,
|
||||
5, 4, 5, 5, 5, 5, 6, 5,
|
||||
2, 5, 5, 5, 5, 5, 5, 5,
|
||||
5,
|
||||
}
|
||||
|
||||
var _hclstrtok_range_lengths []byte = []byte{
|
||||
0, 1, 1, 1, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0,
|
||||
5, 3, 3, 3, 3, 3, 3, 3,
|
||||
3,
|
||||
}
|
||||
|
||||
var _hclstrtok_index_offsets []byte = []byte{
|
||||
0, 0, 2, 4, 6, 11, 16, 21,
|
||||
26, 32, 37, 43, 49, 55, 61, 68,
|
||||
74, 82, 91, 100, 109, 118, 127, 136,
|
||||
145,
|
||||
}
|
||||
|
||||
var _hclstrtok_indicies []byte = []byte{
|
||||
0, 1, 2, 1, 3, 1, 5, 6,
|
||||
7, 8, 4, 10, 11, 12, 13, 9,
|
||||
14, 11, 12, 13, 9, 10, 11, 15,
|
||||
13, 9, 10, 11, 12, 13, 14, 9,
|
||||
10, 11, 12, 15, 9, 17, 18, 19,
|
||||
20, 21, 16, 23, 24, 25, 26, 27,
|
||||
22, 0, 24, 25, 26, 27, 22, 23,
|
||||
24, 28, 26, 27, 22, 23, 24, 25,
|
||||
26, 27, 0, 22, 23, 24, 25, 28,
|
||||
27, 22, 29, 30, 22, 2, 3, 31,
|
||||
22, 0, 23, 24, 25, 26, 27, 32,
|
||||
32, 32, 22, 23, 24, 25, 26, 27,
|
||||
33, 33, 33, 22, 23, 24, 25, 26,
|
||||
27, 34, 34, 34, 22, 23, 24, 25,
|
||||
26, 27, 30, 30, 30, 22, 23, 24,
|
||||
25, 26, 27, 35, 35, 35, 22, 23,
|
||||
24, 25, 26, 27, 36, 36, 36, 22,
|
||||
23, 24, 25, 26, 27, 37, 37, 37,
|
||||
22, 23, 24, 25, 26, 27, 0, 0,
|
||||
0, 22,
|
||||
}
|
||||
|
||||
var _hclstrtok_trans_targs []byte = []byte{
|
||||
11, 0, 1, 2, 4, 5, 6, 7,
|
||||
9, 4, 5, 6, 7, 9, 5, 8,
|
||||
10, 11, 12, 13, 15, 16, 10, 11,
|
||||
12, 13, 15, 16, 14, 17, 21, 3,
|
||||
18, 19, 20, 22, 23, 24,
|
||||
}
|
||||
|
||||
var _hclstrtok_trans_actions []byte = []byte{
|
||||
0, 0, 0, 0, 0, 1, 1, 1,
|
||||
1, 3, 5, 5, 5, 5, 0, 0,
|
||||
0, 1, 1, 1, 1, 1, 3, 5,
|
||||
5, 5, 5, 5, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0,
|
||||
}
|
||||
|
||||
var _hclstrtok_eof_actions []byte = []byte{
|
||||
0, 0, 0, 0, 0, 3, 3, 3,
|
||||
3, 3, 0, 3, 3, 3, 3, 3,
|
||||
3, 3, 3, 3, 3, 3, 3, 3,
|
||||
3,
|
||||
}
|
||||
|
||||
const hclstrtok_start int = 4
|
||||
const hclstrtok_first_final int = 4
|
||||
const hclstrtok_error int = 0
|
||||
|
||||
const hclstrtok_en_quoted int = 10
|
||||
const hclstrtok_en_unquoted int = 4
|
||||
|
||||
//line scan_string_lit.rl:10
|
||||
|
||||
func scanStringLit(data []byte, quoted bool) [][]byte {
|
||||
var ret [][]byte
|
||||
|
||||
//line scan_string_lit.rl:61
|
||||
|
||||
// Ragel state
|
||||
p := 0 // "Pointer" into data
|
||||
pe := len(data) // End-of-data "pointer"
|
||||
ts := 0
|
||||
te := 0
|
||||
eof := pe
|
||||
|
||||
var cs int // current state
|
||||
switch {
|
||||
case quoted:
|
||||
cs = hclstrtok_en_quoted
|
||||
default:
|
||||
cs = hclstrtok_en_unquoted
|
||||
}
|
||||
|
||||
// Make Go compiler happy
|
||||
_ = ts
|
||||
_ = eof
|
||||
|
||||
/*token := func () {
|
||||
ret = append(ret, data[ts:te])
|
||||
}*/
|
||||
|
||||
//line scan_string_lit.go:154
|
||||
{
|
||||
}
|
||||
|
||||
//line scan_string_lit.go:158
|
||||
{
|
||||
var _klen int
|
||||
var _trans int
|
||||
var _acts int
|
||||
var _nacts uint
|
||||
var _keys int
|
||||
if p == pe {
|
||||
goto _test_eof
|
||||
}
|
||||
if cs == 0 {
|
||||
goto _out
|
||||
}
|
||||
_resume:
|
||||
_keys = int(_hclstrtok_key_offsets[cs])
|
||||
_trans = int(_hclstrtok_index_offsets[cs])
|
||||
|
||||
_klen = int(_hclstrtok_single_lengths[cs])
|
||||
if _klen > 0 {
|
||||
_lower := int(_keys)
|
||||
var _mid int
|
||||
_upper := int(_keys + _klen - 1)
|
||||
for {
|
||||
if _upper < _lower {
|
||||
break
|
||||
}
|
||||
|
||||
_mid = _lower + ((_upper - _lower) >> 1)
|
||||
switch {
|
||||
case data[p] < _hclstrtok_trans_keys[_mid]:
|
||||
_upper = _mid - 1
|
||||
case data[p] > _hclstrtok_trans_keys[_mid]:
|
||||
_lower = _mid + 1
|
||||
default:
|
||||
_trans += int(_mid - int(_keys))
|
||||
goto _match
|
||||
}
|
||||
}
|
||||
_keys += _klen
|
||||
_trans += _klen
|
||||
}
|
||||
|
||||
_klen = int(_hclstrtok_range_lengths[cs])
|
||||
if _klen > 0 {
|
||||
_lower := int(_keys)
|
||||
var _mid int
|
||||
_upper := int(_keys + (_klen << 1) - 2)
|
||||
for {
|
||||
if _upper < _lower {
|
||||
break
|
||||
}
|
||||
|
||||
_mid = _lower + (((_upper - _lower) >> 1) & ^1)
|
||||
switch {
|
||||
case data[p] < _hclstrtok_trans_keys[_mid]:
|
||||
_upper = _mid - 2
|
||||
case data[p] > _hclstrtok_trans_keys[_mid+1]:
|
||||
_lower = _mid + 2
|
||||
default:
|
||||
_trans += int((_mid - int(_keys)) >> 1)
|
||||
goto _match
|
||||
}
|
||||
}
|
||||
_trans += _klen
|
||||
}
|
||||
|
||||
_match:
|
||||
_trans = int(_hclstrtok_indicies[_trans])
|
||||
cs = int(_hclstrtok_trans_targs[_trans])
|
||||
|
||||
if _hclstrtok_trans_actions[_trans] == 0 {
|
||||
goto _again
|
||||
}
|
||||
|
||||
_acts = int(_hclstrtok_trans_actions[_trans])
|
||||
_nacts = uint(_hclstrtok_actions[_acts])
|
||||
_acts++
|
||||
for ; _nacts > 0; _nacts-- {
|
||||
_acts++
|
||||
switch _hclstrtok_actions[_acts-1] {
|
||||
case 0:
|
||||
//line scan_string_lit.rl:40
|
||||
|
||||
// If te is behind p then we've skipped over some literal
|
||||
// characters which we must now return.
|
||||
if te < p {
|
||||
ret = append(ret, data[te:p])
|
||||
}
|
||||
ts = p
|
||||
|
||||
case 1:
|
||||
//line scan_string_lit.rl:48
|
||||
|
||||
te = p
|
||||
ret = append(ret, data[ts:te])
|
||||
|
||||
//line scan_string_lit.go:253
|
||||
}
|
||||
}
|
||||
|
||||
_again:
|
||||
if cs == 0 {
|
||||
goto _out
|
||||
}
|
||||
p++
|
||||
if p != pe {
|
||||
goto _resume
|
||||
}
|
||||
_test_eof:
|
||||
{
|
||||
}
|
||||
if p == eof {
|
||||
__acts := _hclstrtok_eof_actions[cs]
|
||||
__nacts := uint(_hclstrtok_actions[__acts])
|
||||
__acts++
|
||||
for ; __nacts > 0; __nacts-- {
|
||||
__acts++
|
||||
switch _hclstrtok_actions[__acts-1] {
|
||||
case 1:
|
||||
//line scan_string_lit.rl:48
|
||||
|
||||
te = p
|
||||
ret = append(ret, data[ts:te])
|
||||
|
||||
//line scan_string_lit.go:278
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
_out:
|
||||
{
|
||||
}
|
||||
}
|
||||
|
||||
//line scan_string_lit.rl:89
|
||||
|
||||
if te < p {
|
||||
// Collect any leftover literal characters at the end of the input
|
||||
ret = append(ret, data[te:p])
|
||||
}
|
||||
|
||||
// If we fall out here without being in a final state then we've
|
||||
// encountered something that the scanner can't match, which should
|
||||
// be impossible (the scanner matches all bytes _somehow_) but we'll
|
||||
// tolerate it and let the caller deal with it.
|
||||
if cs < hclstrtok_first_final {
|
||||
ret = append(ret, data[p:len(data)])
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
||||
105
vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.rl
generated
vendored
Normal file
105
vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.rl
generated
vendored
Normal file
@@ -0,0 +1,105 @@
|
||||
|
||||
package hclsyntax
|
||||
|
||||
// This file is generated from scan_string_lit.rl. DO NOT EDIT.
|
||||
%%{
|
||||
# (except you are actually in scan_string_lit.rl here, so edit away!)
|
||||
|
||||
machine hclstrtok;
|
||||
write data;
|
||||
}%%
|
||||
|
||||
func scanStringLit(data []byte, quoted bool) [][]byte {
|
||||
var ret [][]byte
|
||||
|
||||
%%{
|
||||
include UnicodeDerived "unicode_derived.rl";
|
||||
|
||||
UTF8Cont = 0x80 .. 0xBF;
|
||||
AnyUTF8 = (
|
||||
0x00..0x7F |
|
||||
0xC0..0xDF . UTF8Cont |
|
||||
0xE0..0xEF . UTF8Cont . UTF8Cont |
|
||||
0xF0..0xF7 . UTF8Cont . UTF8Cont . UTF8Cont
|
||||
);
|
||||
BadUTF8 = any - AnyUTF8;
|
||||
|
||||
Hex = ('0'..'9' | 'a'..'f' | 'A'..'F');
|
||||
|
||||
# Our goal with this patterns is to capture user intent as best as
|
||||
# possible, even if the input is invalid. The caller will then verify
|
||||
# whether each token is valid and generate suitable error messages
|
||||
# if not.
|
||||
UnicodeEscapeShort = "\\u" . Hex{0,4};
|
||||
UnicodeEscapeLong = "\\U" . Hex{0,8};
|
||||
UnicodeEscape = (UnicodeEscapeShort | UnicodeEscapeLong);
|
||||
SimpleEscape = "\\" . (AnyUTF8 - ('U'|'u'))?;
|
||||
TemplateEscape = ("$" . ("$" . ("{"?))?) | ("%" . ("%" . ("{"?))?);
|
||||
Newline = ("\r\n" | "\r" | "\n");
|
||||
|
||||
action Begin {
|
||||
// If te is behind p then we've skipped over some literal
|
||||
// characters which we must now return.
|
||||
if te < p {
|
||||
ret = append(ret, data[te:p])
|
||||
}
|
||||
ts = p;
|
||||
}
|
||||
action End {
|
||||
te = p;
|
||||
ret = append(ret, data[ts:te]);
|
||||
}
|
||||
|
||||
QuotedToken = (UnicodeEscape | SimpleEscape | TemplateEscape | Newline) >Begin %End;
|
||||
UnquotedToken = (TemplateEscape | Newline) >Begin %End;
|
||||
QuotedLiteral = (any - ("\\" | "$" | "%" | "\r" | "\n"));
|
||||
UnquotedLiteral = (any - ("$" | "%" | "\r" | "\n"));
|
||||
|
||||
quoted := (QuotedToken | QuotedLiteral)**;
|
||||
unquoted := (UnquotedToken | UnquotedLiteral)**;
|
||||
|
||||
}%%
|
||||
|
||||
// Ragel state
|
||||
p := 0 // "Pointer" into data
|
||||
pe := len(data) // End-of-data "pointer"
|
||||
ts := 0
|
||||
te := 0
|
||||
eof := pe
|
||||
|
||||
var cs int // current state
|
||||
switch {
|
||||
case quoted:
|
||||
cs = hclstrtok_en_quoted
|
||||
default:
|
||||
cs = hclstrtok_en_unquoted
|
||||
}
|
||||
|
||||
// Make Go compiler happy
|
||||
_ = ts
|
||||
_ = eof
|
||||
|
||||
/*token := func () {
|
||||
ret = append(ret, data[ts:te])
|
||||
}*/
|
||||
|
||||
%%{
|
||||
write init nocs;
|
||||
write exec;
|
||||
}%%
|
||||
|
||||
if te < p {
|
||||
// Collect any leftover literal characters at the end of the input
|
||||
ret = append(ret, data[te:p])
|
||||
}
|
||||
|
||||
// If we fall out here without being in a final state then we've
|
||||
// encountered something that the scanner can't match, which should
|
||||
// be impossible (the scanner matches all bytes _somehow_) but we'll
|
||||
// tolerate it and let the caller deal with it.
|
||||
if cs < hclstrtok_first_final {
|
||||
ret = append(ret, data[p:len(data)])
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
||||
5265
vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.go
generated
vendored
Normal file
5265
vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
395
vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.rl
generated
vendored
Normal file
395
vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.rl
generated
vendored
Normal file
@@ -0,0 +1,395 @@
|
||||
|
||||
package hclsyntax
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
)
|
||||
|
||||
// This file is generated from scan_tokens.rl. DO NOT EDIT.
|
||||
%%{
|
||||
# (except when you are actually in scan_tokens.rl here, so edit away!)
|
||||
|
||||
machine hcltok;
|
||||
write data;
|
||||
}%%
|
||||
|
||||
func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []Token {
|
||||
stripData := stripUTF8BOM(data)
|
||||
start.Byte += len(data) - len(stripData)
|
||||
data = stripData
|
||||
|
||||
f := &tokenAccum{
|
||||
Filename: filename,
|
||||
Bytes: data,
|
||||
Pos: start,
|
||||
StartByte: start.Byte,
|
||||
}
|
||||
|
||||
%%{
|
||||
include UnicodeDerived "unicode_derived.rl";
|
||||
|
||||
UTF8Cont = 0x80 .. 0xBF;
|
||||
AnyUTF8 = (
|
||||
0x00..0x7F |
|
||||
0xC0..0xDF . UTF8Cont |
|
||||
0xE0..0xEF . UTF8Cont . UTF8Cont |
|
||||
0xF0..0xF7 . UTF8Cont . UTF8Cont . UTF8Cont
|
||||
);
|
||||
BrokenUTF8 = any - AnyUTF8;
|
||||
|
||||
NumberLitContinue = (digit|'.'|('e'|'E') ('+'|'-')? digit);
|
||||
NumberLit = digit ("" | (NumberLitContinue - '.') | (NumberLitContinue* (NumberLitContinue - '.')));
|
||||
Ident = (ID_Start | '_') (ID_Continue | '-')*;
|
||||
|
||||
# Symbols that just represent themselves are handled as a single rule.
|
||||
SelfToken = "[" | "]" | "(" | ")" | "." | "," | "*" | "/" | "%" | "+" | "-" | "=" | "<" | ">" | "!" | "?" | ":" | "\n" | "&" | "|" | "~" | "^" | ";" | "`" | "'";
|
||||
|
||||
EqualOp = "==";
|
||||
NotEqual = "!=";
|
||||
GreaterThanEqual = ">=";
|
||||
LessThanEqual = "<=";
|
||||
LogicalAnd = "&&";
|
||||
LogicalOr = "||";
|
||||
|
||||
Ellipsis = "...";
|
||||
FatArrow = "=>";
|
||||
|
||||
Newline = '\r' ? '\n';
|
||||
EndOfLine = Newline;
|
||||
|
||||
BeginStringTmpl = '"';
|
||||
BeginHeredocTmpl = '<<' ('-')? Ident Newline;
|
||||
|
||||
Comment = (
|
||||
# The :>> operator in these is a "finish-guarded concatenation",
|
||||
# which terminates the sequence on its left when it completes
|
||||
# the sequence on its right.
|
||||
# In the single-line comment cases this is allowing us to make
|
||||
# the trailing EndOfLine optional while still having the overall
|
||||
# pattern terminate. In the multi-line case it ensures that
|
||||
# the first comment in the file ends at the first */, rather than
|
||||
# gobbling up all of the "any*" until the _final_ */ in the file.
|
||||
("#" (any - EndOfLine)* :>> EndOfLine?) |
|
||||
("//" (any - EndOfLine)* :>> EndOfLine?) |
|
||||
("/*" any* :>> "*/")
|
||||
);
|
||||
|
||||
# Note: hclwrite assumes that only ASCII spaces appear between tokens,
|
||||
# and uses this assumption to recreate the spaces between tokens by
|
||||
# looking at byte offset differences. This means it will produce
|
||||
# incorrect results in the presence of tabs, but that's acceptable
|
||||
# because the canonical style (which hclwrite itself can impose
|
||||
# automatically is to never use tabs).
|
||||
Spaces = (' ' | 0x09)+;
|
||||
|
||||
action beginStringTemplate {
|
||||
token(TokenOQuote);
|
||||
fcall stringTemplate;
|
||||
}
|
||||
|
||||
action endStringTemplate {
|
||||
token(TokenCQuote);
|
||||
fret;
|
||||
}
|
||||
|
||||
action beginHeredocTemplate {
|
||||
token(TokenOHeredoc);
|
||||
// the token is currently the whole heredoc introducer, like
|
||||
// <<EOT or <<-EOT, followed by a newline. We want to extract
|
||||
// just the "EOT" portion that we'll use as the closing marker.
|
||||
|
||||
marker := data[ts+2:te-1]
|
||||
if marker[0] == '-' {
|
||||
marker = marker[1:]
|
||||
}
|
||||
if marker[len(marker)-1] == '\r' {
|
||||
marker = marker[:len(marker)-1]
|
||||
}
|
||||
|
||||
heredocs = append(heredocs, heredocInProgress{
|
||||
Marker: marker,
|
||||
StartOfLine: true,
|
||||
})
|
||||
|
||||
fcall heredocTemplate;
|
||||
}
|
||||
|
||||
action heredocLiteralEOL {
|
||||
// This action is called specificially when a heredoc literal
|
||||
// ends with a newline character.
|
||||
|
||||
// This might actually be our end marker.
|
||||
topdoc := &heredocs[len(heredocs)-1]
|
||||
if topdoc.StartOfLine {
|
||||
maybeMarker := bytes.TrimSpace(data[ts:te])
|
||||
if bytes.Equal(maybeMarker, topdoc.Marker) {
|
||||
// We actually emit two tokens here: the end-of-heredoc
|
||||
// marker first, and then separately the newline that
|
||||
// follows it. This then avoids issues with the closing
|
||||
// marker consuming a newline that would normally be used
|
||||
// to mark the end of an attribute definition.
|
||||
// We might have either a \n sequence or an \r\n sequence
|
||||
// here, so we must handle both.
|
||||
nls := te-1
|
||||
nle := te
|
||||
te--
|
||||
if data[te-1] == '\r' {
|
||||
// back up one more byte
|
||||
nls--
|
||||
te--
|
||||
}
|
||||
token(TokenCHeredoc);
|
||||
ts = nls
|
||||
te = nle
|
||||
token(TokenNewline);
|
||||
heredocs = heredocs[:len(heredocs)-1]
|
||||
fret;
|
||||
}
|
||||
}
|
||||
|
||||
topdoc.StartOfLine = true;
|
||||
token(TokenStringLit);
|
||||
}
|
||||
|
||||
action heredocLiteralMidline {
|
||||
// This action is called when a heredoc literal _doesn't_ end
|
||||
// with a newline character, e.g. because we're about to enter
|
||||
// an interpolation sequence.
|
||||
heredocs[len(heredocs)-1].StartOfLine = false;
|
||||
token(TokenStringLit);
|
||||
}
|
||||
|
||||
action bareTemplateLiteral {
|
||||
token(TokenStringLit);
|
||||
}
|
||||
|
||||
action beginTemplateInterp {
|
||||
token(TokenTemplateInterp);
|
||||
braces++;
|
||||
retBraces = append(retBraces, braces);
|
||||
if len(heredocs) > 0 {
|
||||
heredocs[len(heredocs)-1].StartOfLine = false;
|
||||
}
|
||||
fcall main;
|
||||
}
|
||||
|
||||
action beginTemplateControl {
|
||||
token(TokenTemplateControl);
|
||||
braces++;
|
||||
retBraces = append(retBraces, braces);
|
||||
if len(heredocs) > 0 {
|
||||
heredocs[len(heredocs)-1].StartOfLine = false;
|
||||
}
|
||||
fcall main;
|
||||
}
|
||||
|
||||
action openBrace {
|
||||
token(TokenOBrace);
|
||||
braces++;
|
||||
}
|
||||
|
||||
action closeBrace {
|
||||
if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces {
|
||||
token(TokenTemplateSeqEnd);
|
||||
braces--;
|
||||
retBraces = retBraces[0:len(retBraces)-1]
|
||||
fret;
|
||||
} else {
|
||||
token(TokenCBrace);
|
||||
braces--;
|
||||
}
|
||||
}
|
||||
|
||||
action closeTemplateSeqEatWhitespace {
|
||||
// Only consume from the retBraces stack and return if we are at
|
||||
// a suitable brace nesting level, otherwise things will get
|
||||
// confused. (Not entering this branch indicates a syntax error,
|
||||
// which we will catch in the parser.)
|
||||
if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces {
|
||||
token(TokenTemplateSeqEnd);
|
||||
braces--;
|
||||
retBraces = retBraces[0:len(retBraces)-1]
|
||||
fret;
|
||||
} else {
|
||||
// We intentionally generate a TokenTemplateSeqEnd here,
|
||||
// even though the user apparently wanted a brace, because
|
||||
// we want to allow the parser to catch the incorrect use
|
||||
// of a ~} to balance a generic opening brace, rather than
|
||||
// a template sequence.
|
||||
token(TokenTemplateSeqEnd);
|
||||
braces--;
|
||||
}
|
||||
}
|
||||
|
||||
TemplateInterp = "${" ("~")?;
|
||||
TemplateControl = "%{" ("~")?;
|
||||
EndStringTmpl = '"';
|
||||
NewlineChars = ("\r"|"\n");
|
||||
NewlineCharsSeq = NewlineChars+;
|
||||
StringLiteralChars = (AnyUTF8 - NewlineChars);
|
||||
TemplateIgnoredNonBrace = (^'{' %{ fhold; });
|
||||
TemplateNotInterp = '$' (TemplateIgnoredNonBrace | TemplateInterp);
|
||||
TemplateNotControl = '%' (TemplateIgnoredNonBrace | TemplateControl);
|
||||
QuotedStringLiteralWithEsc = ('\\' StringLiteralChars) | (StringLiteralChars - ("$" | '%' | '"' | "\\"));
|
||||
TemplateStringLiteral = (
|
||||
(TemplateNotInterp) |
|
||||
(TemplateNotControl) |
|
||||
(QuotedStringLiteralWithEsc)+
|
||||
);
|
||||
HeredocStringLiteral = (
|
||||
(TemplateNotInterp) |
|
||||
(TemplateNotControl) |
|
||||
(StringLiteralChars - ("$" | '%'))*
|
||||
);
|
||||
BareStringLiteral = (
|
||||
(TemplateNotInterp) |
|
||||
(TemplateNotControl) |
|
||||
(StringLiteralChars - ("$" | '%'))*
|
||||
) Newline?;
|
||||
|
||||
stringTemplate := |*
|
||||
TemplateInterp => beginTemplateInterp;
|
||||
TemplateControl => beginTemplateControl;
|
||||
EndStringTmpl => endStringTemplate;
|
||||
TemplateStringLiteral => { token(TokenQuotedLit); };
|
||||
NewlineCharsSeq => { token(TokenQuotedNewline); };
|
||||
AnyUTF8 => { token(TokenInvalid); };
|
||||
BrokenUTF8 => { token(TokenBadUTF8); };
|
||||
*|;
|
||||
|
||||
heredocTemplate := |*
|
||||
TemplateInterp => beginTemplateInterp;
|
||||
TemplateControl => beginTemplateControl;
|
||||
HeredocStringLiteral EndOfLine => heredocLiteralEOL;
|
||||
HeredocStringLiteral => heredocLiteralMidline;
|
||||
BrokenUTF8 => { token(TokenBadUTF8); };
|
||||
*|;
|
||||
|
||||
bareTemplate := |*
|
||||
TemplateInterp => beginTemplateInterp;
|
||||
TemplateControl => beginTemplateControl;
|
||||
BareStringLiteral => bareTemplateLiteral;
|
||||
BrokenUTF8 => { token(TokenBadUTF8); };
|
||||
*|;
|
||||
|
||||
identOnly := |*
|
||||
Ident => { token(TokenIdent) };
|
||||
BrokenUTF8 => { token(TokenBadUTF8) };
|
||||
AnyUTF8 => { token(TokenInvalid) };
|
||||
*|;
|
||||
|
||||
main := |*
|
||||
Spaces => {};
|
||||
NumberLit => { token(TokenNumberLit) };
|
||||
Ident => { token(TokenIdent) };
|
||||
|
||||
Comment => { token(TokenComment) };
|
||||
Newline => { token(TokenNewline) };
|
||||
|
||||
EqualOp => { token(TokenEqualOp); };
|
||||
NotEqual => { token(TokenNotEqual); };
|
||||
GreaterThanEqual => { token(TokenGreaterThanEq); };
|
||||
LessThanEqual => { token(TokenLessThanEq); };
|
||||
LogicalAnd => { token(TokenAnd); };
|
||||
LogicalOr => { token(TokenOr); };
|
||||
Ellipsis => { token(TokenEllipsis); };
|
||||
FatArrow => { token(TokenFatArrow); };
|
||||
SelfToken => { selfToken() };
|
||||
|
||||
"{" => openBrace;
|
||||
"}" => closeBrace;
|
||||
|
||||
"~}" => closeTemplateSeqEatWhitespace;
|
||||
|
||||
BeginStringTmpl => beginStringTemplate;
|
||||
BeginHeredocTmpl => beginHeredocTemplate;
|
||||
|
||||
BrokenUTF8 => { token(TokenBadUTF8) };
|
||||
AnyUTF8 => { token(TokenInvalid) };
|
||||
*|;
|
||||
|
||||
}%%
|
||||
|
||||
// Ragel state
|
||||
p := 0 // "Pointer" into data
|
||||
pe := len(data) // End-of-data "pointer"
|
||||
ts := 0
|
||||
te := 0
|
||||
act := 0
|
||||
eof := pe
|
||||
var stack []int
|
||||
var top int
|
||||
|
||||
var cs int // current state
|
||||
switch mode {
|
||||
case scanNormal:
|
||||
cs = hcltok_en_main
|
||||
case scanTemplate:
|
||||
cs = hcltok_en_bareTemplate
|
||||
case scanIdentOnly:
|
||||
cs = hcltok_en_identOnly
|
||||
default:
|
||||
panic("invalid scanMode")
|
||||
}
|
||||
|
||||
braces := 0
|
||||
var retBraces []int // stack of brace levels that cause us to use fret
|
||||
var heredocs []heredocInProgress // stack of heredocs we're currently processing
|
||||
|
||||
%%{
|
||||
prepush {
|
||||
stack = append(stack, 0);
|
||||
}
|
||||
postpop {
|
||||
stack = stack[:len(stack)-1];
|
||||
}
|
||||
}%%
|
||||
|
||||
// Make Go compiler happy
|
||||
_ = ts
|
||||
_ = te
|
||||
_ = act
|
||||
_ = eof
|
||||
|
||||
token := func (ty TokenType) {
|
||||
f.emitToken(ty, ts, te)
|
||||
}
|
||||
selfToken := func () {
|
||||
b := data[ts:te]
|
||||
if len(b) != 1 {
|
||||
// should never happen
|
||||
panic("selfToken only works for single-character tokens")
|
||||
}
|
||||
f.emitToken(TokenType(b[0]), ts, te)
|
||||
}
|
||||
|
||||
%%{
|
||||
write init nocs;
|
||||
write exec;
|
||||
}%%
|
||||
|
||||
// If we fall out here without being in a final state then we've
|
||||
// encountered something that the scanner can't match, which we'll
|
||||
// deal with as an invalid.
|
||||
if cs < hcltok_first_final {
|
||||
if mode == scanTemplate && len(stack) == 0 {
|
||||
// If we're scanning a bare template then any straggling
|
||||
// top-level stuff is actually literal string, rather than
|
||||
// invalid. This handles the case where the template ends
|
||||
// with a single "$" or "%", which trips us up because we
|
||||
// want to see another character to decide if it's a sequence
|
||||
// or an escape.
|
||||
f.emitToken(TokenStringLit, ts, len(data))
|
||||
} else {
|
||||
f.emitToken(TokenInvalid, ts, len(data))
|
||||
}
|
||||
}
|
||||
|
||||
// We always emit a synthetic EOF token at the end, since it gives the
|
||||
// parser position information for an "unexpected EOF" diagnostic.
|
||||
f.emitToken(TokenEOF, len(data), len(data))
|
||||
|
||||
return f.Tokens
|
||||
}
|
||||
941
vendor/github.com/hashicorp/hcl/v2/hclsyntax/spec.md
generated
vendored
Normal file
941
vendor/github.com/hashicorp/hcl/v2/hclsyntax/spec.md
generated
vendored
Normal file
@@ -0,0 +1,941 @@
|
||||
# HCL Native Syntax Specification
|
||||
|
||||
This is the specification of the syntax and semantics of the native syntax
|
||||
for HCL. HCL is a system for defining configuration languages for applications.
|
||||
The HCL information model is designed to support multiple concrete syntaxes
|
||||
for configuration, but this native syntax is considered the primary format
|
||||
and is optimized for human authoring and maintenance, as opposed to machine
|
||||
generation of configuration.
|
||||
|
||||
The language consists of three integrated sub-languages:
|
||||
|
||||
- The _structural_ language defines the overall hierarchical configuration
|
||||
structure, and is a serialization of HCL bodies, blocks and attributes.
|
||||
|
||||
- The _expression_ language is used to express attribute values, either as
|
||||
literals or as derivations of other values.
|
||||
|
||||
- The _template_ language is used to compose values together into strings,
|
||||
as one of several types of expression in the expression language.
|
||||
|
||||
In normal use these three sub-languages are used together within configuration
|
||||
files to describe an overall configuration, with the structural language
|
||||
being used at the top level. The expression and template languages can also
|
||||
be used in isolation, to implement features such as REPLs, debuggers, and
|
||||
integration into more limited HCL syntaxes such as the JSON profile.
|
||||
|
||||
## Syntax Notation
|
||||
|
||||
Within this specification a semi-formal notation is used to illustrate the
|
||||
details of syntax. This notation is intended for human consumption rather
|
||||
than machine consumption, with the following conventions:
|
||||
|
||||
- A naked name starting with an uppercase letter is a global production,
|
||||
common to all of the syntax specifications in this document.
|
||||
- A naked name starting with a lowercase letter is a local production,
|
||||
meaningful only within the specification where it is defined.
|
||||
- Double and single quotes (`"` and `'`) are used to mark literal character
|
||||
sequences, which may be either punctuation markers or keywords.
|
||||
- The default operator for combining items, which has no punctuation,
|
||||
is concatenation.
|
||||
- The symbol `|` indicates that any one of its left and right operands may
|
||||
be present.
|
||||
- The `*` symbol indicates zero or more repetitions of the item to its left.
|
||||
- The `?` symbol indicates zero or one of the item to its left.
|
||||
- Parentheses (`(` and `)`) are used to group items together to apply
|
||||
the `|`, `*` and `?` operators to them collectively.
|
||||
|
||||
The grammar notation does not fully describe the language. The prose may
|
||||
augment or conflict with the illustrated grammar. In case of conflict, prose
|
||||
has priority.
|
||||
|
||||
## Source Code Representation
|
||||
|
||||
Source code is unicode text expressed in the UTF-8 encoding. The language
|
||||
itself does not perform unicode normalization, so syntax features such as
|
||||
identifiers are sequences of unicode code points and so e.g. a precombined
|
||||
accented character is distinct from a letter associated with a combining
|
||||
accent. (String literals have some special handling with regard to Unicode
|
||||
normalization which will be covered later in the relevant section.)
|
||||
|
||||
UTF-8 encoded Unicode byte order marks are not permitted. Invalid or
|
||||
non-normalized UTF-8 encoding is always a parse error.
|
||||
|
||||
## Lexical Elements
|
||||
|
||||
### Comments and Whitespace
|
||||
|
||||
Comments and Whitespace are recognized as lexical elements but are ignored
|
||||
except as described below.
|
||||
|
||||
Whitespace is defined as a sequence of zero or more space characters
|
||||
(U+0020). Newline sequences (either U+000A or U+000D followed by U+000A)
|
||||
are _not_ considered whitespace but are ignored as such in certain contexts.
|
||||
Horizontal tab characters (U+0009) are also treated as whitespace, but are
|
||||
counted only as one "column" for the purpose of reporting source positions.
|
||||
|
||||
Comments serve as program documentation and come in two forms:
|
||||
|
||||
- _Line comments_ start with either the `//` or `#` sequences and end with
|
||||
the next newline sequence. A line comment is considered equivalent to a
|
||||
newline sequence.
|
||||
|
||||
- _Inline comments_ start with the `/*` sequence and end with the `*/`
|
||||
sequence, and may have any characters within except the ending sequence.
|
||||
An inline comments is considered equivalent to a whitespace sequence.
|
||||
|
||||
Comments and whitespace cannot begin within within other comments, or within
|
||||
template literals except inside an interpolation sequence or template directive.
|
||||
|
||||
### Identifiers
|
||||
|
||||
Identifiers name entities such as blocks, attributes and expression variables.
|
||||
Identifiers are interpreted as per [UAX #31][uax31] Section 2. Specifically,
|
||||
their syntax is defined in terms of the `ID_Start` and `ID_Continue`
|
||||
character properties as follows:
|
||||
|
||||
```ebnf
|
||||
Identifier = ID_Start (ID_Continue | '-')*;
|
||||
```
|
||||
|
||||
The Unicode specification provides the normative requirements for identifier
|
||||
parsing. Non-normatively, the spirit of this specification is that `ID_Start`
|
||||
consists of Unicode letter and certain unambiguous punctuation tokens, while
|
||||
`ID_Continue` augments that set with Unicode digits, combining marks, etc.
|
||||
|
||||
The dash character `-` is additionally allowed in identifiers, even though
|
||||
that is not part of the unicode `ID_Continue` definition. This is to allow
|
||||
attribute names and block type names to contain dashes, although underscores
|
||||
as word separators are considered the idiomatic usage.
|
||||
|
||||
[uax31]: http://unicode.org/reports/tr31/ "Unicode Identifier and Pattern Syntax"
|
||||
|
||||
### Keywords
|
||||
|
||||
There are no globally-reserved words, but in some contexts certain identifiers
|
||||
are reserved to function as keywords. These are discussed further in the
|
||||
relevant documentation sections that follow. In such situations, the
|
||||
identifier's role as a keyword supersedes any other valid interpretation that
|
||||
may be possible. Outside of these specific situations, the keywords have no
|
||||
special meaning and are interpreted as regular identifiers.
|
||||
|
||||
### Operators and Delimiters
|
||||
|
||||
The following character sequences represent operators, delimiters, and other
|
||||
special tokens:
|
||||
|
||||
```
|
||||
+ && == < : { [ ( ${
|
||||
- || != > ? } ] ) %{
|
||||
* ! <= = .
|
||||
/ >= => ,
|
||||
% ...
|
||||
```
|
||||
|
||||
### Numeric Literals
|
||||
|
||||
A numeric literal is a decimal representation of a
|
||||
real number. It has an integer part, a fractional part,
|
||||
and an exponent part.
|
||||
|
||||
```ebnf
|
||||
NumericLit = decimal+ ("." decimal+)? (expmark decimal+)?;
|
||||
decimal = '0' .. '9';
|
||||
expmark = ('e' | 'E') ("+" | "-")?;
|
||||
```
|
||||
|
||||
## Structural Elements
|
||||
|
||||
The structural language consists of syntax representing the following
|
||||
constructs:
|
||||
|
||||
- _Attributes_, which assign a value to a specified name.
|
||||
- _Blocks_, which create a child body annotated by a type and optional labels.
|
||||
- _Body Content_, which consists of a collection of attributes and blocks.
|
||||
|
||||
These constructs correspond to the similarly-named concepts in the
|
||||
language-agnostic HCL information model.
|
||||
|
||||
```ebnf
|
||||
ConfigFile = Body;
|
||||
Body = (Attribute | Block | OneLineBlock)*;
|
||||
Attribute = Identifier "=" Expression Newline;
|
||||
Block = Identifier (StringLit|Identifier)* "{" Newline Body "}" Newline;
|
||||
OneLineBlock = Identifier (StringLit|Identifier)* "{" (Identifier "=" Expression)? "}" Newline;
|
||||
```
|
||||
|
||||
### Configuration Files
|
||||
|
||||
A _configuration file_ is a sequence of characters whose top-level is
|
||||
interpreted as a Body.
|
||||
|
||||
### Bodies
|
||||
|
||||
A _body_ is a collection of associated attributes and blocks. The meaning of
|
||||
this association is defined by the calling application.
|
||||
|
||||
### Attribute Definitions
|
||||
|
||||
An _attribute definition_ assigns a value to a particular attribute name within
|
||||
a body. Each distinct attribute name may be defined no more than once within a
|
||||
single body.
|
||||
|
||||
The attribute value is given as an expression, which is retained literally
|
||||
for later evaluation by the calling application.
|
||||
|
||||
### Blocks
|
||||
|
||||
A _block_ creates a child body that is annotated with a block _type_ and
|
||||
zero or more block _labels_. Blocks create a structural hierarchy which can be
|
||||
interpreted by the calling application.
|
||||
|
||||
Block labels can either be quoted literal strings or naked identifiers.
|
||||
|
||||
## Expressions
|
||||
|
||||
The expression sub-language is used within attribute definitions to specify
|
||||
values.
|
||||
|
||||
```ebnf
|
||||
Expression = (
|
||||
ExprTerm |
|
||||
Operation |
|
||||
Conditional
|
||||
);
|
||||
```
|
||||
|
||||
### Types
|
||||
|
||||
The value types used within the expression language are those defined by the
|
||||
syntax-agnostic HCL information model. An expression may return any valid
|
||||
type, but only a subset of the available types have first-class syntax.
|
||||
A calling application may make other types available via _variables_ and
|
||||
_functions_.
|
||||
|
||||
### Expression Terms
|
||||
|
||||
Expression _terms_ are the operands for unary and binary expressions, as well
|
||||
as acting as expressions in their own right.
|
||||
|
||||
```ebnf
|
||||
ExprTerm = (
|
||||
LiteralValue |
|
||||
CollectionValue |
|
||||
TemplateExpr |
|
||||
VariableExpr |
|
||||
FunctionCall |
|
||||
ForExpr |
|
||||
ExprTerm Index |
|
||||
ExprTerm GetAttr |
|
||||
ExprTerm Splat |
|
||||
"(" Expression ")"
|
||||
);
|
||||
```
|
||||
|
||||
The productions for these different term types are given in their corresponding
|
||||
sections.
|
||||
|
||||
Between the `(` and `)` characters denoting a sub-expression, newline
|
||||
characters are ignored as whitespace.
|
||||
|
||||
### Literal Values
|
||||
|
||||
A _literal value_ immediately represents a particular value of a primitive
|
||||
type.
|
||||
|
||||
```ebnf
|
||||
LiteralValue = (
|
||||
NumericLit |
|
||||
"true" |
|
||||
"false" |
|
||||
"null"
|
||||
);
|
||||
```
|
||||
|
||||
- Numeric literals represent values of type _number_.
|
||||
- The `true` and `false` keywords represent values of type _bool_.
|
||||
- The `null` keyword represents a null value of the dynamic pseudo-type.
|
||||
|
||||
String literals are not directly available in the expression sub-language, but
|
||||
are available via the template sub-language, which can in turn be incorporated
|
||||
via _template expressions_.
|
||||
|
||||
### Collection Values
|
||||
|
||||
A _collection value_ combines zero or more other expressions to produce a
|
||||
collection value.
|
||||
|
||||
```ebnf
|
||||
CollectionValue = tuple | object;
|
||||
tuple = "[" (
|
||||
(Expression ("," Expression)* ","?)?
|
||||
) "]";
|
||||
object = "{" (
|
||||
(objectelem ("," objectelem)* ","?)?
|
||||
) "}";
|
||||
objectelem = (Identifier | Expression) "=" Expression;
|
||||
```
|
||||
|
||||
Only tuple and object values can be directly constructed via native syntax.
|
||||
Tuple and object values can in turn be converted to list, set and map values
|
||||
with other operations, which behaves as defined by the syntax-agnostic HCL
|
||||
information model.
|
||||
|
||||
When specifying an object element, an identifier is interpreted as a literal
|
||||
attribute name as opposed to a variable reference. To populate an item key
|
||||
from a variable, use parentheses to disambiguate:
|
||||
|
||||
- `{foo = "baz"}` is interpreted as an attribute literally named `foo`.
|
||||
- `{(foo) = "baz"}` is interpreted as an attribute whose name is taken
|
||||
from the variable named `foo`.
|
||||
|
||||
Between the open and closing delimiters of these sequences, newline sequences
|
||||
are ignored as whitespace.
|
||||
|
||||
There is a syntax ambiguity between _for expressions_ and collection values
|
||||
whose first element is a reference to a variable named `for`. The
|
||||
_for expression_ interpretation has priority, so to produce a tuple whose
|
||||
first element is the value of a variable named `for`, or an object with a
|
||||
key named `for`, use parentheses to disambiguate:
|
||||
|
||||
- `[for, foo, baz]` is a syntax error.
|
||||
- `[(for), foo, baz]` is a tuple whose first element is the value of variable
|
||||
`for`.
|
||||
- `{for: 1, baz: 2}` is a syntax error.
|
||||
- `{(for): 1, baz: 2}` is an object with an attribute literally named `for`.
|
||||
- `{baz: 2, for: 1}` is equivalent to the previous example, and resolves the
|
||||
ambiguity by reordering.
|
||||
|
||||
### Template Expressions
|
||||
|
||||
A _template expression_ embeds a program written in the template sub-language
|
||||
as an expression. Template expressions come in two forms:
|
||||
|
||||
- A _quoted_ template expression is delimited by quote characters (`"`) and
|
||||
defines a template as a single-line expression with escape characters.
|
||||
- A _heredoc_ template expression is introduced by a `<<` sequence and
|
||||
defines a template via a multi-line sequence terminated by a user-chosen
|
||||
delimiter.
|
||||
|
||||
In both cases the template interpolation and directive syntax is available for
|
||||
use within the delimiters, and any text outside of these special sequences is
|
||||
interpreted as a literal string.
|
||||
|
||||
In _quoted_ template expressions any literal string sequences within the
|
||||
template behave in a special way: literal newline sequences are not permitted
|
||||
and instead _escape sequences_ can be included, starting with the
|
||||
backslash `\`:
|
||||
|
||||
```
|
||||
\n Unicode newline control character
|
||||
\r Unicode carriage return control character
|
||||
\t Unicode tab control character
|
||||
\" Literal quote mark, used to prevent interpretation as end of string
|
||||
\\ Literal backslash, used to prevent interpretation as escape sequence
|
||||
\uNNNN Unicode character from Basic Multilingual Plane (NNNN is four hexadecimal digits)
|
||||
\UNNNNNNNN Unicode character from supplementary planes (NNNNNNNN is eight hexadecimal digits)
|
||||
```
|
||||
|
||||
The _heredoc_ template expression type is introduced by either `<<` or `<<-`,
|
||||
followed by an identifier. The template expression ends when the given
|
||||
identifier subsequently appears again on a line of its own.
|
||||
|
||||
If a heredoc template is introduced with the `<<-` symbol, any literal string
|
||||
at the start of each line is analyzed to find the minimum number of leading
|
||||
spaces, and then that number of prefix spaces is removed from all line-leading
|
||||
literal strings. The final closing marker may also have an arbitrary number
|
||||
of spaces preceding it on its line.
|
||||
|
||||
```ebnf
|
||||
TemplateExpr = quotedTemplate | heredocTemplate;
|
||||
quotedTemplate = (as defined in prose above);
|
||||
heredocTemplate = (
|
||||
("<<" | "<<-") Identifier Newline
|
||||
(content as defined in prose above)
|
||||
Identifier Newline
|
||||
);
|
||||
```
|
||||
|
||||
A quoted template expression containing only a single literal string serves
|
||||
as a syntax for defining literal string _expressions_. In certain contexts
|
||||
the template syntax is restricted in this manner:
|
||||
|
||||
```ebnf
|
||||
StringLit = '"' (quoted literals as defined in prose above) '"';
|
||||
```
|
||||
|
||||
The `StringLit` production permits the escape sequences discussed for quoted
|
||||
template expressions as above, but does _not_ permit template interpolation
|
||||
or directive sequences.
|
||||
|
||||
### Variables and Variable Expressions
|
||||
|
||||
A _variable_ is a value that has been assigned a symbolic name. Variables are
|
||||
made available for use in expressions by the calling application, by populating
|
||||
the _global scope_ used for expression evaluation.
|
||||
|
||||
Variables can also be created by expressions themselves, which always creates
|
||||
a _child scope_ that incorporates the variables from its parent scope but
|
||||
(re-)defines zero or more names with new values.
|
||||
|
||||
The value of a variable is accessed using a _variable expression_, which is
|
||||
a standalone `Identifier` whose name corresponds to a defined variable:
|
||||
|
||||
```ebnf
|
||||
VariableExpr = Identifier;
|
||||
```
|
||||
|
||||
Variables in a particular scope are immutable, but child scopes may _hide_
|
||||
a variable from an ancestor scope by defining a new variable of the same name.
|
||||
When looking up variables, the most locally-defined variable of the given name
|
||||
is used, and ancestor-scoped variables of the same name cannot be accessed.
|
||||
|
||||
No direct syntax is provided for declaring or assigning variables, but other
|
||||
expression constructs implicitly create child scopes and define variables as
|
||||
part of their evaluation.
|
||||
|
||||
### Functions and Function Calls
|
||||
|
||||
A _function_ is an operation that has been assigned a symbolic name. Functions
|
||||
are made available for use in expressions by the calling application, by
|
||||
populating the _function table_ used for expression evaluation.
|
||||
|
||||
The namespace of functions is distinct from the namespace of variables. A
|
||||
function and a variable may share the same name with no implication that they
|
||||
are in any way related.
|
||||
|
||||
A function can be executed via a _function call_ expression:
|
||||
|
||||
```ebnf
|
||||
FunctionCall = Identifier "(" arguments ")";
|
||||
Arguments = (
|
||||
() |
|
||||
(Expression ("," Expression)* ("," | "...")?)
|
||||
);
|
||||
```
|
||||
|
||||
The definition of functions and the semantics of calling them are defined by
|
||||
the language-agnostic HCL information model. The given arguments are mapped
|
||||
onto the function's _parameters_ and the result of a function call expression
|
||||
is the return value of the named function when given those arguments.
|
||||
|
||||
If the final argument expression is followed by the ellipsis symbol (`...`),
|
||||
the final argument expression must evaluate to either a list or tuple value.
|
||||
The elements of the value are each mapped to a single parameter of the
|
||||
named function, beginning at the first parameter remaining after all other
|
||||
argument expressions have been mapped.
|
||||
|
||||
Within the parentheses that delimit the function arguments, newline sequences
|
||||
are ignored as whitespace.
|
||||
|
||||
### For Expressions
|
||||
|
||||
A _for expression_ is a construct for constructing a collection by projecting
|
||||
the items from another collection.
|
||||
|
||||
```ebnf
|
||||
ForExpr = forTupleExpr | forObjectExpr;
|
||||
forTupleExpr = "[" forIntro Expression forCond? "]";
|
||||
forObjectExpr = "{" forIntro Expression "=>" Expression "..."? forCond? "}";
|
||||
forIntro = "for" Identifier ("," Identifier)? "in" Expression ":";
|
||||
forCond = "if" Expression;
|
||||
```
|
||||
|
||||
The punctuation used to delimit a for expression decides whether it will produce
|
||||
a tuple value (`[` and `]`) or an object value (`{` and `}`).
|
||||
|
||||
The "introduction" is equivalent in both cases: the keyword `for` followed by
|
||||
either one or two identifiers separated by a comma which define the temporary
|
||||
variable names used for iteration, followed by the keyword `in` and then
|
||||
an expression that must evaluate to a value that can be iterated. The
|
||||
introduction is then terminated by the colon (`:`) symbol.
|
||||
|
||||
If only one identifier is provided, it is the name of a variable that will
|
||||
be temporarily assigned the value of each element during iteration. If both
|
||||
are provided, the first is the key and the second is the value.
|
||||
|
||||
Tuple, object, list, map, and set types are iterable. The type of collection
|
||||
used defines how the key and value variables are populated:
|
||||
|
||||
- For tuple and list types, the _key_ is the zero-based index into the
|
||||
sequence for each element, and the _value_ is the element value. The
|
||||
elements are visited in index order.
|
||||
- For object and map types, the _key_ is the string attribute name or element
|
||||
key, and the _value_ is the attribute or element value. The elements are
|
||||
visited in the order defined by a lexicographic sort of the attribute names
|
||||
or keys.
|
||||
- For set types, the _key_ and _value_ are both the element value. The elements
|
||||
are visited in an undefined but consistent order.
|
||||
|
||||
The expression after the colon and (in the case of object `for`) the expression
|
||||
after the `=>` are both evaluated once for each element of the source
|
||||
collection, in a local scope that defines the key and value variable names
|
||||
specified.
|
||||
|
||||
The results of evaluating these expressions for each input element are used
|
||||
to populate an element in the new collection. In the case of tuple `for`, the
|
||||
single expression becomes an element, appending values to the tuple in visit
|
||||
order. In the case of object `for`, the pair of expressions is used as an
|
||||
attribute name and value respectively, creating an element in the resulting
|
||||
object.
|
||||
|
||||
In the case of object `for`, it is an error if two input elements produce
|
||||
the same result from the attribute name expression, since duplicate
|
||||
attributes are not possible. If the ellipsis symbol (`...`) appears
|
||||
immediately after the value expression, this activates the grouping mode in
|
||||
which each value in the resulting object is a _tuple_ of all of the values
|
||||
that were produced against each distinct key.
|
||||
|
||||
- `[for v in ["a", "b"]: v]` returns `["a", "b"]`.
|
||||
- `[for i, v in ["a", "b"]: i]` returns `[0, 1]`.
|
||||
- `{for i, v in ["a", "b"]: v => i}` returns `{a = 0, b = 1}`.
|
||||
- `{for i, v in ["a", "a", "b"]: v => i}` produces an error, because attribute
|
||||
`a` is defined twice.
|
||||
- `{for i, v in ["a", "a", "b"]: v => i...}` returns `{a = [0, 1], b = [2]}`.
|
||||
|
||||
If the `if` keyword is used after the element expression(s), it applies an
|
||||
additional predicate that can be used to conditionally filter elements from
|
||||
the source collection from consideration. The expression following `if` is
|
||||
evaluated once for each source element, in the same scope used for the
|
||||
element expression(s). It must evaluate to a boolean value; if `true`, the
|
||||
element will be evaluated as normal, while if `false` the element will be
|
||||
skipped.
|
||||
|
||||
- `[for i, v in ["a", "b", "c"]: v if i < 2]` returns `["a", "b"]`.
|
||||
|
||||
If the collection value, element expression(s) or condition expression return
|
||||
unknown values that are otherwise type-valid, the result is a value of the
|
||||
dynamic pseudo-type.
|
||||
|
||||
### Index Operator
|
||||
|
||||
The _index_ operator returns the value of a single element of a collection
|
||||
value. It is a postfix operator and can be applied to any value that has
|
||||
a tuple, object, map, or list type.
|
||||
|
||||
```ebnf
|
||||
Index = "[" Expression "]";
|
||||
```
|
||||
|
||||
The expression delimited by the brackets is the _key_ by which an element
|
||||
will be looked up.
|
||||
|
||||
If the index operator is applied to a value of tuple or list type, the
|
||||
key expression must be a non-negative integer number representing the
|
||||
zero-based element index to access. If applied to a value of object or map
|
||||
type, the key expression must be a string representing the attribute name
|
||||
or element key. If the given key value is not of the appropriate type, a
|
||||
conversion is attempted using the conversion rules from the HCL
|
||||
syntax-agnostic information model.
|
||||
|
||||
An error is produced if the given key expression does not correspond to
|
||||
an element in the collection, either because it is of an unconvertible type,
|
||||
because it is outside the range of elements for a tuple or list, or because
|
||||
the given attribute or key does not exist.
|
||||
|
||||
If either the collection or the key are an unknown value of an
|
||||
otherwise-suitable type, the return value is an unknown value whose type
|
||||
matches what type would be returned given known values, or a value of the
|
||||
dynamic pseudo-type if type information alone cannot determine a suitable
|
||||
return type.
|
||||
|
||||
Within the brackets that delimit the index key, newline sequences are ignored
|
||||
as whitespace.
|
||||
|
||||
The HCL native syntax also includes a _legacy_ index operator that exists
|
||||
only for compatibility with the precursor language HIL:
|
||||
|
||||
```ebnf
|
||||
LegacyIndex = '.' digit+
|
||||
```
|
||||
|
||||
This legacy index operator must be supported by parsers for compatibility but
|
||||
should not be used in new configurations. This allows an attribute-access-like
|
||||
syntax for indexing, but it must still be interpreted as an index operation rather
|
||||
than attribute access.
|
||||
|
||||
The legacy syntax does not support chaining of index operations, like
|
||||
`foo.0.0.bar`, because the interpretation of `0.0` as a number literal token
|
||||
takes priority and thus renders the resulting sequence invalid.
|
||||
|
||||
### Attribute Access Operator
|
||||
|
||||
The _attribute access_ operator returns the value of a single attribute in
|
||||
an object value. It is a postfix operator and can be applied to any value
|
||||
that has an object type.
|
||||
|
||||
```ebnf
|
||||
GetAttr = "." Identifier;
|
||||
```
|
||||
|
||||
The given identifier is interpreted as the name of the attribute to access.
|
||||
An error is produced if the object to which the operator is applied does not
|
||||
have an attribute with the given name.
|
||||
|
||||
If the object is an unknown value of a type that has the attribute named, the
|
||||
result is an unknown value of the attribute's type.
|
||||
|
||||
### Splat Operators
|
||||
|
||||
The _splat operators_ allow convenient access to attributes or elements of
|
||||
elements in a tuple, list, or set value.
|
||||
|
||||
There are two kinds of "splat" operator:
|
||||
|
||||
- The _attribute-only_ splat operator supports only attribute lookups into
|
||||
the elements from a list, but supports an arbitrary number of them.
|
||||
|
||||
- The _full_ splat operator additionally supports indexing into the elements
|
||||
from a list, and allows any combination of attribute access and index
|
||||
operations.
|
||||
|
||||
```ebnf
|
||||
Splat = attrSplat | fullSplat;
|
||||
attrSplat = "." "*" GetAttr*;
|
||||
fullSplat = "[" "*" "]" (GetAttr | Index)*;
|
||||
```
|
||||
|
||||
The splat operators can be thought of as shorthands for common operations that
|
||||
could otherwise be performed using _for expressions_:
|
||||
|
||||
- `tuple.*.foo.bar[0]` is approximately equivalent to
|
||||
`[for v in tuple: v.foo.bar][0]`.
|
||||
- `tuple[*].foo.bar[0]` is approximately equivalent to
|
||||
`[for v in tuple: v.foo.bar[0]]`
|
||||
|
||||
Note the difference in how the trailing index operator is interpreted in
|
||||
each case. This different interpretation is the key difference between the
|
||||
_attribute-only_ and _full_ splat operators.
|
||||
|
||||
Splat operators have one additional behavior compared to the equivalent
|
||||
_for expressions_ shown above: if a splat operator is applied to a value that
|
||||
is _not_ of tuple, list, or set type, the value is coerced automatically into
|
||||
a single-value list of the value type:
|
||||
|
||||
- `any_object.*.id` is equivalent to `[any_object.id]`, assuming that `any_object`
|
||||
is a single object.
|
||||
- `any_number.*` is equivalent to `[any_number]`, assuming that `any_number`
|
||||
is a single number.
|
||||
|
||||
If applied to a null value that is not tuple, list, or set, the result is always
|
||||
an empty tuple, which allows conveniently converting a possibly-null scalar
|
||||
value into a tuple of zero or one elements. It is illegal to apply a splat
|
||||
operator to a null value of tuple, list, or set type.
|
||||
|
||||
### Operations
|
||||
|
||||
Operations apply a particular operator to either one or two expression terms.
|
||||
|
||||
```ebnf
|
||||
Operation = unaryOp | binaryOp;
|
||||
unaryOp = ("-" | "!") ExprTerm;
|
||||
binaryOp = ExprTerm binaryOperator ExprTerm;
|
||||
binaryOperator = compareOperator | arithmeticOperator | logicOperator;
|
||||
compareOperator = "==" | "!=" | "<" | ">" | "<=" | ">=";
|
||||
arithmeticOperator = "+" | "-" | "*" | "/" | "%";
|
||||
logicOperator = "&&" | "||" | "!";
|
||||
```
|
||||
|
||||
The unary operators have the highest precedence.
|
||||
|
||||
The binary operators are grouped into the following precedence levels:
|
||||
|
||||
```
|
||||
Level Operators
|
||||
6 * / %
|
||||
5 + -
|
||||
4 > >= < <=
|
||||
3 == !=
|
||||
2 &&
|
||||
1 ||
|
||||
```
|
||||
|
||||
Higher values of "level" bind tighter. Operators within the same precedence
|
||||
level have left-to-right associativity. For example, `x / y * z` is equivalent
|
||||
to `(x / y) * z`.
|
||||
|
||||
### Comparison Operators
|
||||
|
||||
Comparison operators always produce boolean values, as a result of testing
|
||||
the relationship between two values.
|
||||
|
||||
The two equality operators apply to values of any type:
|
||||
|
||||
```
|
||||
a == b equal
|
||||
a != b not equal
|
||||
```
|
||||
|
||||
Two values are equal if they are of identical types and their values are
|
||||
equal as defined in the HCL syntax-agnostic information model. The equality
|
||||
operators are commutative and opposite, such that `(a == b) == !(a != b)`
|
||||
and `(a == b) == (b == a)` for all values `a` and `b`.
|
||||
|
||||
The four numeric comparison operators apply only to numbers:
|
||||
|
||||
```
|
||||
a < b less than
|
||||
a <= b less than or equal to
|
||||
a > b greater than
|
||||
a >= b greater than or equal to
|
||||
```
|
||||
|
||||
If either operand of a comparison operator is a correctly-typed unknown value
|
||||
or a value of the dynamic pseudo-type, the result is an unknown boolean.
|
||||
|
||||
### Arithmetic Operators
|
||||
|
||||
Arithmetic operators apply only to number values and always produce number
|
||||
values as results.
|
||||
|
||||
```
|
||||
a + b sum (addition)
|
||||
a - b difference (subtraction)
|
||||
a * b product (multiplication)
|
||||
a / b quotient (division)
|
||||
a % b remainder (modulo)
|
||||
-a negation
|
||||
```
|
||||
|
||||
Arithmetic operations are considered to be performed in an arbitrary-precision
|
||||
number space.
|
||||
|
||||
If either operand of an arithmetic operator is an unknown number or a value
|
||||
of the dynamic pseudo-type, the result is an unknown number.
|
||||
|
||||
### Logic Operators
|
||||
|
||||
Logic operators apply only to boolean values and always produce boolean values
|
||||
as results.
|
||||
|
||||
```
|
||||
a && b logical AND
|
||||
a || b logical OR
|
||||
!a logical NOT
|
||||
```
|
||||
|
||||
If either operand of a logic operator is an unknown bool value or a value
|
||||
of the dynamic pseudo-type, the result is an unknown bool value.
|
||||
|
||||
### Conditional Operator
|
||||
|
||||
The conditional operator allows selecting from one of two expressions based on
|
||||
the outcome of a boolean expression.
|
||||
|
||||
```ebnf
|
||||
Conditional = Expression "?" Expression ":" Expression;
|
||||
```
|
||||
|
||||
The first expression is the _predicate_, which is evaluated and must produce
|
||||
a boolean result. If the predicate value is `true`, the result of the second
|
||||
expression is the result of the conditional. If the predicate value is
|
||||
`false`, the result of the third expression is the result of the conditional.
|
||||
|
||||
The second and third expressions must be of the same type or must be able to
|
||||
unify into a common type using the type unification rules defined in the
|
||||
HCL syntax-agnostic information model. This unified type is the result type
|
||||
of the conditional, with both expressions converted as necessary to the
|
||||
unified type.
|
||||
|
||||
If the predicate is an unknown boolean value or a value of the dynamic
|
||||
pseudo-type then the result is an unknown value of the unified type of the
|
||||
other two expressions.
|
||||
|
||||
If either the second or third expressions produce errors when evaluated,
|
||||
these errors are passed through only if the erroneous expression is selected.
|
||||
This allows for expressions such as
|
||||
`length(some_list) > 0 ? some_list[0] : default` (given some suitable `length`
|
||||
function) without producing an error when the predicate is `false`.
|
||||
|
||||
## Templates
|
||||
|
||||
The template sub-language is used within template expressions to concisely
|
||||
combine strings and other values to produce other strings. It can also be
|
||||
used in isolation as a standalone template language.
|
||||
|
||||
```ebnf
|
||||
Template = (
|
||||
TemplateLiteral |
|
||||
TemplateInterpolation |
|
||||
TemplateDirective
|
||||
)*
|
||||
TemplateDirective = TemplateIf | TemplateFor;
|
||||
```
|
||||
|
||||
A template behaves like an expression that always returns a string value.
|
||||
The different elements of the template are evaluated and combined into a
|
||||
single string to return. If any of the elements produce an unknown string
|
||||
or a value of the dynamic pseudo-type, the result is an unknown string.
|
||||
|
||||
An important use-case for standalone templates is to enable the use of
|
||||
expressions in alternative HCL syntaxes where a native expression grammar is
|
||||
not available. For example, the HCL JSON profile treats the values of JSON
|
||||
strings as standalone templates when attributes are evaluated in expression
|
||||
mode.
|
||||
|
||||
### Template Literals
|
||||
|
||||
A template literal is a literal sequence of characters to include in the
|
||||
resulting string. When the template sub-language is used standalone, a
|
||||
template literal can contain any unicode character, with the exception
|
||||
of the sequences that introduce interpolations and directives, and for the
|
||||
sequences that escape those introductions.
|
||||
|
||||
The interpolation and directive introductions are escaped by doubling their
|
||||
leading characters. The `${` sequence is escaped as `$${` and the `%{`
|
||||
sequence is escaped as `%%{`.
|
||||
|
||||
When the template sub-language is embedded in the expression language via
|
||||
_template expressions_, additional constraints and transforms are applied to
|
||||
template literals as described in the definition of template expressions.
|
||||
|
||||
The value of a template literal can be modified by _strip markers_ in any
|
||||
interpolations or directives that are adjacent to it. A strip marker is
|
||||
a tilde (`~`) placed immediately after the opening `{` or before the closing
|
||||
`}` of a template sequence:
|
||||
|
||||
- `hello ${~ "world" }` produces `"helloworld"`.
|
||||
- `%{ if true ~} hello %{~ endif }` produces `"hello"`.
|
||||
|
||||
When a strip marker is present, any spaces adjacent to it in the corresponding
|
||||
string literal (if any) are removed before producing the final value. Space
|
||||
characters are interpreted as per Unicode's definition.
|
||||
|
||||
Stripping is done at syntax level rather than value level. Values returned
|
||||
by interpolations or directives are not subject to stripping:
|
||||
|
||||
- `${"hello" ~}${" world"}` produces `"hello world"`, and not `"helloworld"`,
|
||||
because the space is not in a template literal directly adjacent to the
|
||||
strip marker.
|
||||
|
||||
### Template Interpolations
|
||||
|
||||
An _interpolation sequence_ evaluates an expression (written in the
|
||||
expression sub-language), converts the result to a string value, and
|
||||
replaces itself with the resulting string.
|
||||
|
||||
```ebnf
|
||||
TemplateInterpolation = ("${" | "${~") Expression ("}" | "~}");
|
||||
```
|
||||
|
||||
If the expression result cannot be converted to a string, an error is
|
||||
produced.
|
||||
|
||||
### Template If Directive
|
||||
|
||||
The template `if` directive is the template equivalent of the
|
||||
_conditional expression_, allowing selection of one of two sub-templates based
|
||||
on the value of a predicate expression.
|
||||
|
||||
```ebnf
|
||||
TemplateIf = (
|
||||
("%{" | "%{~") "if" Expression ("}" | "~}")
|
||||
Template
|
||||
(
|
||||
("%{" | "%{~") "else" ("}" | "~}")
|
||||
Template
|
||||
)?
|
||||
("%{" | "%{~") "endif" ("}" | "~}")
|
||||
);
|
||||
```
|
||||
|
||||
The evaluation of the `if` directive is equivalent to the conditional
|
||||
expression, with the following exceptions:
|
||||
|
||||
- The two sub-templates always produce strings, and thus the result value is
|
||||
also always a string.
|
||||
- The `else` clause may be omitted, in which case the conditional's third
|
||||
expression result is implied to be the empty string.
|
||||
|
||||
### Template For Directive
|
||||
|
||||
The template `for` directive is the template equivalent of the _for expression_,
|
||||
producing zero or more copies of its sub-template based on the elements of
|
||||
a collection.
|
||||
|
||||
```ebnf
|
||||
TemplateFor = (
|
||||
("%{" | "%{~") "for" Identifier ("," Identifier)? "in" Expression ("}" | "~}")
|
||||
Template
|
||||
("%{" | "%{~") "endfor" ("}" | "~}")
|
||||
);
|
||||
```
|
||||
|
||||
The evaluation of the `for` directive is equivalent to the _for expression_
|
||||
when producing a tuple, with the following exceptions:
|
||||
|
||||
- The sub-template always produces a string.
|
||||
- There is no equivalent of the "if" clause on the for expression.
|
||||
- The elements of the resulting tuple are all converted to strings and
|
||||
concatenated to produce a flat string result.
|
||||
|
||||
### Template Interpolation Unwrapping
|
||||
|
||||
As a special case, a template that consists only of a single interpolation,
|
||||
with no surrounding literals, directives or other interpolations, is
|
||||
"unwrapped". In this case, the result of the interpolation expression is
|
||||
returned verbatim, without conversion to string.
|
||||
|
||||
This special case exists primarily to enable the native template language
|
||||
to be used inside strings in alternative HCL syntaxes that lack a first-class
|
||||
template or expression syntax. Unwrapping allows arbitrary expressions to be
|
||||
used to populate attributes when strings in such languages are interpreted
|
||||
as templates.
|
||||
|
||||
- `${true}` produces the boolean value `true`
|
||||
- `${"${true}"}` produces the boolean value `true`, because both the inner
|
||||
and outer interpolations are subject to unwrapping.
|
||||
- `hello ${true}` produces the string `"hello true"`
|
||||
- `${""}${true}` produces the string `"true"` because there are two
|
||||
interpolation sequences, even though one produces an empty result.
|
||||
- `%{ for v in [true] }${v}%{ endfor }` produces the string `true` because
|
||||
the presence of the `for` directive circumvents the unwrapping even though
|
||||
the final result is a single value.
|
||||
|
||||
In some contexts this unwrapping behavior may be circumvented by the calling
|
||||
application, by converting the final template result to string. This is
|
||||
necessary, for example, if a standalone template is being used to produce
|
||||
the direct contents of a file, since the result in that case must always be a
|
||||
string.
|
||||
|
||||
## Static Analysis
|
||||
|
||||
The HCL static analysis operations are implemented for some expression types
|
||||
in the native syntax, as described in the following sections.
|
||||
|
||||
A goal for static analysis of the native syntax is for the interpretation to
|
||||
be as consistent as possible with the dynamic evaluation interpretation of
|
||||
the given expression, though some deviations are intentionally made in order
|
||||
to maximize the potential for analysis.
|
||||
|
||||
### Static List
|
||||
|
||||
The tuple construction syntax can be interpreted as a static list. All of
|
||||
the expression elements given are returned as the static list elements,
|
||||
with no further interpretation.
|
||||
|
||||
### Static Map
|
||||
|
||||
The object construction syntax can be interpreted as a static map. All of the
|
||||
key/value pairs given are returned as the static pairs, with no further
|
||||
interpretation.
|
||||
|
||||
The usual requirement that an attribute name be interpretable as a string
|
||||
does not apply to this static analysis, allowing callers to provide map-like
|
||||
constructs with different key types by building on the map syntax.
|
||||
|
||||
### Static Call
|
||||
|
||||
The function call syntax can be interpreted as a static call. The called
|
||||
function name is returned verbatim and the given argument expressions are
|
||||
returned as the static arguments, with no further interpretation.
|
||||
|
||||
### Static Traversal
|
||||
|
||||
A variable expression and any attached attribute access operations and
|
||||
constant index operations can be interpreted as a static traversal.
|
||||
|
||||
The keywords `true`, `false` and `null` can also be interpreted as
|
||||
static traversals, behaving as if they were references to variables of those
|
||||
names, to allow callers to redefine the meaning of those keywords in certain
|
||||
contexts.
|
||||
394
vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure.go
generated
vendored
Normal file
394
vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure.go
generated
vendored
Normal file
@@ -0,0 +1,394 @@
|
||||
package hclsyntax
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
)
|
||||
|
||||
// AsHCLBlock returns the block data expressed as a *hcl.Block.
|
||||
func (b *Block) AsHCLBlock() *hcl.Block {
|
||||
if b == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
lastHeaderRange := b.TypeRange
|
||||
if len(b.LabelRanges) > 0 {
|
||||
lastHeaderRange = b.LabelRanges[len(b.LabelRanges)-1]
|
||||
}
|
||||
|
||||
return &hcl.Block{
|
||||
Type: b.Type,
|
||||
Labels: b.Labels,
|
||||
Body: b.Body,
|
||||
|
||||
DefRange: hcl.RangeBetween(b.TypeRange, lastHeaderRange),
|
||||
TypeRange: b.TypeRange,
|
||||
LabelRanges: b.LabelRanges,
|
||||
}
|
||||
}
|
||||
|
||||
// Body is the implementation of hcl.Body for the HCL native syntax.
//
// Attributes and Blocks hold the items found by the parser; the hidden
// maps track which of those items have already been consumed by calls
// to PartialContent.
type Body struct {
	Attributes Attributes
	Blocks     Blocks

	// These are used with PartialContent to produce a "remaining items"
	// body to return. They are nil on all bodies fresh out of the parser.
	hiddenAttrs  map[string]struct{}
	hiddenBlocks map[string]struct{}

	SrcRange hcl.Range // Source range of the body; returned by Range
	EndRange hcl.Range // Final token of the body, for reporting missing items
}

// Assert that *Body implements hcl.Body
var assertBodyImplBody hcl.Body = &Body{}
|
||||
|
||||
// walkChildNodes passes each of the body's child AST node collections to
// w, visiting attributes before blocks.
func (b *Body) walkChildNodes(w internalWalkFunc) {
	w(b.Attributes)
	w(b.Blocks)
}
|
||||
|
||||
// Range returns the body's SrcRange, as part of implementing the
// syntax tree node interface.
func (b *Body) Range() hcl.Range {
	return b.SrcRange
}
|
||||
|
||||
// Content implements hcl.Body, requiring that the body contain only the
// attributes and block types described by schema. It delegates to
// PartialContent and then produces an error diagnostic, with a "did you
// mean" suggestion where possible, for every item left unconsumed.
func (b *Body) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
	content, remainHCL, diags := b.PartialContent(schema)

	// Now we'll see if anything actually remains, to produce errors about
	// extraneous items.
	remain := remainHCL.(*Body)

	for name, attr := range b.Attributes {
		if _, hidden := remain.hiddenAttrs[name]; !hidden {
			// Candidate suggestions are only those schema attributes that
			// the body didn't already define.
			var suggestions []string
			for _, attrS := range schema.Attributes {
				if _, defined := content.Attributes[attrS.Name]; defined {
					continue
				}
				suggestions = append(suggestions, attrS.Name)
			}
			suggestion := nameSuggestion(name, suggestions)
			if suggestion != "" {
				suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
			} else {
				// Is there a block of the same name?
				for _, blockS := range schema.Blocks {
					if blockS.Type == name {
						suggestion = fmt.Sprintf(" Did you mean to define a block of type %q?", name)
						break
					}
				}
			}

			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Unsupported argument",
				Detail:   fmt.Sprintf("An argument named %q is not expected here.%s", name, suggestion),
				Subject:  &attr.NameRange,
			})
		}
	}

	for _, block := range b.Blocks {
		blockTy := block.Type
		if _, hidden := remain.hiddenBlocks[blockTy]; !hidden {
			var suggestions []string
			for _, blockS := range schema.Blocks {
				suggestions = append(suggestions, blockS.Type)
			}
			suggestion := nameSuggestion(blockTy, suggestions)
			if suggestion != "" {
				suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
			} else {
				// Is there an attribute of the same name?
				for _, attrS := range schema.Attributes {
					if attrS.Name == blockTy {
						suggestion = fmt.Sprintf(" Did you mean to define argument %q? If so, use the equals sign to assign it a value.", blockTy)
						break
					}
				}
			}

			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Unsupported block type",
				Detail:   fmt.Sprintf("Blocks of type %q are not expected here.%s", blockTy, suggestion),
				Subject:  &block.TypeRange,
			})
		}
	}

	return content, diags
}
|
||||
|
||||
// PartialContent implements hcl.Body, extracting the attributes and blocks
// described by schema and returning a "remaining" body representing
// everything not consumed. It produces diagnostics for missing required
// attributes and for blocks whose label count doesn't match the schema,
// but items not mentioned in the schema are left in the remaining body
// without error.
func (b *Body) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
	attrs := make(hcl.Attributes)
	var blocks hcl.Blocks
	var diags hcl.Diagnostics
	hiddenAttrs := make(map[string]struct{})
	hiddenBlocks := make(map[string]struct{})

	// Seed the hidden sets from any earlier PartialContent calls on this
	// body, so that successive calls consume items cumulatively.
	if b.hiddenAttrs != nil {
		for k, v := range b.hiddenAttrs {
			hiddenAttrs[k] = v
		}
	}
	if b.hiddenBlocks != nil {
		for k, v := range b.hiddenBlocks {
			hiddenBlocks[k] = v
		}
	}

	for _, attrS := range schema.Attributes {
		name := attrS.Name
		attr, exists := b.Attributes[name]
		_, hidden := hiddenAttrs[name]
		if hidden || !exists {
			// An attribute already consumed earlier is treated the same as
			// one that was never defined.
			if attrS.Required {
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Missing required argument",
					Detail:   fmt.Sprintf("The argument %q is required, but no definition was found.", attrS.Name),
					Subject:  b.MissingItemRange().Ptr(),
				})
			}
			continue
		}

		hiddenAttrs[name] = struct{}{}
		attrs[name] = attr.AsHCLAttribute()
	}

	// Index the block schemas by type for O(1) lookup while scanning blocks.
	blocksWanted := make(map[string]hcl.BlockHeaderSchema)
	for _, blockS := range schema.Blocks {
		blocksWanted[blockS.Type] = blockS
	}

	for _, block := range b.Blocks {
		if _, hidden := hiddenBlocks[block.Type]; hidden {
			continue
		}
		blockS, wanted := blocksWanted[block.Type]
		if !wanted {
			continue
		}

		// Too many labels: report against the first extraneous label range.
		if len(block.Labels) > len(blockS.LabelNames) {
			name := block.Type
			if len(blockS.LabelNames) == 0 {
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  fmt.Sprintf("Extraneous label for %s", name),
					Detail: fmt.Sprintf(
						"No labels are expected for %s blocks.", name,
					),
					Subject: block.LabelRanges[0].Ptr(),
					Context: hcl.RangeBetween(block.TypeRange, block.OpenBraceRange).Ptr(),
				})
			} else {
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  fmt.Sprintf("Extraneous label for %s", name),
					Detail: fmt.Sprintf(
						"Only %d labels (%s) are expected for %s blocks.",
						len(blockS.LabelNames), strings.Join(blockS.LabelNames, ", "), name,
					),
					Subject: block.LabelRanges[len(blockS.LabelNames)].Ptr(),
					Context: hcl.RangeBetween(block.TypeRange, block.OpenBraceRange).Ptr(),
				})
			}
			continue
		}

		// Too few labels: report at the open brace, where the next label
		// would have appeared.
		if len(block.Labels) < len(blockS.LabelNames) {
			name := block.Type
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  fmt.Sprintf("Missing %s for %s", blockS.LabelNames[len(block.Labels)], name),
				Detail: fmt.Sprintf(
					"All %s blocks must have %d labels (%s).",
					name, len(blockS.LabelNames), strings.Join(blockS.LabelNames, ", "),
				),
				Subject: &block.OpenBraceRange,
				Context: hcl.RangeBetween(block.TypeRange, block.OpenBraceRange).Ptr(),
			})
			continue
		}

		blocks = append(blocks, block.AsHCLBlock())
	}

	// We hide blocks only after we've processed all of them, since otherwise
	// we can't process more than one of the same type.
	for _, blockS := range schema.Blocks {
		hiddenBlocks[blockS.Type] = struct{}{}
	}

	remain := &Body{
		Attributes: b.Attributes,
		Blocks:     b.Blocks,

		hiddenAttrs:  hiddenAttrs,
		hiddenBlocks: hiddenBlocks,

		SrcRange: b.SrcRange,
		EndRange: b.EndRange,
	}

	return &hcl.BodyContent{
		Attributes: attrs,
		Blocks:     blocks,

		MissingItemRange: b.MissingItemRange(),
	}, remain, diags
}
|
||||
|
||||
func (b *Body) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
|
||||
attrs := make(hcl.Attributes)
|
||||
var diags hcl.Diagnostics
|
||||
|
||||
if len(b.Blocks) > 0 {
|
||||
example := b.Blocks[0]
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: fmt.Sprintf("Unexpected %q block", example.Type),
|
||||
Detail: "Blocks are not allowed here.",
|
||||
Subject: &example.TypeRange,
|
||||
})
|
||||
// we will continue processing anyway, and return the attributes
|
||||
// we are able to find so that certain analyses can still be done
|
||||
// in the face of errors.
|
||||
}
|
||||
|
||||
if b.Attributes == nil {
|
||||
return attrs, diags
|
||||
}
|
||||
|
||||
for name, attr := range b.Attributes {
|
||||
if _, hidden := b.hiddenAttrs[name]; hidden {
|
||||
continue
|
||||
}
|
||||
attrs[name] = attr.AsHCLAttribute()
|
||||
}
|
||||
|
||||
return attrs, diags
|
||||
}
|
||||
|
||||
func (b *Body) MissingItemRange() hcl.Range {
|
||||
return hcl.Range{
|
||||
Filename: b.SrcRange.Filename,
|
||||
Start: b.SrcRange.Start,
|
||||
End: b.SrcRange.Start,
|
||||
}
|
||||
}
|
||||
|
||||
// Attributes is the collection of attribute definitions within a body.
|
||||
type Attributes map[string]*Attribute
|
||||
|
||||
func (a Attributes) walkChildNodes(w internalWalkFunc) {
|
||||
for _, attr := range a {
|
||||
w(attr)
|
||||
}
|
||||
}
|
||||
|
||||
// Range returns the range of some arbitrary point within the set of
|
||||
// attributes, or an invalid range if there are no attributes.
|
||||
//
|
||||
// This is provided only to complete the Node interface, but has no practical
|
||||
// use.
|
||||
func (a Attributes) Range() hcl.Range {
|
||||
// An attributes doesn't really have a useful range to report, since
|
||||
// it's just a grouping construct. So we'll arbitrarily take the
|
||||
// range of one of the attributes, or produce an invalid range if we have
|
||||
// none. In practice, there's little reason to ask for the range of
|
||||
// an Attributes.
|
||||
for _, attr := range a {
|
||||
return attr.Range()
|
||||
}
|
||||
return hcl.Range{
|
||||
Filename: "<unknown>",
|
||||
}
|
||||
}
|
||||
|
||||
// Attribute represents a single attribute definition within a body.
type Attribute struct {
	Name string     // the attribute's name
	Expr Expression // the expression assigned to the attribute

	SrcRange    hcl.Range // range of the whole definition (reported by Range)
	NameRange   hcl.Range // range of the name; mapped to hcl.Attribute.NameRange
	EqualsRange hcl.Range // presumably the range of the "=" sign — not read in this file
}

// walkChildNodes visits the attribute's single child node, its expression.
func (a *Attribute) walkChildNodes(w internalWalkFunc) {
	w(a.Expr)
}

// Range returns the source range of the whole attribute definition.
func (a *Attribute) Range() hcl.Range {
	return a.SrcRange
}

// AsHCLAttribute returns the block data expressed as a *hcl.Attribute.
// A nil receiver yields a nil result.
func (a *Attribute) AsHCLAttribute() *hcl.Attribute {
	if a == nil {
		return nil
	}
	return &hcl.Attribute{
		Name: a.Name,
		Expr: a.Expr,

		Range:     a.SrcRange,
		NameRange: a.NameRange,
	}
}
|
||||
|
||||
// Blocks is the list of nested blocks within a body.
|
||||
type Blocks []*Block
|
||||
|
||||
func (bs Blocks) walkChildNodes(w internalWalkFunc) {
|
||||
for _, block := range bs {
|
||||
w(block)
|
||||
}
|
||||
}
|
||||
|
||||
// Range returns the range of some arbitrary point within the list of
|
||||
// blocks, or an invalid range if there are no blocks.
|
||||
//
|
||||
// This is provided only to complete the Node interface, but has no practical
|
||||
// use.
|
||||
func (bs Blocks) Range() hcl.Range {
|
||||
if len(bs) > 0 {
|
||||
return bs[0].Range()
|
||||
}
|
||||
return hcl.Range{
|
||||
Filename: "<unknown>",
|
||||
}
|
||||
}
|
||||
|
||||
// Block represents a nested block structure
type Block struct {
	Type   string   // the block type keyword
	Labels []string // any labels between the type and the opening brace
	Body   *Body    // the block's own nested body

	TypeRange       hcl.Range   // range of the type keyword
	LabelRanges     []hcl.Range // one range per entry in Labels
	OpenBraceRange  hcl.Range   // range of "{"
	CloseBraceRange hcl.Range   // range of "}"
}

// walkChildNodes visits the block's single child node, its body.
func (b *Block) walkChildNodes(w internalWalkFunc) {
	w(b.Body)
}

// Range returns the full extent of the block, from its type keyword
// through its closing brace.
func (b *Block) Range() hcl.Range {
	return hcl.RangeBetween(b.TypeRange, b.CloseBraceRange)
}

// DefRange returns the block's header only: from the type keyword
// through the opening brace.
func (b *Block) DefRange() hcl.Range {
	return hcl.RangeBetween(b.TypeRange, b.OpenBraceRange)
}
|
||||
118
vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure_at_pos.go
generated
vendored
Normal file
118
vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure_at_pos.go
generated
vendored
Normal file
@@ -0,0 +1,118 @@
|
||||
package hclsyntax
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
)
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// The methods in this file are all optional extension methods that serve to
|
||||
// implement the methods of the same name on *hcl.File when its root body
|
||||
// is provided by this package.
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
// BlocksAtPos implements the method of the same name for an *hcl.File that
|
||||
// is backed by a *Body.
|
||||
func (b *Body) BlocksAtPos(pos hcl.Pos) []*hcl.Block {
|
||||
list, _ := b.blocksAtPos(pos, true)
|
||||
return list
|
||||
}
|
||||
|
||||
// InnermostBlockAtPos implements the method of the same name for an *hcl.File
|
||||
// that is backed by a *Body.
|
||||
func (b *Body) InnermostBlockAtPos(pos hcl.Pos) *hcl.Block {
|
||||
_, innermost := b.blocksAtPos(pos, false)
|
||||
return innermost.AsHCLBlock()
|
||||
}
|
||||
|
||||
// OutermostBlockAtPos implements the method of the same name for an *hcl.File
|
||||
// that is backed by a *Body.
|
||||
func (b *Body) OutermostBlockAtPos(pos hcl.Pos) *hcl.Block {
|
||||
return b.outermostBlockAtPos(pos).AsHCLBlock()
|
||||
}
|
||||
|
||||
// blocksAtPos is the internal engine of both BlocksAtPos and
// InnermostBlockAtPos, which both need to do the same logic but return a
// differently-shaped result.
//
// list is nil if makeList is false, avoiding an allocation. Innermost is
// always set, and if the returned list is non-nil it will always match the
// final element from that list.
func (b *Body) blocksAtPos(pos hcl.Pos, makeList bool) (list []*hcl.Block, innermost *Block) {
	current := b

Blocks:
	for current != nil {
		for _, block := range current.Blocks {
			// A block's full extent runs from its type keyword to its
			// closing brace.
			wholeRange := hcl.RangeBetween(block.TypeRange, block.CloseBraceRange)
			if wholeRange.ContainsPos(pos) {
				innermost = block
				if makeList {
					list = append(list, innermost.AsHCLBlock())
				}
				// Descend into this block's body looking for a more
				// deeply-nested block that also contains pos.
				current = block.Body
				continue Blocks
			}
		}

		// If we fall out here then none of the current body's nested blocks
		// contain the position we are looking for, and so we're done.
		break
	}

	return
}
|
||||
|
||||
// outermostBlockAtPos is the internal version of OutermostBlockAtPos that
|
||||
// returns a hclsyntax.Block rather than an hcl.Block, allowing for further
|
||||
// analysis if necessary.
|
||||
func (b *Body) outermostBlockAtPos(pos hcl.Pos) *Block {
|
||||
// This is similar to blocksAtPos, but simpler because we know it only
|
||||
// ever needs to search the first level of nested blocks.
|
||||
|
||||
for _, block := range b.Blocks {
|
||||
wholeRange := hcl.RangeBetween(block.TypeRange, block.CloseBraceRange)
|
||||
if wholeRange.ContainsPos(pos) {
|
||||
return block
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// AttributeAtPos implements the method of the same name for an *hcl.File
|
||||
// that is backed by a *Body.
|
||||
func (b *Body) AttributeAtPos(pos hcl.Pos) *hcl.Attribute {
|
||||
return b.attributeAtPos(pos).AsHCLAttribute()
|
||||
}
|
||||
|
||||
// attributeAtPos is the internal version of AttributeAtPos that returns a
|
||||
// hclsyntax.Block rather than an hcl.Block, allowing for further analysis if
|
||||
// necessary.
|
||||
func (b *Body) attributeAtPos(pos hcl.Pos) *Attribute {
|
||||
searchBody := b
|
||||
_, block := b.blocksAtPos(pos, false)
|
||||
if block != nil {
|
||||
searchBody = block.Body
|
||||
}
|
||||
|
||||
for _, attr := range searchBody.Attributes {
|
||||
if attr.SrcRange.ContainsPos(pos) {
|
||||
return attr
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// OutermostExprAtPos implements the method of the same name for an *hcl.File
|
||||
// that is backed by a *Body.
|
||||
func (b *Body) OutermostExprAtPos(pos hcl.Pos) hcl.Expression {
|
||||
attr := b.attributeAtPos(pos)
|
||||
if attr == nil {
|
||||
return nil
|
||||
}
|
||||
if !attr.Expr.Range().ContainsPos(pos) {
|
||||
return nil
|
||||
}
|
||||
return attr.Expr
|
||||
}
|
||||
320
vendor/github.com/hashicorp/hcl/v2/hclsyntax/token.go
generated
vendored
Normal file
320
vendor/github.com/hashicorp/hcl/v2/hclsyntax/token.go
generated
vendored
Normal file
@@ -0,0 +1,320 @@
|
||||
package hclsyntax
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
|
||||
"github.com/apparentlymart/go-textseg/textseg"
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
)
|
||||
|
||||
// Token represents a sequence of bytes from some HCL code that has been
|
||||
// tagged with a type and its range within the source file.
|
||||
type Token struct {
|
||||
Type TokenType
|
||||
Bytes []byte
|
||||
Range hcl.Range
|
||||
}
|
||||
|
||||
// Tokens is a slice of Token.
|
||||
type Tokens []Token
|
||||
|
||||
// TokenType is an enumeration used for the Type field on Token.
|
||||
type TokenType rune
|
||||
|
||||
const (
|
||||
// Single-character tokens are represented by their own character, for
|
||||
// convenience in producing these within the scanner. However, the values
|
||||
// are otherwise arbitrary and just intended to be mnemonic for humans
|
||||
// who might see them in debug output.
|
||||
|
||||
TokenOBrace TokenType = '{'
|
||||
TokenCBrace TokenType = '}'
|
||||
TokenOBrack TokenType = '['
|
||||
TokenCBrack TokenType = ']'
|
||||
TokenOParen TokenType = '('
|
||||
TokenCParen TokenType = ')'
|
||||
TokenOQuote TokenType = '«'
|
||||
TokenCQuote TokenType = '»'
|
||||
TokenOHeredoc TokenType = 'H'
|
||||
TokenCHeredoc TokenType = 'h'
|
||||
|
||||
TokenStar TokenType = '*'
|
||||
TokenSlash TokenType = '/'
|
||||
TokenPlus TokenType = '+'
|
||||
TokenMinus TokenType = '-'
|
||||
TokenPercent TokenType = '%'
|
||||
|
||||
TokenEqual TokenType = '='
|
||||
TokenEqualOp TokenType = '≔'
|
||||
TokenNotEqual TokenType = '≠'
|
||||
TokenLessThan TokenType = '<'
|
||||
TokenLessThanEq TokenType = '≤'
|
||||
TokenGreaterThan TokenType = '>'
|
||||
TokenGreaterThanEq TokenType = '≥'
|
||||
|
||||
TokenAnd TokenType = '∧'
|
||||
TokenOr TokenType = '∨'
|
||||
TokenBang TokenType = '!'
|
||||
|
||||
TokenDot TokenType = '.'
|
||||
TokenComma TokenType = ','
|
||||
|
||||
TokenEllipsis TokenType = '…'
|
||||
TokenFatArrow TokenType = '⇒'
|
||||
|
||||
TokenQuestion TokenType = '?'
|
||||
TokenColon TokenType = ':'
|
||||
|
||||
TokenTemplateInterp TokenType = '∫'
|
||||
TokenTemplateControl TokenType = 'λ'
|
||||
TokenTemplateSeqEnd TokenType = '∎'
|
||||
|
||||
TokenQuotedLit TokenType = 'Q' // might contain backslash escapes
|
||||
TokenStringLit TokenType = 'S' // cannot contain backslash escapes
|
||||
TokenNumberLit TokenType = 'N'
|
||||
TokenIdent TokenType = 'I'
|
||||
|
||||
TokenComment TokenType = 'C'
|
||||
|
||||
TokenNewline TokenType = '\n'
|
||||
TokenEOF TokenType = '␄'
|
||||
|
||||
// The rest are not used in the language but recognized by the scanner so
|
||||
// we can generate good diagnostics in the parser when users try to write
|
||||
// things that might work in other languages they are familiar with, or
|
||||
// simply make incorrect assumptions about the HCL language.
|
||||
|
||||
TokenBitwiseAnd TokenType = '&'
|
||||
TokenBitwiseOr TokenType = '|'
|
||||
TokenBitwiseNot TokenType = '~'
|
||||
TokenBitwiseXor TokenType = '^'
|
||||
TokenStarStar TokenType = '➚'
|
||||
TokenApostrophe TokenType = '\''
|
||||
TokenBacktick TokenType = '`'
|
||||
TokenSemicolon TokenType = ';'
|
||||
TokenTabs TokenType = '␉'
|
||||
TokenInvalid TokenType = '<27>'
|
||||
TokenBadUTF8 TokenType = '💩'
|
||||
TokenQuotedNewline TokenType = ''
|
||||
|
||||
// TokenNil is a placeholder for when a token is required but none is
|
||||
// available, e.g. when reporting errors. The scanner will never produce
|
||||
// this as part of a token stream.
|
||||
TokenNil TokenType = '\x00'
|
||||
)
|
||||
|
||||
// GoString implements fmt.GoStringer by qualifying the String result with
// the package name.
func (t TokenType) GoString() string {
	return fmt.Sprintf("hclsyntax.%s", t.String())
}

// scanMode selects one of the scanner's sub-grammars. The modes themselves
// are consumed elsewhere in this package; the names suggest normal HCL
// scanning, template-content scanning, and identifier-only scanning —
// TODO confirm against the scanner.
type scanMode int

const (
	scanNormal scanMode = iota
	scanTemplate
	scanIdentOnly
)

// tokenAccum accumulates tokens during scanning, tracking the current
// source position as it emits them (see emitToken).
type tokenAccum struct {
	Filename  string  // filename recorded in each token's Range
	Bytes     []byte  // source buffer that token offsets refer into
	Pos       hcl.Pos // position after the most recently emitted token
	Tokens    []Token // tokens accumulated so far
	StartByte int     // offset added to token byte offsets; presumably Bytes' start within the whole input — TODO confirm
}
|
||||
|
||||
// emitToken appends a token of the given type covering the half-open byte
// range [startOfs, endOfs) within f.Bytes, and advances f.Pos to the token's
// end position as a side-effect.
func (f *tokenAccum) emitToken(ty TokenType, startOfs, endOfs int) {
	// Walk through our buffer to figure out how much we need to adjust
	// the start pos to get our end pos.

	start := f.Pos
	start.Column += startOfs + f.StartByte - f.Pos.Byte // Safe because only ASCII spaces can be in the offset
	start.Byte = startOfs + f.StartByte

	end := start
	end.Byte = endOfs + f.StartByte
	// Advance the end position one grapheme cluster at a time, so that the
	// column counts user-perceived characters rather than bytes, and so
	// that "\n" or "\r\n" sequences start a new line at column 1.
	b := f.Bytes[startOfs:endOfs]
	for len(b) > 0 {
		advance, seq, _ := textseg.ScanGraphemeClusters(b, true)
		if (len(seq) == 1 && seq[0] == '\n') || (len(seq) == 2 && seq[0] == '\r' && seq[1] == '\n') {
			end.Line++
			end.Column = 1
		} else {
			end.Column++
		}
		b = b[advance:]
	}

	f.Pos = end

	f.Tokens = append(f.Tokens, Token{
		Type:  ty,
		Bytes: f.Bytes[startOfs:endOfs],
		Range: hcl.Range{
			Filename: f.Filename,
			Start:    start,
			End:      end,
		},
	})
}
|
||||
|
||||
// heredocInProgress tracks state for a heredoc that the scanner has opened
// but not yet closed.
type heredocInProgress struct {
	Marker      []byte // presumably the delimiter word that terminates the heredoc — consumed elsewhere
	StartOfLine bool   // presumably whether scanning is at the start of a line — consumed elsewhere
}

// tokenOpensFlushHeredoc returns true if the given token is a heredoc
// opener written with the "<<-" (flush/indented) introducer rather than
// plain "<<".
func tokenOpensFlushHeredoc(tok Token) bool {
	if tok.Type != TokenOHeredoc {
		return false
	}
	return bytes.HasPrefix(tok.Bytes, []byte{'<', '<', '-'})
}
|
||||
|
||||
// checkInvalidTokens does a simple pass across the given tokens and generates
|
||||
// diagnostics for tokens that should _never_ appear in HCL source. This
|
||||
// is intended to avoid the need for the parser to have special support
|
||||
// for them all over.
|
||||
//
|
||||
// Returns a diagnostics with no errors if everything seems acceptable.
|
||||
// Otherwise, returns zero or more error diagnostics, though tries to limit
|
||||
// repetition of the same information.
|
||||
func checkInvalidTokens(tokens Tokens) hcl.Diagnostics {
|
||||
var diags hcl.Diagnostics
|
||||
|
||||
toldBitwise := 0
|
||||
toldExponent := 0
|
||||
toldBacktick := 0
|
||||
toldApostrophe := 0
|
||||
toldSemicolon := 0
|
||||
toldTabs := 0
|
||||
toldBadUTF8 := 0
|
||||
|
||||
for _, tok := range tokens {
|
||||
// copy token so it's safe to point to it
|
||||
tok := tok
|
||||
|
||||
switch tok.Type {
|
||||
case TokenBitwiseAnd, TokenBitwiseOr, TokenBitwiseXor, TokenBitwiseNot:
|
||||
if toldBitwise < 4 {
|
||||
var suggestion string
|
||||
switch tok.Type {
|
||||
case TokenBitwiseAnd:
|
||||
suggestion = " Did you mean boolean AND (\"&&\")?"
|
||||
case TokenBitwiseOr:
|
||||
suggestion = " Did you mean boolean OR (\"&&\")?"
|
||||
case TokenBitwiseNot:
|
||||
suggestion = " Did you mean boolean NOT (\"!\")?"
|
||||
}
|
||||
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Unsupported operator",
|
||||
Detail: fmt.Sprintf("Bitwise operators are not supported.%s", suggestion),
|
||||
Subject: &tok.Range,
|
||||
})
|
||||
toldBitwise++
|
||||
}
|
||||
case TokenStarStar:
|
||||
if toldExponent < 1 {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Unsupported operator",
|
||||
Detail: "\"**\" is not a supported operator. Exponentiation is not supported as an operator.",
|
||||
Subject: &tok.Range,
|
||||
})
|
||||
|
||||
toldExponent++
|
||||
}
|
||||
case TokenBacktick:
|
||||
// Only report for alternating (even) backticks, so we won't report both start and ends of the same
|
||||
// backtick-quoted string.
|
||||
if (toldBacktick % 2) == 0 {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid character",
|
||||
Detail: "The \"`\" character is not valid. To create a multi-line string, use the \"heredoc\" syntax, like \"<<EOT\".",
|
||||
Subject: &tok.Range,
|
||||
})
|
||||
}
|
||||
if toldBacktick <= 2 {
|
||||
toldBacktick++
|
||||
}
|
||||
case TokenApostrophe:
|
||||
if (toldApostrophe % 2) == 0 {
|
||||
newDiag := &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid character",
|
||||
Detail: "Single quotes are not valid. Use double quotes (\") to enclose strings.",
|
||||
Subject: &tok.Range,
|
||||
}
|
||||
diags = append(diags, newDiag)
|
||||
}
|
||||
if toldApostrophe <= 2 {
|
||||
toldApostrophe++
|
||||
}
|
||||
case TokenSemicolon:
|
||||
if toldSemicolon < 1 {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid character",
|
||||
Detail: "The \";\" character is not valid. Use newlines to separate arguments and blocks, and commas to separate items in collection values.",
|
||||
Subject: &tok.Range,
|
||||
})
|
||||
|
||||
toldSemicolon++
|
||||
}
|
||||
case TokenTabs:
|
||||
if toldTabs < 1 {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid character",
|
||||
Detail: "Tab characters may not be used. The recommended indentation style is two spaces per indent.",
|
||||
Subject: &tok.Range,
|
||||
})
|
||||
|
||||
toldTabs++
|
||||
}
|
||||
case TokenBadUTF8:
|
||||
if toldBadUTF8 < 1 {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid character encoding",
|
||||
Detail: "All input files must be UTF-8 encoded. Ensure that UTF-8 encoding is selected in your editor.",
|
||||
Subject: &tok.Range,
|
||||
})
|
||||
|
||||
toldBadUTF8++
|
||||
}
|
||||
case TokenQuotedNewline:
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid multi-line string",
|
||||
Detail: "Quoted strings may not be split over multiple lines. To produce a multi-line string, either use the \\n escape to represent a newline character or use the \"heredoc\" multi-line template syntax.",
|
||||
Subject: &tok.Range,
|
||||
})
|
||||
case TokenInvalid:
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid character",
|
||||
Detail: "This character is not used within the language.",
|
||||
Subject: &tok.Range,
|
||||
})
|
||||
}
|
||||
}
|
||||
return diags
|
||||
}
|
||||
|
||||
// utf8BOM is the UTF-8 encoding of the Unicode byte order mark.
var utf8BOM = []byte{0xef, 0xbb, 0xbf}

// stripUTF8BOM checks whether the given buffer begins with a UTF-8 byte order
// mark (0xEF 0xBB 0xBF) and, if so, returns a truncated slice with the same
// backing array but with the BOM skipped.
//
// If there is no BOM present, the given slice is returned verbatim.
func stripUTF8BOM(src []byte) []byte {
	if !bytes.HasPrefix(src, utf8BOM) {
		return src
	}
	return src[len(utf8BOM):]
}
|
||||
131
vendor/github.com/hashicorp/hcl/v2/hclsyntax/token_type_string.go
generated
vendored
Normal file
131
vendor/github.com/hashicorp/hcl/v2/hclsyntax/token_type_string.go
generated
vendored
Normal file
@@ -0,0 +1,131 @@
|
||||
// Code generated by "stringer -type TokenType -output token_type_string.go"; DO NOT EDIT.
|
||||
|
||||
package hclsyntax
|
||||
|
||||
import "strconv"
|
||||
|
||||
// This function exists only as a compile-time assertion: if any TokenType
// constant value drifts from what stringer generated from, one of the array
// indexes below becomes negative and the build fails, signalling that the
// stringer command must be re-run. (Generated code — do not hand-edit.)
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[TokenOBrace-123]
	_ = x[TokenCBrace-125]
	_ = x[TokenOBrack-91]
	_ = x[TokenCBrack-93]
	_ = x[TokenOParen-40]
	_ = x[TokenCParen-41]
	_ = x[TokenOQuote-171]
	_ = x[TokenCQuote-187]
	_ = x[TokenOHeredoc-72]
	_ = x[TokenCHeredoc-104]
	_ = x[TokenStar-42]
	_ = x[TokenSlash-47]
	_ = x[TokenPlus-43]
	_ = x[TokenMinus-45]
	_ = x[TokenPercent-37]
	_ = x[TokenEqual-61]
	_ = x[TokenEqualOp-8788]
	_ = x[TokenNotEqual-8800]
	_ = x[TokenLessThan-60]
	_ = x[TokenLessThanEq-8804]
	_ = x[TokenGreaterThan-62]
	_ = x[TokenGreaterThanEq-8805]
	_ = x[TokenAnd-8743]
	_ = x[TokenOr-8744]
	_ = x[TokenBang-33]
	_ = x[TokenDot-46]
	_ = x[TokenComma-44]
	_ = x[TokenEllipsis-8230]
	_ = x[TokenFatArrow-8658]
	_ = x[TokenQuestion-63]
	_ = x[TokenColon-58]
	_ = x[TokenTemplateInterp-8747]
	_ = x[TokenTemplateControl-955]
	_ = x[TokenTemplateSeqEnd-8718]
	_ = x[TokenQuotedLit-81]
	_ = x[TokenStringLit-83]
	_ = x[TokenNumberLit-78]
	_ = x[TokenIdent-73]
	_ = x[TokenComment-67]
	_ = x[TokenNewline-10]
	_ = x[TokenEOF-9220]
	_ = x[TokenBitwiseAnd-38]
	_ = x[TokenBitwiseOr-124]
	_ = x[TokenBitwiseNot-126]
	_ = x[TokenBitwiseXor-94]
	_ = x[TokenStarStar-10138]
	_ = x[TokenApostrophe-39]
	_ = x[TokenBacktick-96]
	_ = x[TokenSemicolon-59]
	_ = x[TokenTabs-9225]
	_ = x[TokenInvalid-65533]
	_ = x[TokenBadUTF8-128169]
	_ = x[TokenQuotedNewline-9252]
	_ = x[TokenNil-0]
}

// _TokenType_name concatenates every token type's name; _TokenType_map below
// indexes into it by each TokenType's rune value. (Generated — do not edit.)
const _TokenType_name = "TokenNilTokenNewlineTokenBangTokenPercentTokenBitwiseAndTokenApostropheTokenOParenTokenCParenTokenStarTokenPlusTokenCommaTokenMinusTokenDotTokenSlashTokenColonTokenSemicolonTokenLessThanTokenEqualTokenGreaterThanTokenQuestionTokenCommentTokenOHeredocTokenIdentTokenNumberLitTokenQuotedLitTokenStringLitTokenOBrackTokenCBrackTokenBitwiseXorTokenBacktickTokenCHeredocTokenOBraceTokenBitwiseOrTokenCBraceTokenBitwiseNotTokenOQuoteTokenCQuoteTokenTemplateControlTokenEllipsisTokenFatArrowTokenTemplateSeqEndTokenAndTokenOrTokenTemplateInterpTokenEqualOpTokenNotEqualTokenLessThanEqTokenGreaterThanEqTokenEOFTokenTabsTokenQuotedNewlineTokenStarStarTokenInvalidTokenBadUTF8"

var _TokenType_map = map[TokenType]string{
	0:      _TokenType_name[0:8],
	10:     _TokenType_name[8:20],
	33:     _TokenType_name[20:29],
	37:     _TokenType_name[29:41],
	38:     _TokenType_name[41:56],
	39:     _TokenType_name[56:71],
	40:     _TokenType_name[71:82],
	41:     _TokenType_name[82:93],
	42:     _TokenType_name[93:102],
	43:     _TokenType_name[102:111],
	44:     _TokenType_name[111:121],
	45:     _TokenType_name[121:131],
	46:     _TokenType_name[131:139],
	47:     _TokenType_name[139:149],
	58:     _TokenType_name[149:159],
	59:     _TokenType_name[159:173],
	60:     _TokenType_name[173:186],
	61:     _TokenType_name[186:196],
	62:     _TokenType_name[196:212],
	63:     _TokenType_name[212:225],
	67:     _TokenType_name[225:237],
	72:     _TokenType_name[237:250],
	73:     _TokenType_name[250:260],
	78:     _TokenType_name[260:274],
	81:     _TokenType_name[274:288],
	83:     _TokenType_name[288:302],
	91:     _TokenType_name[302:313],
	93:     _TokenType_name[313:324],
	94:     _TokenType_name[324:339],
	96:     _TokenType_name[339:352],
	104:    _TokenType_name[352:365],
	123:    _TokenType_name[365:376],
	124:    _TokenType_name[376:390],
	125:    _TokenType_name[390:401],
	126:    _TokenType_name[401:416],
	171:    _TokenType_name[416:427],
	187:    _TokenType_name[427:438],
	955:    _TokenType_name[438:458],
	8230:   _TokenType_name[458:471],
	8658:   _TokenType_name[471:484],
	8718:   _TokenType_name[484:503],
	8743:   _TokenType_name[503:511],
	8744:   _TokenType_name[511:518],
	8747:   _TokenType_name[518:537],
	8788:   _TokenType_name[537:549],
	8800:   _TokenType_name[549:562],
	8804:   _TokenType_name[562:577],
	8805:   _TokenType_name[577:595],
	9220:   _TokenType_name[595:603],
	9225:   _TokenType_name[603:612],
	9252:   _TokenType_name[612:630],
	10138:  _TokenType_name[630:643],
	65533:  _TokenType_name[643:655],
	128169: _TokenType_name[655:667],
}

// String returns the token type's name, or a numeric fallback for values
// not present in the generated map.
func (i TokenType) String() string {
	if str, ok := _TokenType_map[i]; ok {
		return str
	}
	return "TokenType(" + strconv.FormatInt(int64(i), 10) + ")"
}
|
||||
335
vendor/github.com/hashicorp/hcl/v2/hclsyntax/unicode2ragel.rb
generated
vendored
Normal file
335
vendor/github.com/hashicorp/hcl/v2/hclsyntax/unicode2ragel.rb
generated
vendored
Normal file
@@ -0,0 +1,335 @@
|
||||
#!/usr/bin/env ruby
|
||||
#
|
||||
# This scripted has been updated to accept more command-line arguments:
|
||||
#
|
||||
# -u, --url URL to process
|
||||
# -m, --machine Machine name
|
||||
# -p, --properties Properties to add to the machine
|
||||
# -o, --output Write output to file
|
||||
#
|
||||
# Updated by: Marty Schoch <marty.schoch@gmail.com>
|
||||
#
|
||||
# This script uses the unicode spec to generate a Ragel state machine
|
||||
# that recognizes unicode alphanumeric characters. It generates 5
|
||||
# character classes: uupper, ulower, ualpha, udigit, and ualnum.
|
||||
# Currently supported encodings are UTF-8 [default] and UCS-4.
|
||||
#
|
||||
# Usage: unicode2ragel.rb [options]
|
||||
# -e, --encoding [ucs4 | utf8] Data encoding
|
||||
# -h, --help Show this message
|
||||
#
|
||||
# This script was originally written as part of the Ferret search
|
||||
# engine library.
|
||||
#
|
||||
# Author: Rakan El-Khalil <rakan@well.com>
|
||||
|
||||
require 'optparse'
|
||||
require 'open-uri'
|
||||
|
||||
# Supported data encodings and the Ragel alphabet type emitted for each.
ENCODINGS = [ :utf8, :ucs4 ]
ALPHTYPES = { :utf8 => "byte", :ucs4 => "rune" }
# Default Unicode data file to process and default machine name.
DEFAULT_CHART_URL = "http://www.unicode.org/Public/5.1.0/ucd/DerivedCoreProperties.txt"
DEFAULT_MACHINE_NAME= "WChar"

###
# Display vars & default option

TOTAL_WIDTH = 80
RANGE_WIDTH = 23
@encoding = :utf8
@chart_url = DEFAULT_CHART_URL
machine_name = DEFAULT_MACHINE_NAME
properties = []
@output = $stdout

###
# Option parsing

cli_opts = OptionParser.new do |opts|
  opts.on("-e", "--encoding [ucs4 | utf8]", "Data encoding") do |o|
    @encoding = o.downcase.to_sym
  end
  opts.on("-h", "--help", "Show this message") do
    puts opts
    exit
  end
  opts.on("-u", "--url URL", "URL to process") do |o|
    @chart_url = o
  end
  opts.on("-m", "--machine MACHINE_NAME", "Machine name") do |o|
    machine_name = o
  end
  opts.on("-p", "--properties x,y,z", Array, "Properties to add to machine") do |o|
    properties = o
  end
  opts.on("-o", "--output FILE", "output file") do |o|
    @output = File.new(o, "w+")
  end
end

cli_opts.parse(ARGV)
# Reject any encoding other than the two we can generate machines for.
unless ENCODINGS.member? @encoding
  puts "Invalid encoding: #{@encoding}"
  puts cli_opts
  exit
end
|
||||
|
||||
##
# Downloads the document at url and yields every alpha line's hex
# range and description.
#
# NOTE(review): relies on Kernel#open accepting URLs via open-uri, which
# was removed in Ruby 3.0 — confirm the target Ruby version.

def each_alpha( url, property )
  open( url ) do |file|
    file.each_line do |line|
      # Skip comment lines and lines that don't carry the wanted property.
      next if line =~ /^#/;
      next if line !~ /; #{property} #/;

      range, description = line.split(/;/)
      range.strip!
      description.gsub!(/.*#/, '').strip!

      # "A..B" is a range; a bare value is a single-codepoint range.
      if range =~ /\.\./
        start, stop = range.split '..'
      else start = stop = range
      end

      yield start.hex .. stop.hex, description
    end
  end
end

###
# Formats to hex at minimum width

def to_hex( n )
  r = "%0X" % n
  # Pad to an even number of hex digits (whole bytes).
  r = "0#{r}" unless (r.length % 2).zero?
  r
end

###
# UCS4 is just a straight hex conversion of the unicode codepoint.

def to_ucs4( range )
  rangestr = "0x" + to_hex(range.begin)
  rangestr << "..0x" + to_hex(range.end) if range.begin != range.end
  [ rangestr ]
end
|
||||
|
||||
##
|
||||
# 0x00 - 0x7f -> 0zzzzzzz[7]
|
||||
# 0x80 - 0x7ff -> 110yyyyy[5] 10zzzzzz[6]
|
||||
# 0x800 - 0xffff -> 1110xxxx[4] 10yyyyyy[6] 10zzzzzz[6]
|
||||
# 0x010000 - 0x10ffff -> 11110www[3] 10xxxxxx[6] 10yyyyyy[6] 10zzzzzz[6]
|
||||
|
||||
UTF8_BOUNDARIES = [0x7f, 0x7ff, 0xffff, 0x10ffff]
|
||||
|
||||
def to_utf8_enc( n )
|
||||
r = 0
|
||||
if n <= 0x7f
|
||||
r = n
|
||||
elsif n <= 0x7ff
|
||||
y = 0xc0 | (n >> 6)
|
||||
z = 0x80 | (n & 0x3f)
|
||||
r = y << 8 | z
|
||||
elsif n <= 0xffff
|
||||
x = 0xe0 | (n >> 12)
|
||||
y = 0x80 | (n >> 6) & 0x3f
|
||||
z = 0x80 | n & 0x3f
|
||||
r = x << 16 | y << 8 | z
|
||||
elsif n <= 0x10ffff
|
||||
w = 0xf0 | (n >> 18)
|
||||
x = 0x80 | (n >> 12) & 0x3f
|
||||
y = 0x80 | (n >> 6) & 0x3f
|
||||
z = 0x80 | n & 0x3f
|
||||
r = w << 24 | x << 16 | y << 8 | z
|
||||
end
|
||||
|
||||
to_hex(r)
|
||||
end
|
||||
|
||||
def from_utf8_enc( n )
|
||||
n = n.hex
|
||||
r = 0
|
||||
if n <= 0x7f
|
||||
r = n
|
||||
elsif n <= 0xdfff
|
||||
y = (n >> 8) & 0x1f
|
||||
z = n & 0x3f
|
||||
r = y << 6 | z
|
||||
elsif n <= 0xefffff
|
||||
x = (n >> 16) & 0x0f
|
||||
y = (n >> 8) & 0x3f
|
||||
z = n & 0x3f
|
||||
r = x << 10 | y << 6 | z
|
||||
elsif n <= 0xf7ffffff
|
||||
w = (n >> 24) & 0x07
|
||||
x = (n >> 16) & 0x3f
|
||||
y = (n >> 8) & 0x3f
|
||||
z = n & 0x3f
|
||||
r = w << 18 | x << 12 | y << 6 | z
|
||||
end
|
||||
r
|
||||
end
|
||||
|
||||
###
|
||||
# Given a range, splits it up into ranges that can be continuously
|
||||
# encoded into utf8. Eg: 0x00 .. 0xff => [0x00..0x7f, 0x80..0xff]
|
||||
# This is not strictly needed since the current [5.1] unicode standard
|
||||
# doesn't have ranges that straddle utf8 boundaries. This is included
|
||||
# for completeness as there is no telling if that will ever change.
|
||||
|
||||
def utf8_ranges( range )
|
||||
ranges = []
|
||||
UTF8_BOUNDARIES.each do |max|
|
||||
if range.begin <= max
|
||||
if range.end <= max
|
||||
ranges << range
|
||||
return ranges
|
||||
end
|
||||
|
||||
ranges << (range.begin .. max)
|
||||
range = (max + 1) .. range.end
|
||||
end
|
||||
end
|
||||
ranges
|
||||
end
|
||||
|
||||
def build_range( start, stop )
|
||||
size = start.size/2
|
||||
left = size - 1
|
||||
return [""] if size < 1
|
||||
|
||||
a = start[0..1]
|
||||
b = stop[0..1]
|
||||
|
||||
###
|
||||
# Shared prefix
|
||||
|
||||
if a == b
|
||||
return build_range(start[2..-1], stop[2..-1]).map do |elt|
|
||||
"0x#{a} " + elt
|
||||
end
|
||||
end
|
||||
|
||||
###
|
||||
# Unshared prefix, end of run
|
||||
|
||||
return ["0x#{a}..0x#{b} "] if left.zero?
|
||||
|
||||
###
|
||||
# Unshared prefix, not end of run
|
||||
# Range can be 0x123456..0x56789A
|
||||
# Which is equivalent to:
|
||||
# 0x123456 .. 0x12FFFF
|
||||
# 0x130000 .. 0x55FFFF
|
||||
# 0x560000 .. 0x56789A
|
||||
|
||||
ret = []
|
||||
ret << build_range(start, a + "FF" * left)
|
||||
|
||||
###
|
||||
# Only generate middle range if need be.
|
||||
|
||||
if a.hex+1 != b.hex
|
||||
max = to_hex(b.hex - 1)
|
||||
max = "FF" if b == "FF"
|
||||
ret << "0x#{to_hex(a.hex+1)}..0x#{max} " + "0x00..0xFF " * left
|
||||
end
|
||||
|
||||
###
|
||||
# Don't generate last range if it is covered by first range
|
||||
|
||||
ret << build_range(b + "00" * left, stop) unless b == "FF"
|
||||
ret.flatten!
|
||||
end
|
||||
|
||||
# Encodes a codepoint range as a flat list of UTF-8 byte-range strings,
# splitting at UTF-8 length boundaries first.
def to_utf8( range )
  utf8_ranges( range ).map do |r|
    begin_enc = to_utf8_enc(r.begin)
    end_enc = to_utf8_enc(r.end)
    build_range begin_enc, end_enc
  # flatten (not flatten!) so the result is always the flattened array;
  # flatten! returns nil when nothing was nested.
  end.flatten
end
|
||||
|
||||
##
# Perform a 3-way comparison of the number of codepoints advertised by
# the unicode spec for the given range, the originally parsed range,
# and the resulting utf8 encoded range.

def count_codepoints( code )
  # Multiply together the width of every "0xA..0xB" column; other
  # columns (single bytes) contribute a factor of 1.
  code.split(' ').inject(1) do |acc, elt|
    md = /0x(.+)\.\.0x(.+)/.match(elt)
    next acc unless md

    lo, hi = md[1], md[2]
    if @encoding == :utf8
      acc * (from_utf8_enc(hi) - from_utf8_enc(lo) + 1)
    else
      acc * (hi.hex - lo.hex + 1)
    end
  end
end
|
||||
|
||||
# True when the spec's advertised count, the parsed range's size, and the
# encoded ranges' total codepoint count all agree.
def is_valid?( range, desc, codes )
  # The description may advertise a count as "[N]"; default is 1.
  spec_count = desc =~ /\[(\d+)\]/ ? $1.to_i : 1
  range_count = range.end - range.begin + 1

  sum = codes.inject(0) { |acc, elt| acc + count_codepoints(elt) }
  sum == spec_count && sum == range_count
end
|
||||
|
||||
##
# Generate the state machine to stdout
#
# Emits one Ragel machine definition named +name+ whose alternatives are
# the encoded ranges for the given Unicode +property+.
# NOTE(review): the exact spacing inside the emitted strings was collapsed
# in this dump; reconstructed per the upstream unicode2ragel.rb — confirm.

def generate_machine( name, property )
  pipe = " "
  @output.puts "  #{name} = "
  each_alpha( @chart_url, property ) do |range, desc|

    codes = (@encoding == :ucs4) ? to_ucs4(range) : to_utf8(range)

    #raise "Invalid encoding of range #{range}: #{codes.inspect}" unless
    #  is_valid? range, desc, codes

    # Pad the range column to the widest code, never below RANGE_WIDTH.
    range_width = codes.map { |a| a.size }.max
    range_width = RANGE_WIDTH if range_width < RANGE_WIDTH

    desc_width  = TOTAL_WIDTH - RANGE_WIDTH - 11
    desc_width -= (range_width - RANGE_WIDTH) if range_width > RANGE_WIDTH

    # Truncate long descriptions with an ellipsis.
    if desc.size > desc_width
      desc = desc[0..desc_width - 4] + "..."
    end

    codes.each_with_index do |r, idx|
      desc = "" unless idx.zero?  # only the first alternative carries the description
      code = "%-#{range_width}s" % r
      @output.puts "    #{pipe} #{code} ##{desc}"
      pipe = "|"  # subsequent alternatives are prefixed with "|"
    end
  end
  @output.puts "    ;"
  @output.puts ""
end
|
||||
|
||||
# Emit the file header and open the Ragel machine block.
@output.puts <<EOF
# The following Ragel file was autogenerated with #{$0}
# from: #{@chart_url}
#
# It defines #{properties}.
#
# To use this, make sure that your alphtype is set to #{ALPHTYPES[@encoding]},
# and that your input is in #{@encoding}.

%%{
    machine #{machine_name};

EOF

# One machine definition per requested property.
properties.each { |x| generate_machine( x, x ) }

# Close the Ragel block.
@output.puts <<EOF
}%%
EOF
|
||||
2135
vendor/github.com/hashicorp/hcl/v2/hclsyntax/unicode_derived.rl
generated
vendored
Normal file
2135
vendor/github.com/hashicorp/hcl/v2/hclsyntax/unicode_derived.rl
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
86
vendor/github.com/hashicorp/hcl/v2/hclsyntax/variables.go
generated
vendored
Normal file
86
vendor/github.com/hashicorp/hcl/v2/hclsyntax/variables.go
generated
vendored
Normal file
@@ -0,0 +1,86 @@
|
||||
package hclsyntax
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
)
|
||||
|
||||
// Variables returns all of the variables referenced within a given experssion.
|
||||
//
|
||||
// This is the implementation of the "Variables" method on every native
|
||||
// expression.
|
||||
func Variables(expr Expression) []hcl.Traversal {
|
||||
var vars []hcl.Traversal
|
||||
|
||||
walker := &variablesWalker{
|
||||
Callback: func(t hcl.Traversal) {
|
||||
vars = append(vars, t)
|
||||
},
|
||||
}
|
||||
|
||||
Walk(expr, walker)
|
||||
|
||||
return vars
|
||||
}
|
||||
|
||||
// variablesWalker is a Walker implementation that calls its callback for any
// root scope traversal found while walking.
type variablesWalker struct {
	// Callback receives each traversal whose root name is not shadowed by
	// an active local scope.
	Callback func(hcl.Traversal)

	// localScopes is a stack of local-name sets, pushed and popped as the
	// walk enters and exits ChildScope nodes.
	localScopes []map[string]struct{}
}
|
||||
|
||||
func (w *variablesWalker) Enter(n Node) hcl.Diagnostics {
|
||||
switch tn := n.(type) {
|
||||
case *ScopeTraversalExpr:
|
||||
t := tn.Traversal
|
||||
|
||||
// Check if the given root name appears in any of the active
|
||||
// local scopes. We don't want to return local variables here, since
|
||||
// the goal of walking variables is to tell the calling application
|
||||
// which names it needs to populate in the _root_ scope.
|
||||
name := t.RootName()
|
||||
for _, names := range w.localScopes {
|
||||
if _, localized := names[name]; localized {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
w.Callback(t)
|
||||
case ChildScope:
|
||||
w.localScopes = append(w.localScopes, tn.LocalNames)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *variablesWalker) Exit(n Node) hcl.Diagnostics {
|
||||
switch n.(type) {
|
||||
case ChildScope:
|
||||
// pop the latest local scope, assuming that the walker will
|
||||
// behave symmetrically as promised.
|
||||
w.localScopes = w.localScopes[:len(w.localScopes)-1]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ChildScope is a synthetic AST node that is visited during a walk to
// indicate that its descendent will be evaluated in a child scope, which
// may mask certain variables from the parent scope as locals.
//
// ChildScope nodes don't really exist in the AST, but are rather synthesized
// on the fly during walk. Therefore it doesn't do any good to transform them;
// instead, transform either parent node that created a scope or the expression
// that the child scope struct wraps.
type ChildScope struct {
	// LocalNames is the set of names masked from the parent scope.
	LocalNames map[string]struct{}
	// Expr is the expression evaluated within the child scope.
	Expr Expression
}

// walkChildNodes visits the wrapped expression.
func (e ChildScope) walkChildNodes(w internalWalkFunc) {
	w(e.Expr)
}

// Range returns the range of the expression that the ChildScope is
// encapsulating. It isn't really very useful to call Range on a ChildScope.
func (e ChildScope) Range() hcl.Range {
	return e.Expr.Range()
}
|
||||
41
vendor/github.com/hashicorp/hcl/v2/hclsyntax/walk.go
generated
vendored
Normal file
41
vendor/github.com/hashicorp/hcl/v2/hclsyntax/walk.go
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
package hclsyntax
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
)
|
||||
|
||||
// VisitFunc is the callback signature for VisitAll.
|
||||
type VisitFunc func(node Node) hcl.Diagnostics
|
||||
|
||||
// VisitAll is a basic way to traverse the AST beginning with a particular
|
||||
// node. The given function will be called once for each AST node in
|
||||
// depth-first order, but no context is provided about the shape of the tree.
|
||||
//
|
||||
// The VisitFunc may return diagnostics, in which case they will be accumulated
|
||||
// and returned as a single set.
|
||||
func VisitAll(node Node, f VisitFunc) hcl.Diagnostics {
|
||||
diags := f(node)
|
||||
node.walkChildNodes(func(node Node) {
|
||||
diags = append(diags, VisitAll(node, f)...)
|
||||
})
|
||||
return diags
|
||||
}
|
||||
|
||||
// Walker is an interface used with Walk.
type Walker interface {
	// Enter is called for a node before its children are walked.
	Enter(node Node) hcl.Diagnostics
	// Exit is called after the node's children have been walked.
	Exit(node Node) hcl.Diagnostics
}
|
||||
|
||||
// Walk is a more complex way to traverse the AST starting with a particular
|
||||
// node, which provides information about the tree structure via separate
|
||||
// Enter and Exit functions.
|
||||
func Walk(node Node, w Walker) hcl.Diagnostics {
|
||||
diags := w.Enter(node)
|
||||
node.walkChildNodes(func(node Node) {
|
||||
diags = append(diags, Walk(node, w)...)
|
||||
})
|
||||
moreDiags := w.Exit(node)
|
||||
diags = append(diags, moreDiags...)
|
||||
return diags
|
||||
}
|
||||
121
vendor/github.com/hashicorp/hcl/v2/hclwrite/ast.go
generated
vendored
Normal file
121
vendor/github.com/hashicorp/hcl/v2/hclwrite/ast.go
generated
vendored
Normal file
@@ -0,0 +1,121 @@
|
||||
package hclwrite
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
)
|
||||
|
||||
// File represents an HCL file whose token tree can be mutated and then
// re-serialized.
type File struct {
	inTree

	// srcBytes — presumably the original source this file was parsed from;
	// not read anywhere in this view — TODO confirm against the parser.
	srcBytes []byte
	// body is the child node holding the root *Body.
	body *node
}
|
||||
|
||||
// NewEmptyFile constructs a new file with no content, ready to be mutated
|
||||
// by other calls that append to its body.
|
||||
func NewEmptyFile() *File {
|
||||
f := &File{
|
||||
inTree: newInTree(),
|
||||
}
|
||||
body := newBody()
|
||||
f.body = f.children.Append(body)
|
||||
return f
|
||||
}
|
||||
|
||||
// Body returns the root body of the file, which contains the top-level
// attributes and blocks.
func (f *File) Body() *Body {
	return f.body.content.(*Body)
}
|
||||
|
||||
// WriteTo writes the tokens underlying the receiving file to the given writer.
|
||||
//
|
||||
// The tokens first have a simple formatting pass applied that adjusts only
|
||||
// the spaces between them.
|
||||
func (f *File) WriteTo(wr io.Writer) (int64, error) {
|
||||
tokens := f.inTree.children.BuildTokens(nil)
|
||||
format(tokens)
|
||||
return tokens.WriteTo(wr)
|
||||
}
|
||||
|
||||
// Bytes returns a buffer containing the source code resulting from the
|
||||
// tokens underlying the receiving file. If any updates have been made via
|
||||
// the AST API, these will be reflected in the result.
|
||||
func (f *File) Bytes() []byte {
|
||||
buf := &bytes.Buffer{}
|
||||
f.WriteTo(buf)
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
// comments is a leaf node holding a run of comment tokens.
type comments struct {
	leafNode

	parent *node
	tokens Tokens
}

// newComments wraps the given tokens as a comments leaf node.
func newComments(tokens Tokens) *comments {
	return &comments{
		tokens: tokens,
	}
}

// BuildTokens appends the comment tokens to the given sequence.
func (c *comments) BuildTokens(to Tokens) Tokens {
	return c.tokens.BuildTokens(to)
}
|
||||
|
||||
// identifier is a leaf node holding a single identifier token.
type identifier struct {
	leafNode

	parent *node
	token  *Token
}

// newIdentifier wraps the given token as an identifier leaf node.
func newIdentifier(token *Token) *identifier {
	return &identifier{
		token: token,
	}
}

// BuildTokens appends the identifier's token to the given sequence.
func (i *identifier) BuildTokens(to Tokens) Tokens {
	return append(to, i.token)
}

// hasName reports whether the identifier's bytes equal the given name.
func (i *identifier) hasName(name string) bool {
	return name == string(i.token.Bytes)
}
|
||||
|
||||
// number is a leaf node holding a single numeric literal token.
type number struct {
	leafNode

	parent *node
	token  *Token
}

// newNumber wraps the given token as a number leaf node.
func newNumber(token *Token) *number {
	return &number{
		token: token,
	}
}

// BuildTokens appends the number's token to the given sequence.
func (n *number) BuildTokens(to Tokens) Tokens {
	return append(to, n.token)
}
|
||||
|
||||
type quoted struct {
|
||||
leafNode
|
||||
|
||||
parent *node
|
||||
tokens Tokens
|
||||
}
|
||||
|
||||
func newQuoted(tokens Tokens) *quoted {
|
||||
return "ed{
|
||||
tokens: tokens,
|
||||
}
|
||||
}
|
||||
|
||||
func (q *quoted) BuildTokens(to Tokens) Tokens {
|
||||
return q.tokens.BuildTokens(to)
|
||||
}
|
||||
48
vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_attribute.go
generated
vendored
Normal file
48
vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_attribute.go
generated
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
package hclwrite
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl/v2/hclsyntax"
|
||||
)
|
||||
|
||||
// Attribute represents a single attribute definition (name = expr) within a
// body, including any comments attached to it.
type Attribute struct {
	inTree

	leadComments *node // comments on the lines preceding the attribute
	name         *node // the attribute name (an *identifier)
	expr         *node // the attribute value (an *Expression)
	lineComments *node // trailing comment on the same line
}
|
||||
|
||||
func newAttribute() *Attribute {
|
||||
return &Attribute{
|
||||
inTree: newInTree(),
|
||||
}
|
||||
}
|
||||
|
||||
// init populates an empty Attribute with the nodes and tokens for
// "name = expr\n", appending children in source order.
func (a *Attribute) init(name string, expr *Expression) {
	// The expression must not already belong to another tree.
	expr.assertUnattached()

	nameTok := newIdentToken(name)
	nameObj := newIdentifier(nameTok)
	// Empty comment placeholders are kept so comments can be added later.
	a.leadComments = a.children.Append(newComments(nil))
	a.name = a.children.Append(nameObj)
	a.children.AppendUnstructuredTokens(Tokens{
		{
			Type:  hclsyntax.TokenEqual,
			Bytes: []byte{'='},
		},
	})
	a.expr = a.children.Append(expr)
	a.expr.list = a.children
	a.lineComments = a.children.Append(newComments(nil))
	a.children.AppendUnstructuredTokens(Tokens{
		{
			Type:  hclsyntax.TokenNewline,
			Bytes: []byte{'\n'},
		},
	})
}
|
||||
|
||||
// Expr returns the expression that forms the attribute's value.
func (a *Attribute) Expr() *Expression {
	return a.expr.content.(*Expression)
}
|
||||
118
vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_block.go
generated
vendored
Normal file
118
vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_block.go
generated
vendored
Normal file
@@ -0,0 +1,118 @@
|
||||
package hclwrite
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl/v2/hclsyntax"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// Block represents a nested block definition within a body, e.g.
// `type "label" { ... }`.
type Block struct {
	inTree

	leadComments *node   // comments preceding the block
	typeName     *node   // the block type name (an *identifier)
	labels       nodeSet // label nodes (*identifier or *quoted)
	open         *node   // the "{" and following newline tokens
	body         *node   // the nested *Body
	close        *node   // the "}" and following newline tokens
}
|
||||
|
||||
func newBlock() *Block {
|
||||
return &Block{
|
||||
inTree: newInTree(),
|
||||
labels: newNodeSet(),
|
||||
}
|
||||
}
|
||||
|
||||
// NewBlock constructs a new, empty block with the given type name and labels.
|
||||
func NewBlock(typeName string, labels []string) *Block {
|
||||
block := newBlock()
|
||||
block.init(typeName, labels)
|
||||
return block
|
||||
}
|
||||
|
||||
// init populates an empty Block with the nodes and tokens for
// `typeName "label"... {\n<body>}\n`, appending children in source order.
func (b *Block) init(typeName string, labels []string) {
	nameTok := newIdentToken(typeName)
	nameObj := newIdentifier(nameTok)
	b.leadComments = b.children.Append(newComments(nil))
	b.typeName = b.children.Append(nameObj)
	// Each label becomes a quoted-string child node.
	for _, label := range labels {
		labelToks := TokensForValue(cty.StringVal(label))
		labelObj := newQuoted(labelToks)
		labelNode := b.children.Append(labelObj)
		b.labels.Add(labelNode)
	}
	b.open = b.children.AppendUnstructuredTokens(Tokens{
		{
			Type:  hclsyntax.TokenOBrace,
			Bytes: []byte{'{'},
		},
		{
			Type:  hclsyntax.TokenNewline,
			Bytes: []byte{'\n'},
		},
	})
	body := newBody() // initially totally empty; caller can append to it subsequently
	b.body = b.children.Append(body)
	b.close = b.children.AppendUnstructuredTokens(Tokens{
		{
			Type:  hclsyntax.TokenCBrace,
			Bytes: []byte{'}'},
		},
		{
			Type:  hclsyntax.TokenNewline,
			Bytes: []byte{'\n'},
		},
	})
}
|
||||
|
||||
// Body returns the body that represents the content of the receiving block.
//
// Appending to or otherwise modifying this body will make changes to the
// tokens that are generated between the block's open and close braces.
func (b *Block) Body() *Body {
	return b.body.content.(*Body)
}

// Type returns the type name of the block.
func (b *Block) Type() string {
	typeNameObj := b.typeName.content.(*identifier)
	return string(typeNameObj.token.Bytes)
}
|
||||
|
||||
// Labels returns the labels of the block.
|
||||
func (b *Block) Labels() []string {
|
||||
labelNames := make([]string, 0, len(b.labels))
|
||||
list := b.labels.List()
|
||||
|
||||
for _, label := range list {
|
||||
switch labelObj := label.content.(type) {
|
||||
case *identifier:
|
||||
if labelObj.token.Type == hclsyntax.TokenIdent {
|
||||
labelString := string(labelObj.token.Bytes)
|
||||
labelNames = append(labelNames, labelString)
|
||||
}
|
||||
|
||||
case *quoted:
|
||||
tokens := labelObj.tokens
|
||||
if len(tokens) == 3 &&
|
||||
tokens[0].Type == hclsyntax.TokenOQuote &&
|
||||
tokens[1].Type == hclsyntax.TokenQuotedLit &&
|
||||
tokens[2].Type == hclsyntax.TokenCQuote {
|
||||
// Note that TokenQuotedLit may contain escape sequences.
|
||||
labelString, diags := hclsyntax.ParseStringLiteralToken(tokens[1].asHCLSyntax())
|
||||
|
||||
// If parsing the string literal returns error diagnostics
|
||||
// then we can just assume the label doesn't match, because it's invalid in some way.
|
||||
if !diags.HasErrors() {
|
||||
labelNames = append(labelNames, labelString)
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
// If neither of the previous cases are true (should be impossible)
|
||||
// then we can just ignore it, because it's invalid too.
|
||||
}
|
||||
}
|
||||
|
||||
return labelNames
|
||||
}
|
||||
219
vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_body.go
generated
vendored
Normal file
219
vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_body.go
generated
vendored
Normal file
@@ -0,0 +1,219 @@
|
||||
package hclwrite
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
"github.com/hashicorp/hcl/v2/hclsyntax"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// Body represents a block body or the top-level file body.
type Body struct {
	inTree

	// items is the subset of children that are structural items
	// (attributes and blocks), as opposed to unstructured trivia tokens.
	items nodeSet
}
|
||||
|
||||
func newBody() *Body {
|
||||
return &Body{
|
||||
inTree: newInTree(),
|
||||
items: newNodeSet(),
|
||||
}
|
||||
}
|
||||
|
||||
// appendItem appends the given content as a new child node and records it
// as a structural item.
func (b *Body) appendItem(c nodeContent) *node {
	nn := b.children.Append(c)
	b.items.Add(nn)
	return nn
}

// appendItemNode is like appendItem but for an already-constructed node,
// which must not currently be attached to any other tree.
func (b *Body) appendItemNode(nn *node) *node {
	nn.assertUnattached()
	b.children.AppendNode(nn)
	b.items.Add(nn)
	return nn
}
|
||||
|
||||
// Clear removes all of the items from the body, making it empty.
func (b *Body) Clear() {
	// NOTE(review): only children is cleared here; b.items still holds the
	// old nodes — confirm stale entries cannot surface in later queries.
	b.children.Clear()
}

// AppendUnstructuredTokens appends raw tokens (e.g. blank lines or comments)
// to the body without recording them as structural items.
func (b *Body) AppendUnstructuredTokens(ts Tokens) {
	b.inTree.children.Append(ts)
}
||||
|
||||
// Attributes returns a new map of all of the attributes in the body, with
|
||||
// the attribute names as the keys.
|
||||
func (b *Body) Attributes() map[string]*Attribute {
|
||||
ret := make(map[string]*Attribute)
|
||||
for n := range b.items {
|
||||
if attr, isAttr := n.content.(*Attribute); isAttr {
|
||||
nameObj := attr.name.content.(*identifier)
|
||||
name := string(nameObj.token.Bytes)
|
||||
ret[name] = attr
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// Blocks returns a new slice of all the blocks in the body.
|
||||
func (b *Body) Blocks() []*Block {
|
||||
ret := make([]*Block, 0, len(b.items))
|
||||
for n := range b.items {
|
||||
if block, isBlock := n.content.(*Block); isBlock {
|
||||
ret = append(ret, block)
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// GetAttribute returns the attribute from the body that has the given name,
|
||||
// or returns nil if there is currently no matching attribute.
|
||||
func (b *Body) GetAttribute(name string) *Attribute {
|
||||
for n := range b.items {
|
||||
if attr, isAttr := n.content.(*Attribute); isAttr {
|
||||
nameObj := attr.name.content.(*identifier)
|
||||
if nameObj.hasName(name) {
|
||||
// We've found it!
|
||||
return attr
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getAttributeNode is like GetAttribute but it returns the node containing
|
||||
// the selected attribute (if one is found) rather than the attribute itself.
|
||||
func (b *Body) getAttributeNode(name string) *node {
|
||||
for n := range b.items {
|
||||
if attr, isAttr := n.content.(*Attribute); isAttr {
|
||||
nameObj := attr.name.content.(*identifier)
|
||||
if nameObj.hasName(name) {
|
||||
// We've found it!
|
||||
return n
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// FirstMatchingBlock returns a first matching block from the body that has the
|
||||
// given name and labels or returns nil if there is currently no matching
|
||||
// block.
|
||||
func (b *Body) FirstMatchingBlock(typeName string, labels []string) *Block {
|
||||
for _, block := range b.Blocks() {
|
||||
if typeName == block.Type() {
|
||||
labelNames := block.Labels()
|
||||
if len(labels) == 0 && len(labelNames) == 0 {
|
||||
return block
|
||||
}
|
||||
if reflect.DeepEqual(labels, labelNames) {
|
||||
return block
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveBlock removes the given block from the body, if it's in that body.
|
||||
// If it isn't present, this is a no-op.
|
||||
//
|
||||
// Returns true if it removed something, or false otherwise.
|
||||
func (b *Body) RemoveBlock(block *Block) bool {
|
||||
for n := range b.items {
|
||||
if n.content == block {
|
||||
n.Detach()
|
||||
b.items.Remove(n)
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// SetAttributeValue either replaces the expression of an existing attribute
|
||||
// of the given name or adds a new attribute definition to the end of the block.
|
||||
//
|
||||
// The value is given as a cty.Value, and must therefore be a literal. To set
|
||||
// a variable reference or other traversal, use SetAttributeTraversal.
|
||||
//
|
||||
// The return value is the attribute that was either modified in-place or
|
||||
// created.
|
||||
func (b *Body) SetAttributeValue(name string, val cty.Value) *Attribute {
|
||||
attr := b.GetAttribute(name)
|
||||
expr := NewExpressionLiteral(val)
|
||||
if attr != nil {
|
||||
attr.expr = attr.expr.ReplaceWith(expr)
|
||||
} else {
|
||||
attr := newAttribute()
|
||||
attr.init(name, expr)
|
||||
b.appendItem(attr)
|
||||
}
|
||||
return attr
|
||||
}
|
||||
|
||||
// SetAttributeTraversal either replaces the expression of an existing attribute
|
||||
// of the given name or adds a new attribute definition to the end of the body.
|
||||
//
|
||||
// The new expression is given as a hcl.Traversal, which must be an absolute
|
||||
// traversal. To set a literal value, use SetAttributeValue.
|
||||
//
|
||||
// The return value is the attribute that was either modified in-place or
|
||||
// created.
|
||||
func (b *Body) SetAttributeTraversal(name string, traversal hcl.Traversal) *Attribute {
|
||||
attr := b.GetAttribute(name)
|
||||
expr := NewExpressionAbsTraversal(traversal)
|
||||
if attr != nil {
|
||||
attr.expr = attr.expr.ReplaceWith(expr)
|
||||
} else {
|
||||
attr := newAttribute()
|
||||
attr.init(name, expr)
|
||||
b.appendItem(attr)
|
||||
}
|
||||
return attr
|
||||
}
|
||||
|
||||
// RemoveAttribute removes the attribute with the given name from the body.
|
||||
//
|
||||
// The return value is the attribute that was removed, or nil if there was
|
||||
// no such attribute (in which case the call was a no-op).
|
||||
func (b *Body) RemoveAttribute(name string) *Attribute {
|
||||
node := b.getAttributeNode(name)
|
||||
if node == nil {
|
||||
return nil
|
||||
}
|
||||
node.Detach()
|
||||
b.items.Remove(node)
|
||||
return node.content.(*Attribute)
|
||||
}
|
||||
|
||||
// AppendBlock appends an existing block (which must not be already attached
// to a body) to the end of the receiving body.
func (b *Body) AppendBlock(block *Block) *Block {
	// NOTE(review): unlike appendItemNode, this path does not assert the
	// block is unattached — confirm callers uphold that requirement.
	b.appendItem(block)
	return block
}
|
||||
|
||||
// AppendNewBlock appends a new nested block to the end of the receiving body
|
||||
// with the given type name and labels.
|
||||
func (b *Body) AppendNewBlock(typeName string, labels []string) *Block {
|
||||
block := newBlock()
|
||||
block.init(typeName, labels)
|
||||
b.appendItem(block)
|
||||
return block
|
||||
}
|
||||
|
||||
// AppendNewline appends a newline token to th end of the receiving body,
|
||||
// which generally serves as a separator between different sets of body
|
||||
// contents.
|
||||
func (b *Body) AppendNewline() {
|
||||
b.AppendUnstructuredTokens(Tokens{
|
||||
{
|
||||
Type: hclsyntax.TokenNewline,
|
||||
Bytes: []byte{'\n'},
|
||||
},
|
||||
})
|
||||
}
|
||||
201
vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_expression.go
generated
vendored
Normal file
201
vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_expression.go
generated
vendored
Normal file
@@ -0,0 +1,201 @@
|
||||
package hclwrite
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
"github.com/hashicorp/hcl/v2/hclsyntax"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// Expression represents an expression stored as tokens, along with the set
// of absolute traversals it contains (used for variable analysis).
type Expression struct {
	inTree

	// absTraversals tracks the child nodes that are absolute traversals,
	// supporting Variables and RenameVariablePrefix.
	absTraversals nodeSet
}
|
||||
|
||||
func newExpression() *Expression {
|
||||
return &Expression{
|
||||
inTree: newInTree(),
|
||||
absTraversals: newNodeSet(),
|
||||
}
|
||||
}
|
||||
|
||||
// NewExpressionLiteral constructs an an expression that represents the given
|
||||
// literal value.
|
||||
//
|
||||
// Since an unknown value cannot be represented in source code, this function
|
||||
// will panic if the given value is unknown or contains a nested unknown value.
|
||||
// Use val.IsWhollyKnown before calling to be sure.
|
||||
//
|
||||
// HCL native syntax does not directly represent lists, maps, and sets, and
|
||||
// instead relies on the automatic conversions to those collection types from
|
||||
// either list or tuple constructor syntax. Therefore converting collection
|
||||
// values to source code and re-reading them will lose type information, and
|
||||
// the reader must provide a suitable type at decode time to recover the
|
||||
// original value.
|
||||
func NewExpressionLiteral(val cty.Value) *Expression {
|
||||
toks := TokensForValue(val)
|
||||
expr := newExpression()
|
||||
expr.children.AppendUnstructuredTokens(toks)
|
||||
return expr
|
||||
}
|
||||
|
||||
// NewExpressionAbsTraversal constructs an expression that represents the
// given traversal, which must be absolute or this function will panic.
func NewExpressionAbsTraversal(traversal hcl.Traversal) *Expression {
	if traversal.IsRelative() {
		panic("can't construct expression from relative traversal")
	}

	physT := newTraversal()
	rootName := traversal.RootName()
	steps := traversal[1:]

	// The root name becomes the first TraverseName step.
	{
		tn := newTraverseName()
		tn.name = tn.children.Append(newIdentifier(&Token{
			Type:  hclsyntax.TokenIdent,
			Bytes: []byte(rootName),
		}))
		physT.steps.Add(physT.children.Append(tn))
	}

	// Remaining steps become ".name" or "[key]" children.
	// NOTE(review): step kinds other than TraverseAttr/TraverseIndex are
	// silently dropped here — confirm none can occur after the root.
	for _, step := range steps {
		switch ts := step.(type) {
		case hcl.TraverseAttr:
			tn := newTraverseName()
			tn.children.AppendUnstructuredTokens(Tokens{
				{
					Type:  hclsyntax.TokenDot,
					Bytes: []byte{'.'},
				},
			})
			tn.name = tn.children.Append(newIdentifier(&Token{
				Type:  hclsyntax.TokenIdent,
				Bytes: []byte(ts.Name),
			}))
			physT.steps.Add(physT.children.Append(tn))
		case hcl.TraverseIndex:
			ti := newTraverseIndex()
			ti.children.AppendUnstructuredTokens(Tokens{
				{
					Type:  hclsyntax.TokenOBrack,
					Bytes: []byte{'['},
				},
			})
			// The index key is rendered as a literal expression.
			indexExpr := NewExpressionLiteral(ts.Key)
			ti.key = ti.children.Append(indexExpr)
			ti.children.AppendUnstructuredTokens(Tokens{
				{
					Type:  hclsyntax.TokenCBrack,
					Bytes: []byte{']'},
				},
			})
			physT.steps.Add(physT.children.Append(ti))
		}
	}

	expr := newExpression()
	expr.absTraversals.Add(expr.children.Append(physT))
	return expr
}
|
||||
|
||||
// Variables returns the absolute traversals that exist within the receiving
|
||||
// expression.
|
||||
func (e *Expression) Variables() []*Traversal {
|
||||
nodes := e.absTraversals.List()
|
||||
ret := make([]*Traversal, len(nodes))
|
||||
for i, node := range nodes {
|
||||
ret[i] = node.content.(*Traversal)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// RenameVariablePrefix examines each of the absolute traversals in the
// receiving expression to see if they have the given sequence of names as
// a prefix. If so, they are updated in place to have the given
// replacement names instead of that prefix.
//
// This can be used to implement symbol renaming. The calling application can
// visit all relevant expressions in its input and apply the same renaming
// to implement a global symbol rename.
//
// The search and replacement traversals must be the same length, or this
// method will panic. Only attribute access operations can be matched and
// replaced. Index steps never match the prefix.
func (e *Expression) RenameVariablePrefix(search, replacement []string) {
	if len(search) != len(replacement) {
		panic(fmt.Sprintf("search and replacement length mismatch (%d and %d)", len(search), len(replacement)))
	}
Traversals:
	for node := range e.absTraversals {
		traversal := node.content.(*Traversal)
		if len(traversal.steps) < len(search) {
			// If it's shorter then it can't have our prefix
			continue
		}

		stepNodes := traversal.steps.List()
		// Compare each search name against the corresponding step; any
		// mismatch skips to the next traversal.
		for i, name := range search {
			step, isName := stepNodes[i].content.(*TraverseName)
			if !isName {
				continue Traversals // only name nodes can match
			}
			foundNameBytes := step.name.content.(*identifier).token.Bytes
			if len(foundNameBytes) != len(name) {
				continue Traversals
			}
			if string(foundNameBytes) != name {
				continue Traversals
			}
		}

		// If we get here then the prefix matched, so now we'll swap in
		// the replacement strings.
		for i, name := range replacement {
			step := stepNodes[i].content.(*TraverseName)
			token := step.name.content.(*identifier).token
			token.Bytes = []byte(name)
		}
	}
}
|
||||
|
||||
// Traversal represents a sequence of variable, attribute, and/or index
// operations.
type Traversal struct {
	inTree

	// steps holds the child nodes that are *TraverseName or *TraverseIndex.
	steps nodeSet
}

// newTraversal returns an empty Traversal with no steps.
func newTraversal() *Traversal {
	return &Traversal{
		inTree: newInTree(),
		steps:  newNodeSet(),
	}
}
|
||||
|
||||
// TraverseName is a traversal step that accesses an attribute by name.
type TraverseName struct {
	inTree

	// name is the child node holding the *identifier being accessed.
	name *node
}

// newTraverseName returns an empty TraverseName step.
func newTraverseName() *TraverseName {
	return &TraverseName{
		inTree: newInTree(),
	}
}
|
||||
|
||||
// TraverseIndex is a traversal step that accesses an element by index key.
type TraverseIndex struct {
	inTree

	// key is the child node holding the index key *Expression.
	key *node
}

// newTraverseIndex returns an empty TraverseIndex step.
func newTraverseIndex() *TraverseIndex {
	return &TraverseIndex{
		inTree: newInTree(),
	}
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user