updated GHA
Update to v2 SDK updated dependencies
This commit is contained in:
5
vendor/github.com/zclconf/go-cty-yaml/.travis.yml
generated
vendored
5
vendor/github.com/zclconf/go-cty-yaml/.travis.yml
generated
vendored
@ -1,5 +0,0 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.12
|
||||
|
10
vendor/github.com/zclconf/go-cty-yaml/CHANGELOG.md
generated
vendored
10
vendor/github.com/zclconf/go-cty-yaml/CHANGELOG.md
generated
vendored
@ -1,10 +0,0 @@
|
||||
# 1.0.1 (July 30, 2019)
|
||||
|
||||
* The YAML decoder is now correctly treating quoted scalars as verbatim literal
|
||||
strings rather than using the fuzzy type selection rules for them. Fuzzy
|
||||
type selection rules still apply to unquoted scalars.
|
||||
([#4](https://github.com/zclconf/go-cty-yaml/pull/4))
|
||||
|
||||
# 1.0.0 (May 26, 2019)
|
||||
|
||||
Initial release.
|
201
vendor/github.com/zclconf/go-cty-yaml/LICENSE
generated
vendored
201
vendor/github.com/zclconf/go-cty-yaml/LICENSE
generated
vendored
@ -1,201 +0,0 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
31
vendor/github.com/zclconf/go-cty-yaml/LICENSE.libyaml
generated
vendored
31
vendor/github.com/zclconf/go-cty-yaml/LICENSE.libyaml
generated
vendored
@ -1,31 +0,0 @@
|
||||
The following files were ported to Go from C files of libyaml, and thus
|
||||
are still covered by their original copyright and license:
|
||||
|
||||
apic.go
|
||||
emitterc.go
|
||||
parserc.go
|
||||
readerc.go
|
||||
scannerc.go
|
||||
writerc.go
|
||||
yamlh.go
|
||||
yamlprivateh.go
|
||||
|
||||
Copyright (c) 2006 Kirill Simonov
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
20
vendor/github.com/zclconf/go-cty-yaml/NOTICE
generated
vendored
20
vendor/github.com/zclconf/go-cty-yaml/NOTICE
generated
vendored
@ -1,20 +0,0 @@
|
||||
This package is derived from gopkg.in/yaml.v2, which is copyright
|
||||
2011-2016 Canonical Ltd.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
Includes mechanical ports of code from libyaml, distributed under its original
|
||||
license. See LICENSE.libyaml for more information.
|
||||
|
||||
Modifications for cty interfacing copyright 2019 Martin Atkins, and
|
||||
distributed under the same license terms.
|
739
vendor/github.com/zclconf/go-cty-yaml/apic.go
generated
vendored
739
vendor/github.com/zclconf/go-cty-yaml/apic.go
generated
vendored
@ -1,739 +0,0 @@
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
|
||||
//fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
|
||||
|
||||
// Check if we can move the queue at the beginning of the buffer.
|
||||
if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
|
||||
if parser.tokens_head != len(parser.tokens) {
|
||||
copy(parser.tokens, parser.tokens[parser.tokens_head:])
|
||||
}
|
||||
parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
|
||||
parser.tokens_head = 0
|
||||
}
|
||||
parser.tokens = append(parser.tokens, *token)
|
||||
if pos < 0 {
|
||||
return
|
||||
}
|
||||
copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
|
||||
parser.tokens[parser.tokens_head+pos] = *token
|
||||
}
|
||||
|
||||
// Create a new parser object.
|
||||
func yaml_parser_initialize(parser *yaml_parser_t) bool {
|
||||
*parser = yaml_parser_t{
|
||||
raw_buffer: make([]byte, 0, input_raw_buffer_size),
|
||||
buffer: make([]byte, 0, input_buffer_size),
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Destroy a parser object.
|
||||
func yaml_parser_delete(parser *yaml_parser_t) {
|
||||
*parser = yaml_parser_t{}
|
||||
}
|
||||
|
||||
// String read handler.
|
||||
func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
|
||||
if parser.input_pos == len(parser.input) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
n = copy(buffer, parser.input[parser.input_pos:])
|
||||
parser.input_pos += n
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Reader read handler.
|
||||
func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
|
||||
return parser.input_reader.Read(buffer)
|
||||
}
|
||||
|
||||
// Set a string input.
|
||||
func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
|
||||
if parser.read_handler != nil {
|
||||
panic("must set the input source only once")
|
||||
}
|
||||
parser.read_handler = yaml_string_read_handler
|
||||
parser.input = input
|
||||
parser.input_pos = 0
|
||||
}
|
||||
|
||||
// Set a file input.
|
||||
func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
|
||||
if parser.read_handler != nil {
|
||||
panic("must set the input source only once")
|
||||
}
|
||||
parser.read_handler = yaml_reader_read_handler
|
||||
parser.input_reader = r
|
||||
}
|
||||
|
||||
// Set the source encoding.
|
||||
func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
|
||||
if parser.encoding != yaml_ANY_ENCODING {
|
||||
panic("must set the encoding only once")
|
||||
}
|
||||
parser.encoding = encoding
|
||||
}
|
||||
|
||||
// Create a new emitter object.
|
||||
func yaml_emitter_initialize(emitter *yaml_emitter_t) {
|
||||
*emitter = yaml_emitter_t{
|
||||
buffer: make([]byte, output_buffer_size),
|
||||
raw_buffer: make([]byte, 0, output_raw_buffer_size),
|
||||
states: make([]yaml_emitter_state_t, 0, initial_stack_size),
|
||||
events: make([]yaml_event_t, 0, initial_queue_size),
|
||||
}
|
||||
}
|
||||
|
||||
// Destroy an emitter object.
|
||||
func yaml_emitter_delete(emitter *yaml_emitter_t) {
|
||||
*emitter = yaml_emitter_t{}
|
||||
}
|
||||
|
||||
// String write handler.
|
||||
func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
|
||||
*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
|
||||
return nil
|
||||
}
|
||||
|
||||
// yaml_writer_write_handler uses emitter.output_writer to write the
|
||||
// emitted text.
|
||||
func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
|
||||
_, err := emitter.output_writer.Write(buffer)
|
||||
return err
|
||||
}
|
||||
|
||||
// Set a string output.
|
||||
func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
|
||||
if emitter.write_handler != nil {
|
||||
panic("must set the output target only once")
|
||||
}
|
||||
emitter.write_handler = yaml_string_write_handler
|
||||
emitter.output_buffer = output_buffer
|
||||
}
|
||||
|
||||
// Set a file output.
|
||||
func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
|
||||
if emitter.write_handler != nil {
|
||||
panic("must set the output target only once")
|
||||
}
|
||||
emitter.write_handler = yaml_writer_write_handler
|
||||
emitter.output_writer = w
|
||||
}
|
||||
|
||||
// Set the output encoding.
|
||||
func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
|
||||
if emitter.encoding != yaml_ANY_ENCODING {
|
||||
panic("must set the output encoding only once")
|
||||
}
|
||||
emitter.encoding = encoding
|
||||
}
|
||||
|
||||
// Set the canonical output style.
|
||||
func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
|
||||
emitter.canonical = canonical
|
||||
}
|
||||
|
||||
//// Set the indentation increment.
|
||||
func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
|
||||
if indent < 2 || indent > 9 {
|
||||
indent = 2
|
||||
}
|
||||
emitter.best_indent = indent
|
||||
}
|
||||
|
||||
// Set the preferred line width.
|
||||
func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
|
||||
if width < 0 {
|
||||
width = -1
|
||||
}
|
||||
emitter.best_width = width
|
||||
}
|
||||
|
||||
// Set if unescaped non-ASCII characters are allowed.
|
||||
func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
|
||||
emitter.unicode = unicode
|
||||
}
|
||||
|
||||
// Set the preferred line break character.
|
||||
func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
|
||||
emitter.line_break = line_break
|
||||
}
|
||||
|
||||
///*
|
||||
// * Destroy a token object.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(void)
|
||||
//yaml_token_delete(yaml_token_t *token)
|
||||
//{
|
||||
// assert(token); // Non-NULL token object expected.
|
||||
//
|
||||
// switch (token.type)
|
||||
// {
|
||||
// case YAML_TAG_DIRECTIVE_TOKEN:
|
||||
// yaml_free(token.data.tag_directive.handle);
|
||||
// yaml_free(token.data.tag_directive.prefix);
|
||||
// break;
|
||||
//
|
||||
// case YAML_ALIAS_TOKEN:
|
||||
// yaml_free(token.data.alias.value);
|
||||
// break;
|
||||
//
|
||||
// case YAML_ANCHOR_TOKEN:
|
||||
// yaml_free(token.data.anchor.value);
|
||||
// break;
|
||||
//
|
||||
// case YAML_TAG_TOKEN:
|
||||
// yaml_free(token.data.tag.handle);
|
||||
// yaml_free(token.data.tag.suffix);
|
||||
// break;
|
||||
//
|
||||
// case YAML_SCALAR_TOKEN:
|
||||
// yaml_free(token.data.scalar.value);
|
||||
// break;
|
||||
//
|
||||
// default:
|
||||
// break;
|
||||
// }
|
||||
//
|
||||
// memset(token, 0, sizeof(yaml_token_t));
|
||||
//}
|
||||
//
|
||||
///*
|
||||
// * Check if a string is a valid UTF-8 sequence.
|
||||
// *
|
||||
// * Check 'reader.c' for more details on UTF-8 encoding.
|
||||
// */
|
||||
//
|
||||
//static int
|
||||
//yaml_check_utf8(yaml_char_t *start, size_t length)
|
||||
//{
|
||||
// yaml_char_t *end = start+length;
|
||||
// yaml_char_t *pointer = start;
|
||||
//
|
||||
// while (pointer < end) {
|
||||
// unsigned char octet;
|
||||
// unsigned int width;
|
||||
// unsigned int value;
|
||||
// size_t k;
|
||||
//
|
||||
// octet = pointer[0];
|
||||
// width = (octet & 0x80) == 0x00 ? 1 :
|
||||
// (octet & 0xE0) == 0xC0 ? 2 :
|
||||
// (octet & 0xF0) == 0xE0 ? 3 :
|
||||
// (octet & 0xF8) == 0xF0 ? 4 : 0;
|
||||
// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
|
||||
// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
|
||||
// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
|
||||
// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
|
||||
// if (!width) return 0;
|
||||
// if (pointer+width > end) return 0;
|
||||
// for (k = 1; k < width; k ++) {
|
||||
// octet = pointer[k];
|
||||
// if ((octet & 0xC0) != 0x80) return 0;
|
||||
// value = (value << 6) + (octet & 0x3F);
|
||||
// }
|
||||
// if (!((width == 1) ||
|
||||
// (width == 2 && value >= 0x80) ||
|
||||
// (width == 3 && value >= 0x800) ||
|
||||
// (width == 4 && value >= 0x10000))) return 0;
|
||||
//
|
||||
// pointer += width;
|
||||
// }
|
||||
//
|
||||
// return 1;
|
||||
//}
|
||||
//
|
||||
|
||||
// Create STREAM-START.
|
||||
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_STREAM_START_EVENT,
|
||||
encoding: encoding,
|
||||
}
|
||||
}
|
||||
|
||||
// Create STREAM-END.
|
||||
func yaml_stream_end_event_initialize(event *yaml_event_t) {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_STREAM_END_EVENT,
|
||||
}
|
||||
}
|
||||
|
||||
// Create DOCUMENT-START.
|
||||
func yaml_document_start_event_initialize(
|
||||
event *yaml_event_t,
|
||||
version_directive *yaml_version_directive_t,
|
||||
tag_directives []yaml_tag_directive_t,
|
||||
implicit bool,
|
||||
) {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_DOCUMENT_START_EVENT,
|
||||
version_directive: version_directive,
|
||||
tag_directives: tag_directives,
|
||||
implicit: implicit,
|
||||
}
|
||||
}
|
||||
|
||||
// Create DOCUMENT-END.
|
||||
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_DOCUMENT_END_EVENT,
|
||||
implicit: implicit,
|
||||
}
|
||||
}
|
||||
|
||||
///*
|
||||
// * Create ALIAS.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(int)
|
||||
//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
|
||||
//{
|
||||
// mark yaml_mark_t = { 0, 0, 0 }
|
||||
// anchor_copy *yaml_char_t = NULL
|
||||
//
|
||||
// assert(event) // Non-NULL event object is expected.
|
||||
// assert(anchor) // Non-NULL anchor is expected.
|
||||
//
|
||||
// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
|
||||
//
|
||||
// anchor_copy = yaml_strdup(anchor)
|
||||
// if (!anchor_copy)
|
||||
// return 0
|
||||
//
|
||||
// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
|
||||
//
|
||||
// return 1
|
||||
//}
|
||||
|
||||
// Create SCALAR.
|
||||
func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_SCALAR_EVENT,
|
||||
anchor: anchor,
|
||||
tag: tag,
|
||||
value: value,
|
||||
implicit: plain_implicit,
|
||||
quoted_implicit: quoted_implicit,
|
||||
style: yaml_style_t(style),
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Create SEQUENCE-START.
|
||||
func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_SEQUENCE_START_EVENT,
|
||||
anchor: anchor,
|
||||
tag: tag,
|
||||
implicit: implicit,
|
||||
style: yaml_style_t(style),
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Create SEQUENCE-END.
|
||||
func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_SEQUENCE_END_EVENT,
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Create MAPPING-START.
|
||||
func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_MAPPING_START_EVENT,
|
||||
anchor: anchor,
|
||||
tag: tag,
|
||||
implicit: implicit,
|
||||
style: yaml_style_t(style),
|
||||
}
|
||||
}
|
||||
|
||||
// Create MAPPING-END.
|
||||
func yaml_mapping_end_event_initialize(event *yaml_event_t) {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_MAPPING_END_EVENT,
|
||||
}
|
||||
}
|
||||
|
||||
// Destroy an event object.
|
||||
func yaml_event_delete(event *yaml_event_t) {
|
||||
*event = yaml_event_t{}
|
||||
}
|
||||
|
||||
///*
|
||||
// * Create a document object.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(int)
|
||||
//yaml_document_initialize(document *yaml_document_t,
|
||||
// version_directive *yaml_version_directive_t,
|
||||
// tag_directives_start *yaml_tag_directive_t,
|
||||
// tag_directives_end *yaml_tag_directive_t,
|
||||
// start_implicit int, end_implicit int)
|
||||
//{
|
||||
// struct {
|
||||
// error yaml_error_type_t
|
||||
// } context
|
||||
// struct {
|
||||
// start *yaml_node_t
|
||||
// end *yaml_node_t
|
||||
// top *yaml_node_t
|
||||
// } nodes = { NULL, NULL, NULL }
|
||||
// version_directive_copy *yaml_version_directive_t = NULL
|
||||
// struct {
|
||||
// start *yaml_tag_directive_t
|
||||
// end *yaml_tag_directive_t
|
||||
// top *yaml_tag_directive_t
|
||||
// } tag_directives_copy = { NULL, NULL, NULL }
|
||||
// value yaml_tag_directive_t = { NULL, NULL }
|
||||
// mark yaml_mark_t = { 0, 0, 0 }
|
||||
//
|
||||
// assert(document) // Non-NULL document object is expected.
|
||||
// assert((tag_directives_start && tag_directives_end) ||
|
||||
// (tag_directives_start == tag_directives_end))
|
||||
// // Valid tag directives are expected.
|
||||
//
|
||||
// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
|
||||
//
|
||||
// if (version_directive) {
|
||||
// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
|
||||
// if (!version_directive_copy) goto error
|
||||
// version_directive_copy.major = version_directive.major
|
||||
// version_directive_copy.minor = version_directive.minor
|
||||
// }
|
||||
//
|
||||
// if (tag_directives_start != tag_directives_end) {
|
||||
// tag_directive *yaml_tag_directive_t
|
||||
// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
|
||||
// goto error
|
||||
// for (tag_directive = tag_directives_start
|
||||
// tag_directive != tag_directives_end; tag_directive ++) {
|
||||
// assert(tag_directive.handle)
|
||||
// assert(tag_directive.prefix)
|
||||
// if (!yaml_check_utf8(tag_directive.handle,
|
||||
// strlen((char *)tag_directive.handle)))
|
||||
// goto error
|
||||
// if (!yaml_check_utf8(tag_directive.prefix,
|
||||
// strlen((char *)tag_directive.prefix)))
|
||||
// goto error
|
||||
// value.handle = yaml_strdup(tag_directive.handle)
|
||||
// value.prefix = yaml_strdup(tag_directive.prefix)
|
||||
// if (!value.handle || !value.prefix) goto error
|
||||
// if (!PUSH(&context, tag_directives_copy, value))
|
||||
// goto error
|
||||
// value.handle = NULL
|
||||
// value.prefix = NULL
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
|
||||
// tag_directives_copy.start, tag_directives_copy.top,
|
||||
// start_implicit, end_implicit, mark, mark)
|
||||
//
|
||||
// return 1
|
||||
//
|
||||
//error:
|
||||
// STACK_DEL(&context, nodes)
|
||||
// yaml_free(version_directive_copy)
|
||||
// while (!STACK_EMPTY(&context, tag_directives_copy)) {
|
||||
// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
|
||||
// yaml_free(value.handle)
|
||||
// yaml_free(value.prefix)
|
||||
// }
|
||||
// STACK_DEL(&context, tag_directives_copy)
|
||||
// yaml_free(value.handle)
|
||||
// yaml_free(value.prefix)
|
||||
//
|
||||
// return 0
|
||||
//}
|
||||
//
|
||||
///*
|
||||
// * Destroy a document object.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(void)
|
||||
//yaml_document_delete(document *yaml_document_t)
|
||||
//{
|
||||
// struct {
|
||||
// error yaml_error_type_t
|
||||
// } context
|
||||
// tag_directive *yaml_tag_directive_t
|
||||
//
|
||||
// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
|
||||
//
|
||||
// assert(document) // Non-NULL document object is expected.
|
||||
//
|
||||
// while (!STACK_EMPTY(&context, document.nodes)) {
|
||||
// node yaml_node_t = POP(&context, document.nodes)
|
||||
// yaml_free(node.tag)
|
||||
// switch (node.type) {
|
||||
// case YAML_SCALAR_NODE:
|
||||
// yaml_free(node.data.scalar.value)
|
||||
// break
|
||||
// case YAML_SEQUENCE_NODE:
|
||||
// STACK_DEL(&context, node.data.sequence.items)
|
||||
// break
|
||||
// case YAML_MAPPING_NODE:
|
||||
// STACK_DEL(&context, node.data.mapping.pairs)
|
||||
// break
|
||||
// default:
|
||||
// assert(0) // Should not happen.
|
||||
// }
|
||||
// }
|
||||
// STACK_DEL(&context, document.nodes)
|
||||
//
|
||||
// yaml_free(document.version_directive)
|
||||
// for (tag_directive = document.tag_directives.start
|
||||
// tag_directive != document.tag_directives.end
|
||||
// tag_directive++) {
|
||||
// yaml_free(tag_directive.handle)
|
||||
// yaml_free(tag_directive.prefix)
|
||||
// }
|
||||
// yaml_free(document.tag_directives.start)
|
||||
//
|
||||
// memset(document, 0, sizeof(yaml_document_t))
|
||||
//}
|
||||
//
|
||||
///**
|
||||
// * Get a document node.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(yaml_node_t *)
|
||||
//yaml_document_get_node(document *yaml_document_t, index int)
|
||||
//{
|
||||
// assert(document) // Non-NULL document object is expected.
|
||||
//
|
||||
// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
|
||||
// return document.nodes.start + index - 1
|
||||
// }
|
||||
// return NULL
|
||||
//}
|
||||
//
|
||||
///**
|
||||
// * Get the root object.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(yaml_node_t *)
|
||||
//yaml_document_get_root_node(document *yaml_document_t)
|
||||
//{
|
||||
// assert(document) // Non-NULL document object is expected.
|
||||
//
|
||||
// if (document.nodes.top != document.nodes.start) {
|
||||
// return document.nodes.start
|
||||
// }
|
||||
// return NULL
|
||||
//}
|
||||
//
|
||||
///*
|
||||
// * Add a scalar node to a document.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(int)
|
||||
//yaml_document_add_scalar(document *yaml_document_t,
|
||||
// tag *yaml_char_t, value *yaml_char_t, length int,
|
||||
// style yaml_scalar_style_t)
|
||||
//{
|
||||
// struct {
|
||||
// error yaml_error_type_t
|
||||
// } context
|
||||
// mark yaml_mark_t = { 0, 0, 0 }
|
||||
// tag_copy *yaml_char_t = NULL
|
||||
// value_copy *yaml_char_t = NULL
|
||||
// node yaml_node_t
|
||||
//
|
||||
// assert(document) // Non-NULL document object is expected.
|
||||
// assert(value) // Non-NULL value is expected.
|
||||
//
|
||||
// if (!tag) {
|
||||
// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
|
||||
// }
|
||||
//
|
||||
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
|
||||
// tag_copy = yaml_strdup(tag)
|
||||
// if (!tag_copy) goto error
|
||||
//
|
||||
// if (length < 0) {
|
||||
// length = strlen((char *)value)
|
||||
// }
|
||||
//
|
||||
// if (!yaml_check_utf8(value, length)) goto error
|
||||
// value_copy = yaml_malloc(length+1)
|
||||
// if (!value_copy) goto error
|
||||
// memcpy(value_copy, value, length)
|
||||
// value_copy[length] = '\0'
|
||||
//
|
||||
// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
|
||||
// if (!PUSH(&context, document.nodes, node)) goto error
|
||||
//
|
||||
// return document.nodes.top - document.nodes.start
|
||||
//
|
||||
//error:
|
||||
// yaml_free(tag_copy)
|
||||
// yaml_free(value_copy)
|
||||
//
|
||||
// return 0
|
||||
//}
|
||||
//
|
||||
///*
|
||||
// * Add a sequence node to a document.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(int)
|
||||
//yaml_document_add_sequence(document *yaml_document_t,
|
||||
// tag *yaml_char_t, style yaml_sequence_style_t)
|
||||
//{
|
||||
// struct {
|
||||
// error yaml_error_type_t
|
||||
// } context
|
||||
// mark yaml_mark_t = { 0, 0, 0 }
|
||||
// tag_copy *yaml_char_t = NULL
|
||||
// struct {
|
||||
// start *yaml_node_item_t
|
||||
// end *yaml_node_item_t
|
||||
// top *yaml_node_item_t
|
||||
// } items = { NULL, NULL, NULL }
|
||||
// node yaml_node_t
|
||||
//
|
||||
// assert(document) // Non-NULL document object is expected.
|
||||
//
|
||||
// if (!tag) {
|
||||
// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
|
||||
// }
|
||||
//
|
||||
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
|
||||
// tag_copy = yaml_strdup(tag)
|
||||
// if (!tag_copy) goto error
|
||||
//
|
||||
// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
|
||||
//
|
||||
// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
|
||||
// style, mark, mark)
|
||||
// if (!PUSH(&context, document.nodes, node)) goto error
|
||||
//
|
||||
// return document.nodes.top - document.nodes.start
|
||||
//
|
||||
//error:
|
||||
// STACK_DEL(&context, items)
|
||||
// yaml_free(tag_copy)
|
||||
//
|
||||
// return 0
|
||||
//}
|
||||
//
|
||||
///*
|
||||
// * Add a mapping node to a document.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(int)
|
||||
//yaml_document_add_mapping(document *yaml_document_t,
|
||||
// tag *yaml_char_t, style yaml_mapping_style_t)
|
||||
//{
|
||||
// struct {
|
||||
// error yaml_error_type_t
|
||||
// } context
|
||||
// mark yaml_mark_t = { 0, 0, 0 }
|
||||
// tag_copy *yaml_char_t = NULL
|
||||
// struct {
|
||||
// start *yaml_node_pair_t
|
||||
// end *yaml_node_pair_t
|
||||
// top *yaml_node_pair_t
|
||||
// } pairs = { NULL, NULL, NULL }
|
||||
// node yaml_node_t
|
||||
//
|
||||
// assert(document) // Non-NULL document object is expected.
|
||||
//
|
||||
// if (!tag) {
|
||||
// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
|
||||
// }
|
||||
//
|
||||
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
|
||||
// tag_copy = yaml_strdup(tag)
|
||||
// if (!tag_copy) goto error
|
||||
//
|
||||
// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
|
||||
//
|
||||
// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
|
||||
// style, mark, mark)
|
||||
// if (!PUSH(&context, document.nodes, node)) goto error
|
||||
//
|
||||
// return document.nodes.top - document.nodes.start
|
||||
//
|
||||
//error:
|
||||
// STACK_DEL(&context, pairs)
|
||||
// yaml_free(tag_copy)
|
||||
//
|
||||
// return 0
|
||||
//}
|
||||
//
|
||||
///*
|
||||
// * Append an item to a sequence node.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(int)
|
||||
//yaml_document_append_sequence_item(document *yaml_document_t,
|
||||
// sequence int, item int)
|
||||
//{
|
||||
// struct {
|
||||
// error yaml_error_type_t
|
||||
// } context
|
||||
//
|
||||
// assert(document) // Non-NULL document is required.
|
||||
// assert(sequence > 0
|
||||
// && document.nodes.start + sequence <= document.nodes.top)
|
||||
// // Valid sequence id is required.
|
||||
// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
|
||||
// // A sequence node is required.
|
||||
// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
|
||||
// // Valid item id is required.
|
||||
//
|
||||
// if (!PUSH(&context,
|
||||
// document.nodes.start[sequence-1].data.sequence.items, item))
|
||||
// return 0
|
||||
//
|
||||
// return 1
|
||||
//}
|
||||
//
|
||||
///*
|
||||
// * Append a pair of a key and a value to a mapping node.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(int)
|
||||
//yaml_document_append_mapping_pair(document *yaml_document_t,
|
||||
// mapping int, key int, value int)
|
||||
//{
|
||||
// struct {
|
||||
// error yaml_error_type_t
|
||||
// } context
|
||||
//
|
||||
// pair yaml_node_pair_t
|
||||
//
|
||||
// assert(document) // Non-NULL document is required.
|
||||
// assert(mapping > 0
|
||||
// && document.nodes.start + mapping <= document.nodes.top)
|
||||
// // Valid mapping id is required.
|
||||
// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
|
||||
// // A mapping node is required.
|
||||
// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
|
||||
// // Valid key id is required.
|
||||
// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
|
||||
// // Valid value id is required.
|
||||
//
|
||||
// pair.key = key
|
||||
// pair.value = value
|
||||
//
|
||||
// if (!PUSH(&context,
|
||||
// document.nodes.start[mapping-1].data.mapping.pairs, pair))
|
||||
// return 0
|
||||
//
|
||||
// return 1
|
||||
//}
|
||||
//
|
||||
//
|
69
vendor/github.com/zclconf/go-cty-yaml/converter.go
generated
vendored
69
vendor/github.com/zclconf/go-cty-yaml/converter.go
generated
vendored
@ -1,69 +0,0 @@
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// ConverterConfig is used to configure a new converter, using NewConverter.
|
||||
type ConverterConfig struct {
|
||||
// EncodeAsFlow, when set to true, causes Marshal to produce flow-style
|
||||
// mapping and sequence serializations.
|
||||
EncodeAsFlow bool
|
||||
}
|
||||
|
||||
// A Converter can marshal and unmarshal between cty values and YAML bytes.
|
||||
//
|
||||
// Because there are many different ways to map cty to YAML and vice-versa,
|
||||
// a converter is configurable using the settings in ConverterConfig, which
|
||||
// allow for a few different permutations of mapping to YAML.
|
||||
//
|
||||
// If you are just trying to work with generic, standard YAML, the predefined
|
||||
// converter in Standard should be good enough.
|
||||
type Converter struct {
|
||||
encodeAsFlow bool
|
||||
}
|
||||
|
||||
// NewConverter creates a new Converter with the given configuration.
|
||||
func NewConverter(config *ConverterConfig) *Converter {
|
||||
return &Converter{
|
||||
encodeAsFlow: config.EncodeAsFlow,
|
||||
}
|
||||
}
|
||||
|
||||
// Standard is a predefined Converter that produces and consumes generic YAML
|
||||
// using only built-in constructs that any other YAML implementation ought to
|
||||
// understand.
|
||||
var Standard *Converter = NewConverter(&ConverterConfig{})
|
||||
|
||||
// ImpliedType analyzes the given source code and returns a suitable type that
|
||||
// it could be decoded into.
|
||||
//
|
||||
// For a converter that is using standard YAML rather than cty-specific custom
|
||||
// tags, only a subset of cty types can be produced: strings, numbers, bools,
|
||||
// tuple types, and object types.
|
||||
func (c *Converter) ImpliedType(src []byte) (cty.Type, error) {
|
||||
return c.impliedType(src)
|
||||
}
|
||||
|
||||
// Marshal serializes the given value into a YAML document, using a fixed
|
||||
// mapping from cty types to YAML constructs.
|
||||
//
|
||||
// Note that unlike the function of the same name in the cty JSON package,
|
||||
// this does not take a type constraint and therefore the YAML serialization
|
||||
// cannot preserve late-bound type information in the serialization to be
|
||||
// recovered from Unmarshal. Instead, any cty.DynamicPseudoType in the type
|
||||
// constraint given to Unmarshal will be decoded as if the corresponding portion
|
||||
// of the input were processed with ImpliedType to find a target type.
|
||||
func (c *Converter) Marshal(v cty.Value) ([]byte, error) {
|
||||
return c.marshal(v)
|
||||
}
|
||||
|
||||
// Unmarshal reads the document found within the given source buffer
|
||||
// and attempts to convert it into a value conforming to the given type
|
||||
// constraint.
|
||||
//
|
||||
// An error is returned if the given source contains any YAML document
|
||||
// delimiters.
|
||||
func (c *Converter) Unmarshal(src []byte, ty cty.Type) (cty.Value, error) {
|
||||
return c.unmarshal(src, ty)
|
||||
}
|
57
vendor/github.com/zclconf/go-cty-yaml/cty_funcs.go
generated
vendored
57
vendor/github.com/zclconf/go-cty-yaml/cty_funcs.go
generated
vendored
@ -1,57 +0,0 @@
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
"github.com/zclconf/go-cty/cty/function"
|
||||
)
|
||||
|
||||
// YAMLDecodeFunc is a cty function for decoding arbitrary YAML source code
|
||||
// into a cty Value, using the ImpliedType and Unmarshal methods of the
|
||||
// Standard pre-defined converter.
|
||||
var YAMLDecodeFunc = function.New(&function.Spec{
|
||||
Params: []function.Parameter{
|
||||
{
|
||||
Name: "src",
|
||||
Type: cty.String,
|
||||
},
|
||||
},
|
||||
Type: func(args []cty.Value) (cty.Type, error) {
|
||||
if !args[0].IsKnown() {
|
||||
return cty.DynamicPseudoType, nil
|
||||
}
|
||||
if args[0].IsNull() {
|
||||
return cty.NilType, function.NewArgErrorf(0, "YAML source code cannot be null")
|
||||
}
|
||||
return Standard.ImpliedType([]byte(args[0].AsString()))
|
||||
},
|
||||
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
|
||||
if retType == cty.DynamicPseudoType {
|
||||
return cty.DynamicVal, nil
|
||||
}
|
||||
return Standard.Unmarshal([]byte(args[0].AsString()), retType)
|
||||
},
|
||||
})
|
||||
|
||||
// YAMLEncodeFunc is a cty function for encoding an arbitrary cty value
|
||||
// into YAML.
|
||||
var YAMLEncodeFunc = function.New(&function.Spec{
|
||||
Params: []function.Parameter{
|
||||
{
|
||||
Name: "value",
|
||||
Type: cty.DynamicPseudoType,
|
||||
AllowNull: true,
|
||||
AllowDynamicType: true,
|
||||
},
|
||||
},
|
||||
Type: function.StaticReturnType(cty.String),
|
||||
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
|
||||
if !args[0].IsWhollyKnown() {
|
||||
return cty.UnknownVal(retType), nil
|
||||
}
|
||||
raw, err := Standard.Marshal(args[0])
|
||||
if err != nil {
|
||||
return cty.NilVal, err
|
||||
}
|
||||
return cty.StringVal(string(raw)), nil
|
||||
},
|
||||
})
|
261
vendor/github.com/zclconf/go-cty-yaml/decode.go
generated
vendored
261
vendor/github.com/zclconf/go-cty-yaml/decode.go
generated
vendored
@ -1,261 +0,0 @@
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
"github.com/zclconf/go-cty/cty/convert"
|
||||
)
|
||||
|
||||
func (c *Converter) unmarshal(src []byte, ty cty.Type) (cty.Value, error) {
|
||||
p := &yaml_parser_t{}
|
||||
if !yaml_parser_initialize(p) {
|
||||
return cty.NilVal, errors.New("failed to initialize YAML parser")
|
||||
}
|
||||
if len(src) == 0 {
|
||||
src = []byte{'\n'}
|
||||
}
|
||||
|
||||
an := &valueAnalysis{
|
||||
anchorsPending: map[string]int{},
|
||||
anchorVals: map[string]cty.Value{},
|
||||
}
|
||||
|
||||
yaml_parser_set_input_string(p, src)
|
||||
|
||||
var evt yaml_event_t
|
||||
if !yaml_parser_parse(p, &evt) {
|
||||
return cty.NilVal, parserError(p)
|
||||
}
|
||||
if evt.typ != yaml_STREAM_START_EVENT {
|
||||
return cty.NilVal, parseEventErrorf(&evt, "missing stream start token")
|
||||
}
|
||||
if !yaml_parser_parse(p, &evt) {
|
||||
return cty.NilVal, parserError(p)
|
||||
}
|
||||
if evt.typ != yaml_DOCUMENT_START_EVENT {
|
||||
return cty.NilVal, parseEventErrorf(&evt, "missing start of document")
|
||||
}
|
||||
|
||||
v, err := c.unmarshalParse(an, p)
|
||||
if err != nil {
|
||||
return cty.NilVal, err
|
||||
}
|
||||
|
||||
if !yaml_parser_parse(p, &evt) {
|
||||
return cty.NilVal, parserError(p)
|
||||
}
|
||||
if evt.typ == yaml_DOCUMENT_START_EVENT {
|
||||
return cty.NilVal, parseEventErrorf(&evt, "only a single document is allowed")
|
||||
}
|
||||
if evt.typ != yaml_DOCUMENT_END_EVENT {
|
||||
return cty.NilVal, parseEventErrorf(&evt, "unexpected extra content (%s) after value", evt.typ.String())
|
||||
}
|
||||
if !yaml_parser_parse(p, &evt) {
|
||||
return cty.NilVal, parserError(p)
|
||||
}
|
||||
if evt.typ != yaml_STREAM_END_EVENT {
|
||||
return cty.NilVal, parseEventErrorf(&evt, "unexpected extra content after value")
|
||||
}
|
||||
|
||||
return convert.Convert(v, ty)
|
||||
}
|
||||
|
||||
func (c *Converter) unmarshalParse(an *valueAnalysis, p *yaml_parser_t) (cty.Value, error) {
|
||||
var evt yaml_event_t
|
||||
if !yaml_parser_parse(p, &evt) {
|
||||
return cty.NilVal, parserError(p)
|
||||
}
|
||||
return c.unmarshalParseRemainder(an, &evt, p)
|
||||
}
|
||||
|
||||
func (c *Converter) unmarshalParseRemainder(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) {
|
||||
switch evt.typ {
|
||||
case yaml_SCALAR_EVENT:
|
||||
return c.unmarshalScalar(an, evt, p)
|
||||
case yaml_ALIAS_EVENT:
|
||||
return c.unmarshalAlias(an, evt, p)
|
||||
case yaml_MAPPING_START_EVENT:
|
||||
return c.unmarshalMapping(an, evt, p)
|
||||
case yaml_SEQUENCE_START_EVENT:
|
||||
return c.unmarshalSequence(an, evt, p)
|
||||
case yaml_DOCUMENT_START_EVENT:
|
||||
return cty.NilVal, parseEventErrorf(evt, "only a single document is allowed")
|
||||
case yaml_STREAM_END_EVENT:
|
||||
// Decoding an empty buffer, probably
|
||||
return cty.NilVal, parseEventErrorf(evt, "expecting value but found end of stream")
|
||||
default:
|
||||
// Should never happen; the above should be comprehensive
|
||||
return cty.NilVal, parseEventErrorf(evt, "unexpected parser event %s", evt.typ.String())
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Converter) unmarshalScalar(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) {
|
||||
src := evt.value
|
||||
tag := string(evt.tag)
|
||||
anchor := string(evt.anchor)
|
||||
|
||||
if len(anchor) > 0 {
|
||||
an.beginAnchor(anchor)
|
||||
}
|
||||
|
||||
val, err := c.resolveScalar(tag, string(src), yaml_scalar_style_t(evt.style))
|
||||
if err != nil {
|
||||
return cty.NilVal, parseEventErrorWrap(evt, err)
|
||||
}
|
||||
|
||||
if val.RawEquals(mergeMappingVal) {
|
||||
// In any context other than a mapping key, this is just a plain string
|
||||
val = cty.StringVal("<<")
|
||||
}
|
||||
|
||||
if len(anchor) > 0 {
|
||||
an.completeAnchor(anchor, val)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
func (c *Converter) unmarshalMapping(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) {
|
||||
tag := string(evt.tag)
|
||||
anchor := string(evt.anchor)
|
||||
|
||||
if tag != "" && tag != yaml_MAP_TAG {
|
||||
return cty.NilVal, parseEventErrorf(evt, "can't interpret mapping as %s", tag)
|
||||
}
|
||||
|
||||
if anchor != "" {
|
||||
an.beginAnchor(anchor)
|
||||
}
|
||||
|
||||
vals := make(map[string]cty.Value)
|
||||
for {
|
||||
var nextEvt yaml_event_t
|
||||
if !yaml_parser_parse(p, &nextEvt) {
|
||||
return cty.NilVal, parserError(p)
|
||||
}
|
||||
if nextEvt.typ == yaml_MAPPING_END_EVENT {
|
||||
v := cty.ObjectVal(vals)
|
||||
if anchor != "" {
|
||||
an.completeAnchor(anchor, v)
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
if nextEvt.typ != yaml_SCALAR_EVENT {
|
||||
return cty.NilVal, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys")
|
||||
}
|
||||
keyVal, err := c.resolveScalar(string(nextEvt.tag), string(nextEvt.value), yaml_scalar_style_t(nextEvt.style))
|
||||
if err != nil {
|
||||
return cty.NilVal, err
|
||||
}
|
||||
if keyVal.RawEquals(mergeMappingVal) {
|
||||
// Merging the value (which must be a mapping) into our mapping,
|
||||
// then.
|
||||
val, err := c.unmarshalParse(an, p)
|
||||
if err != nil {
|
||||
return cty.NilVal, err
|
||||
}
|
||||
ty := val.Type()
|
||||
if !(ty.IsObjectType() || ty.IsMapType()) {
|
||||
return cty.NilVal, parseEventErrorf(&nextEvt, "cannot merge %s into mapping", ty.FriendlyName())
|
||||
}
|
||||
for it := val.ElementIterator(); it.Next(); {
|
||||
k, v := it.Element()
|
||||
vals[k.AsString()] = v
|
||||
}
|
||||
continue
|
||||
}
|
||||
if keyValStr, err := convert.Convert(keyVal, cty.String); err == nil {
|
||||
keyVal = keyValStr
|
||||
} else {
|
||||
return cty.NilVal, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys")
|
||||
}
|
||||
if keyVal.IsNull() {
|
||||
return cty.NilVal, parseEventErrorf(&nextEvt, "mapping key cannot be null")
|
||||
}
|
||||
if !keyVal.IsKnown() {
|
||||
return cty.NilVal, parseEventErrorf(&nextEvt, "mapping key must be known")
|
||||
}
|
||||
val, err := c.unmarshalParse(an, p)
|
||||
if err != nil {
|
||||
return cty.NilVal, err
|
||||
}
|
||||
|
||||
vals[keyVal.AsString()] = val
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Converter) unmarshalSequence(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) {
|
||||
tag := string(evt.tag)
|
||||
anchor := string(evt.anchor)
|
||||
|
||||
if tag != "" && tag != yaml_SEQ_TAG {
|
||||
return cty.NilVal, parseEventErrorf(evt, "can't interpret sequence as %s", tag)
|
||||
}
|
||||
|
||||
if anchor != "" {
|
||||
an.beginAnchor(anchor)
|
||||
}
|
||||
|
||||
var vals []cty.Value
|
||||
for {
|
||||
var nextEvt yaml_event_t
|
||||
if !yaml_parser_parse(p, &nextEvt) {
|
||||
return cty.NilVal, parserError(p)
|
||||
}
|
||||
if nextEvt.typ == yaml_SEQUENCE_END_EVENT {
|
||||
ty := cty.TupleVal(vals)
|
||||
if anchor != "" {
|
||||
an.completeAnchor(anchor, ty)
|
||||
}
|
||||
return ty, nil
|
||||
}
|
||||
|
||||
val, err := c.unmarshalParseRemainder(an, &nextEvt, p)
|
||||
if err != nil {
|
||||
return cty.NilVal, err
|
||||
}
|
||||
|
||||
vals = append(vals, val)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Converter) unmarshalAlias(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) {
|
||||
v, err := an.anchorVal(string(evt.anchor))
|
||||
if err != nil {
|
||||
err = parseEventErrorWrap(evt, err)
|
||||
}
|
||||
return v, err
|
||||
}
|
||||
|
||||
type valueAnalysis struct {
|
||||
anchorsPending map[string]int
|
||||
anchorVals map[string]cty.Value
|
||||
}
|
||||
|
||||
func (an *valueAnalysis) beginAnchor(name string) {
|
||||
an.anchorsPending[name]++
|
||||
}
|
||||
|
||||
func (an *valueAnalysis) completeAnchor(name string, v cty.Value) {
|
||||
an.anchorsPending[name]--
|
||||
if an.anchorsPending[name] == 0 {
|
||||
delete(an.anchorsPending, name)
|
||||
}
|
||||
an.anchorVals[name] = v
|
||||
}
|
||||
|
||||
func (an *valueAnalysis) anchorVal(name string) (cty.Value, error) {
|
||||
if _, pending := an.anchorsPending[name]; pending {
|
||||
// YAML normally allows self-referencing structures, but cty cannot
|
||||
// represent them (it requires all structures to be finite) so we
|
||||
// must fail here.
|
||||
return cty.NilVal, fmt.Errorf("cannot refer to anchor %q from inside its own definition", name)
|
||||
}
|
||||
ty, ok := an.anchorVals[name]
|
||||
if !ok {
|
||||
return cty.NilVal, fmt.Errorf("reference to undefined anchor %q", name)
|
||||
}
|
||||
return ty, nil
|
||||
}
|
1685
vendor/github.com/zclconf/go-cty-yaml/emitterc.go
generated
vendored
1685
vendor/github.com/zclconf/go-cty-yaml/emitterc.go
generated
vendored
File diff suppressed because it is too large
Load Diff
189
vendor/github.com/zclconf/go-cty-yaml/encode.go
generated
vendored
189
vendor/github.com/zclconf/go-cty-yaml/encode.go
generated
vendored
@ -1,189 +0,0 @@
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
func (c *Converter) marshal(v cty.Value) ([]byte, error) {
|
||||
var buf bytes.Buffer
|
||||
|
||||
e := &yaml_emitter_t{}
|
||||
yaml_emitter_initialize(e)
|
||||
yaml_emitter_set_output_writer(e, &buf)
|
||||
yaml_emitter_set_unicode(e, true)
|
||||
|
||||
var evt yaml_event_t
|
||||
yaml_stream_start_event_initialize(&evt, yaml_UTF8_ENCODING)
|
||||
if !yaml_emitter_emit(e, &evt) {
|
||||
return nil, emitterError(e)
|
||||
}
|
||||
yaml_document_start_event_initialize(&evt, nil, nil, true)
|
||||
if !yaml_emitter_emit(e, &evt) {
|
||||
return nil, emitterError(e)
|
||||
}
|
||||
|
||||
if err := c.marshalEmit(v, e); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
yaml_document_end_event_initialize(&evt, true)
|
||||
if !yaml_emitter_emit(e, &evt) {
|
||||
return nil, emitterError(e)
|
||||
}
|
||||
yaml_stream_end_event_initialize(&evt)
|
||||
if !yaml_emitter_emit(e, &evt) {
|
||||
return nil, emitterError(e)
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func (c *Converter) marshalEmit(v cty.Value, e *yaml_emitter_t) error {
|
||||
ty := v.Type()
|
||||
switch {
|
||||
case v.IsNull():
|
||||
return c.marshalPrimitive(v, e)
|
||||
case !v.IsKnown():
|
||||
return fmt.Errorf("cannot serialize unknown value as YAML")
|
||||
case ty.IsPrimitiveType():
|
||||
return c.marshalPrimitive(v, e)
|
||||
case ty.IsTupleType(), ty.IsListType(), ty.IsSetType():
|
||||
return c.marshalSequence(v, e)
|
||||
case ty.IsObjectType(), ty.IsMapType():
|
||||
return c.marshalMapping(v, e)
|
||||
default:
|
||||
return fmt.Errorf("can't marshal %s as YAML", ty.FriendlyName())
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Converter) marshalPrimitive(v cty.Value, e *yaml_emitter_t) error {
|
||||
var evt yaml_event_t
|
||||
|
||||
if v.IsNull() {
|
||||
yaml_scalar_event_initialize(
|
||||
&evt,
|
||||
nil,
|
||||
nil,
|
||||
[]byte("null"),
|
||||
true,
|
||||
true,
|
||||
yaml_PLAIN_SCALAR_STYLE,
|
||||
)
|
||||
if !yaml_emitter_emit(e, &evt) {
|
||||
return emitterError(e)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
switch v.Type() {
|
||||
case cty.String:
|
||||
str := v.AsString()
|
||||
style := yaml_DOUBLE_QUOTED_SCALAR_STYLE
|
||||
if strings.Contains(str, "\n") {
|
||||
style = yaml_LITERAL_SCALAR_STYLE
|
||||
}
|
||||
yaml_scalar_event_initialize(
|
||||
&evt,
|
||||
nil,
|
||||
nil,
|
||||
[]byte(str),
|
||||
true,
|
||||
true,
|
||||
style,
|
||||
)
|
||||
case cty.Number:
|
||||
str := v.AsBigFloat().Text('f', -1)
|
||||
yaml_scalar_event_initialize(
|
||||
&evt,
|
||||
nil,
|
||||
nil,
|
||||
[]byte(str),
|
||||
true,
|
||||
true,
|
||||
yaml_PLAIN_SCALAR_STYLE,
|
||||
)
|
||||
case cty.Bool:
|
||||
var str string
|
||||
switch v {
|
||||
case cty.True:
|
||||
str = "true"
|
||||
case cty.False:
|
||||
str = "false"
|
||||
}
|
||||
yaml_scalar_event_initialize(
|
||||
&evt,
|
||||
nil,
|
||||
nil,
|
||||
[]byte(str),
|
||||
true,
|
||||
true,
|
||||
yaml_PLAIN_SCALAR_STYLE,
|
||||
)
|
||||
}
|
||||
if !yaml_emitter_emit(e, &evt) {
|
||||
return emitterError(e)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Converter) marshalSequence(v cty.Value, e *yaml_emitter_t) error {
|
||||
style := yaml_BLOCK_SEQUENCE_STYLE
|
||||
if c.encodeAsFlow {
|
||||
style = yaml_FLOW_SEQUENCE_STYLE
|
||||
}
|
||||
|
||||
var evt yaml_event_t
|
||||
yaml_sequence_start_event_initialize(&evt, nil, nil, true, style)
|
||||
if !yaml_emitter_emit(e, &evt) {
|
||||
return emitterError(e)
|
||||
}
|
||||
|
||||
for it := v.ElementIterator(); it.Next(); {
|
||||
_, v := it.Element()
|
||||
err := c.marshalEmit(v, e)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
yaml_sequence_end_event_initialize(&evt)
|
||||
if !yaml_emitter_emit(e, &evt) {
|
||||
return emitterError(e)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Converter) marshalMapping(v cty.Value, e *yaml_emitter_t) error {
|
||||
style := yaml_BLOCK_MAPPING_STYLE
|
||||
if c.encodeAsFlow {
|
||||
style = yaml_FLOW_MAPPING_STYLE
|
||||
}
|
||||
|
||||
var evt yaml_event_t
|
||||
yaml_mapping_start_event_initialize(&evt, nil, nil, true, style)
|
||||
if !yaml_emitter_emit(e, &evt) {
|
||||
return emitterError(e)
|
||||
}
|
||||
|
||||
for it := v.ElementIterator(); it.Next(); {
|
||||
k, v := it.Element()
|
||||
err := c.marshalEmit(k, e)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = c.marshalEmit(v, e)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
yaml_mapping_end_event_initialize(&evt)
|
||||
if !yaml_emitter_emit(e, &evt) {
|
||||
return emitterError(e)
|
||||
}
|
||||
return nil
|
||||
}
|
97
vendor/github.com/zclconf/go-cty-yaml/error.go
generated
vendored
97
vendor/github.com/zclconf/go-cty-yaml/error.go
generated
vendored
@ -1,97 +0,0 @@
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Error is an error implementation used to report errors that correspond to
|
||||
// a particular position in an input buffer.
|
||||
type Error struct {
|
||||
cause error
|
||||
Line, Column int
|
||||
}
|
||||
|
||||
func (e Error) Error() string {
|
||||
return fmt.Sprintf("on line %d, column %d: %s", e.Line, e.Column, e.cause.Error())
|
||||
}
|
||||
|
||||
// Cause is an implementation of the interface used by
|
||||
// github.com/pkg/errors.Cause, returning the underlying error without the
|
||||
// position information.
|
||||
func (e Error) Cause() error {
|
||||
return e.cause
|
||||
}
|
||||
|
||||
// WrappedErrors is an implementation of github.com/hashicorp/errwrap.Wrapper
|
||||
// returning the underlying error without the position information.
|
||||
func (e Error) WrappedErrors() []error {
|
||||
return []error{e.cause}
|
||||
}
|
||||
|
||||
func parserError(p *yaml_parser_t) error {
|
||||
var cause error
|
||||
if len(p.problem) > 0 {
|
||||
cause = errors.New(p.problem)
|
||||
} else {
|
||||
cause = errors.New("invalid YAML syntax") // useless generic error, then
|
||||
}
|
||||
|
||||
return parserErrorWrap(p, cause)
|
||||
}
|
||||
|
||||
func parserErrorWrap(p *yaml_parser_t, cause error) error {
|
||||
switch {
|
||||
case p.problem_mark.line != 0:
|
||||
line := p.problem_mark.line
|
||||
column := p.problem_mark.column
|
||||
// Scanner errors don't iterate line before returning error
|
||||
if p.error == yaml_SCANNER_ERROR {
|
||||
line++
|
||||
column = 0
|
||||
}
|
||||
return Error{
|
||||
cause: cause,
|
||||
Line: line,
|
||||
Column: column + 1,
|
||||
}
|
||||
case p.context_mark.line != 0:
|
||||
return Error{
|
||||
cause: cause,
|
||||
Line: p.context_mark.line,
|
||||
Column: p.context_mark.column + 1,
|
||||
}
|
||||
default:
|
||||
return cause
|
||||
}
|
||||
}
|
||||
|
||||
func parserErrorf(p *yaml_parser_t, f string, vals ...interface{}) error {
|
||||
return parserErrorWrap(p, fmt.Errorf(f, vals...))
|
||||
}
|
||||
|
||||
func parseEventErrorWrap(evt *yaml_event_t, cause error) error {
|
||||
if evt.start_mark.line == 0 {
|
||||
// Event does not have a start mark, so we won't wrap the error at all
|
||||
return cause
|
||||
}
|
||||
return Error{
|
||||
cause: cause,
|
||||
Line: evt.start_mark.line,
|
||||
Column: evt.start_mark.column + 1,
|
||||
}
|
||||
}
|
||||
|
||||
func parseEventErrorf(evt *yaml_event_t, f string, vals ...interface{}) error {
|
||||
return parseEventErrorWrap(evt, fmt.Errorf(f, vals...))
|
||||
}
|
||||
|
||||
func emitterError(e *yaml_emitter_t) error {
|
||||
var cause error
|
||||
if len(e.problem) > 0 {
|
||||
cause = errors.New(e.problem)
|
||||
} else {
|
||||
cause = errors.New("failed to write YAML token") // useless generic error, then
|
||||
}
|
||||
return cause
|
||||
}
|
268
vendor/github.com/zclconf/go-cty-yaml/implied_type.go
generated
vendored
268
vendor/github.com/zclconf/go-cty-yaml/implied_type.go
generated
vendored
@ -1,268 +0,0 @@
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
"github.com/zclconf/go-cty/cty/convert"
|
||||
)
|
||||
|
||||
func (c *Converter) impliedType(src []byte) (cty.Type, error) {
|
||||
p := &yaml_parser_t{}
|
||||
if !yaml_parser_initialize(p) {
|
||||
return cty.NilType, errors.New("failed to initialize YAML parser")
|
||||
}
|
||||
if len(src) == 0 {
|
||||
src = []byte{'\n'}
|
||||
}
|
||||
|
||||
an := &typeAnalysis{
|
||||
anchorsPending: map[string]int{},
|
||||
anchorTypes: map[string]cty.Type{},
|
||||
}
|
||||
|
||||
yaml_parser_set_input_string(p, src)
|
||||
|
||||
var evt yaml_event_t
|
||||
if !yaml_parser_parse(p, &evt) {
|
||||
return cty.NilType, parserError(p)
|
||||
}
|
||||
if evt.typ != yaml_STREAM_START_EVENT {
|
||||
return cty.NilType, parseEventErrorf(&evt, "missing stream start token")
|
||||
}
|
||||
if !yaml_parser_parse(p, &evt) {
|
||||
return cty.NilType, parserError(p)
|
||||
}
|
||||
if evt.typ != yaml_DOCUMENT_START_EVENT {
|
||||
return cty.NilType, parseEventErrorf(&evt, "missing start of document")
|
||||
}
|
||||
|
||||
ty, err := c.impliedTypeParse(an, p)
|
||||
if err != nil {
|
||||
return cty.NilType, err
|
||||
}
|
||||
|
||||
if !yaml_parser_parse(p, &evt) {
|
||||
return cty.NilType, parserError(p)
|
||||
}
|
||||
if evt.typ == yaml_DOCUMENT_START_EVENT {
|
||||
return cty.NilType, parseEventErrorf(&evt, "only a single document is allowed")
|
||||
}
|
||||
if evt.typ != yaml_DOCUMENT_END_EVENT {
|
||||
return cty.NilType, parseEventErrorf(&evt, "unexpected extra content (%s) after value", evt.typ.String())
|
||||
}
|
||||
if !yaml_parser_parse(p, &evt) {
|
||||
return cty.NilType, parserError(p)
|
||||
}
|
||||
if evt.typ != yaml_STREAM_END_EVENT {
|
||||
return cty.NilType, parseEventErrorf(&evt, "unexpected extra content after value")
|
||||
}
|
||||
|
||||
return ty, err
|
||||
}
|
||||
|
||||
func (c *Converter) impliedTypeParse(an *typeAnalysis, p *yaml_parser_t) (cty.Type, error) {
|
||||
var evt yaml_event_t
|
||||
if !yaml_parser_parse(p, &evt) {
|
||||
return cty.NilType, parserError(p)
|
||||
}
|
||||
return c.impliedTypeParseRemainder(an, &evt, p)
|
||||
}
|
||||
|
||||
func (c *Converter) impliedTypeParseRemainder(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) {
|
||||
switch evt.typ {
|
||||
case yaml_SCALAR_EVENT:
|
||||
return c.impliedTypeScalar(an, evt, p)
|
||||
case yaml_ALIAS_EVENT:
|
||||
return c.impliedTypeAlias(an, evt, p)
|
||||
case yaml_MAPPING_START_EVENT:
|
||||
return c.impliedTypeMapping(an, evt, p)
|
||||
case yaml_SEQUENCE_START_EVENT:
|
||||
return c.impliedTypeSequence(an, evt, p)
|
||||
case yaml_DOCUMENT_START_EVENT:
|
||||
return cty.NilType, parseEventErrorf(evt, "only a single document is allowed")
|
||||
case yaml_STREAM_END_EVENT:
|
||||
// Decoding an empty buffer, probably
|
||||
return cty.NilType, parseEventErrorf(evt, "expecting value but found end of stream")
|
||||
default:
|
||||
// Should never happen; the above should be comprehensive
|
||||
return cty.NilType, parseEventErrorf(evt, "unexpected parser event %s", evt.typ.String())
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Converter) impliedTypeScalar(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) {
|
||||
src := evt.value
|
||||
tag := string(evt.tag)
|
||||
anchor := string(evt.anchor)
|
||||
implicit := evt.implicit
|
||||
|
||||
if len(anchor) > 0 {
|
||||
an.beginAnchor(anchor)
|
||||
}
|
||||
|
||||
var ty cty.Type
|
||||
switch {
|
||||
case tag == "" && !implicit:
|
||||
// Untagged explicit string
|
||||
ty = cty.String
|
||||
default:
|
||||
v, err := c.resolveScalar(tag, string(src), yaml_scalar_style_t(evt.style))
|
||||
if err != nil {
|
||||
return cty.NilType, parseEventErrorWrap(evt, err)
|
||||
}
|
||||
if v.RawEquals(mergeMappingVal) {
|
||||
// In any context other than a mapping key, this is just a plain string
|
||||
ty = cty.String
|
||||
} else {
|
||||
ty = v.Type()
|
||||
}
|
||||
}
|
||||
|
||||
if len(anchor) > 0 {
|
||||
an.completeAnchor(anchor, ty)
|
||||
}
|
||||
return ty, nil
|
||||
}
|
||||
|
||||
func (c *Converter) impliedTypeMapping(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) {
|
||||
tag := string(evt.tag)
|
||||
anchor := string(evt.anchor)
|
||||
|
||||
if tag != "" && tag != yaml_MAP_TAG {
|
||||
return cty.NilType, parseEventErrorf(evt, "can't interpret mapping as %s", tag)
|
||||
}
|
||||
|
||||
if anchor != "" {
|
||||
an.beginAnchor(anchor)
|
||||
}
|
||||
|
||||
atys := make(map[string]cty.Type)
|
||||
for {
|
||||
var nextEvt yaml_event_t
|
||||
if !yaml_parser_parse(p, &nextEvt) {
|
||||
return cty.NilType, parserError(p)
|
||||
}
|
||||
if nextEvt.typ == yaml_MAPPING_END_EVENT {
|
||||
ty := cty.Object(atys)
|
||||
if anchor != "" {
|
||||
an.completeAnchor(anchor, ty)
|
||||
}
|
||||
return ty, nil
|
||||
}
|
||||
|
||||
if nextEvt.typ != yaml_SCALAR_EVENT {
|
||||
return cty.NilType, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys")
|
||||
}
|
||||
keyVal, err := c.resolveScalar(string(nextEvt.tag), string(nextEvt.value), yaml_scalar_style_t(nextEvt.style))
|
||||
if err != nil {
|
||||
return cty.NilType, err
|
||||
}
|
||||
if keyVal.RawEquals(mergeMappingVal) {
|
||||
// Merging the value (which must be a mapping) into our mapping,
|
||||
// then.
|
||||
ty, err := c.impliedTypeParse(an, p)
|
||||
if err != nil {
|
||||
return cty.NilType, err
|
||||
}
|
||||
if !ty.IsObjectType() {
|
||||
return cty.NilType, parseEventErrorf(&nextEvt, "cannot merge %s into mapping", ty.FriendlyName())
|
||||
}
|
||||
for name, aty := range ty.AttributeTypes() {
|
||||
atys[name] = aty
|
||||
}
|
||||
continue
|
||||
}
|
||||
if keyValStr, err := convert.Convert(keyVal, cty.String); err == nil {
|
||||
keyVal = keyValStr
|
||||
} else {
|
||||
return cty.NilType, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys")
|
||||
}
|
||||
if keyVal.IsNull() {
|
||||
return cty.NilType, parseEventErrorf(&nextEvt, "mapping key cannot be null")
|
||||
}
|
||||
if !keyVal.IsKnown() {
|
||||
return cty.NilType, parseEventErrorf(&nextEvt, "mapping key must be known")
|
||||
}
|
||||
valTy, err := c.impliedTypeParse(an, p)
|
||||
if err != nil {
|
||||
return cty.NilType, err
|
||||
}
|
||||
|
||||
atys[keyVal.AsString()] = valTy
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Converter) impliedTypeSequence(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) {
|
||||
tag := string(evt.tag)
|
||||
anchor := string(evt.anchor)
|
||||
|
||||
if tag != "" && tag != yaml_SEQ_TAG {
|
||||
return cty.NilType, parseEventErrorf(evt, "can't interpret sequence as %s", tag)
|
||||
}
|
||||
|
||||
if anchor != "" {
|
||||
an.beginAnchor(anchor)
|
||||
}
|
||||
|
||||
var atys []cty.Type
|
||||
for {
|
||||
var nextEvt yaml_event_t
|
||||
if !yaml_parser_parse(p, &nextEvt) {
|
||||
return cty.NilType, parserError(p)
|
||||
}
|
||||
if nextEvt.typ == yaml_SEQUENCE_END_EVENT {
|
||||
ty := cty.Tuple(atys)
|
||||
if anchor != "" {
|
||||
an.completeAnchor(anchor, ty)
|
||||
}
|
||||
return ty, nil
|
||||
}
|
||||
|
||||
valTy, err := c.impliedTypeParseRemainder(an, &nextEvt, p)
|
||||
if err != nil {
|
||||
return cty.NilType, err
|
||||
}
|
||||
|
||||
atys = append(atys, valTy)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Converter) impliedTypeAlias(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) {
|
||||
ty, err := an.anchorType(string(evt.anchor))
|
||||
if err != nil {
|
||||
err = parseEventErrorWrap(evt, err)
|
||||
}
|
||||
return ty, err
|
||||
}
|
||||
|
||||
type typeAnalysis struct {
|
||||
anchorsPending map[string]int
|
||||
anchorTypes map[string]cty.Type
|
||||
}
|
||||
|
||||
func (an *typeAnalysis) beginAnchor(name string) {
|
||||
an.anchorsPending[name]++
|
||||
}
|
||||
|
||||
func (an *typeAnalysis) completeAnchor(name string, ty cty.Type) {
|
||||
an.anchorsPending[name]--
|
||||
if an.anchorsPending[name] == 0 {
|
||||
delete(an.anchorsPending, name)
|
||||
}
|
||||
an.anchorTypes[name] = ty
|
||||
}
|
||||
|
||||
func (an *typeAnalysis) anchorType(name string) (cty.Type, error) {
|
||||
if _, pending := an.anchorsPending[name]; pending {
|
||||
// YAML normally allows self-referencing structures, but cty cannot
|
||||
// represent them (it requires all structures to be finite) so we
|
||||
// must fail here.
|
||||
return cty.NilType, fmt.Errorf("cannot refer to anchor %q from inside its own definition", name)
|
||||
}
|
||||
ty, ok := an.anchorTypes[name]
|
||||
if !ok {
|
||||
return cty.NilType, fmt.Errorf("reference to undefined anchor %q", name)
|
||||
}
|
||||
return ty, nil
|
||||
}
|
1095
vendor/github.com/zclconf/go-cty-yaml/parserc.go
generated
vendored
1095
vendor/github.com/zclconf/go-cty-yaml/parserc.go
generated
vendored
File diff suppressed because it is too large
Load Diff
412
vendor/github.com/zclconf/go-cty-yaml/readerc.go
generated
vendored
412
vendor/github.com/zclconf/go-cty-yaml/readerc.go
generated
vendored
@ -1,412 +0,0 @@
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// Set the reader error and return 0.
|
||||
func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
|
||||
parser.error = yaml_READER_ERROR
|
||||
parser.problem = problem
|
||||
parser.problem_offset = offset
|
||||
parser.problem_value = value
|
||||
return false
|
||||
}
|
||||
|
||||
// Byte order marks.
|
||||
const (
|
||||
bom_UTF8 = "\xef\xbb\xbf"
|
||||
bom_UTF16LE = "\xff\xfe"
|
||||
bom_UTF16BE = "\xfe\xff"
|
||||
)
|
||||
|
||||
// Determine the input stream encoding by checking the BOM symbol. If no BOM is
|
||||
// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure.
|
||||
func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
|
||||
// Ensure that we had enough bytes in the raw buffer.
|
||||
for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
|
||||
if !yaml_parser_update_raw_buffer(parser) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Determine the encoding.
|
||||
buf := parser.raw_buffer
|
||||
pos := parser.raw_buffer_pos
|
||||
avail := len(buf) - pos
|
||||
if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
|
||||
parser.encoding = yaml_UTF16LE_ENCODING
|
||||
parser.raw_buffer_pos += 2
|
||||
parser.offset += 2
|
||||
} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
|
||||
parser.encoding = yaml_UTF16BE_ENCODING
|
||||
parser.raw_buffer_pos += 2
|
||||
parser.offset += 2
|
||||
} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
|
||||
parser.encoding = yaml_UTF8_ENCODING
|
||||
parser.raw_buffer_pos += 3
|
||||
parser.offset += 3
|
||||
} else {
|
||||
parser.encoding = yaml_UTF8_ENCODING
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Update the raw buffer.
|
||||
func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
|
||||
size_read := 0
|
||||
|
||||
// Return if the raw buffer is full.
|
||||
if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
|
||||
return true
|
||||
}
|
||||
|
||||
// Return on EOF.
|
||||
if parser.eof {
|
||||
return true
|
||||
}
|
||||
|
||||
// Move the remaining bytes in the raw buffer to the beginning.
|
||||
if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
|
||||
copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
|
||||
}
|
||||
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
|
||||
parser.raw_buffer_pos = 0
|
||||
|
||||
// Call the read handler to fill the buffer.
|
||||
size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
|
||||
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
|
||||
if err == io.EOF {
|
||||
parser.eof = true
|
||||
} else if err != nil {
|
||||
return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Ensure that the buffer contains at least `length` characters.
|
||||
// Return true on success, false on failure.
|
||||
//
|
||||
// The length is supposed to be significantly less that the buffer size.
|
||||
func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
|
||||
if parser.read_handler == nil {
|
||||
panic("read handler must be set")
|
||||
}
|
||||
|
||||
// [Go] This function was changed to guarantee the requested length size at EOF.
|
||||
// The fact we need to do this is pretty awful, but the description above implies
|
||||
// for that to be the case, and there are tests
|
||||
|
||||
// If the EOF flag is set and the raw buffer is empty, do nothing.
|
||||
if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
|
||||
// [Go] ACTUALLY! Read the documentation of this function above.
|
||||
// This is just broken. To return true, we need to have the
|
||||
// given length in the buffer. Not doing that means every single
|
||||
// check that calls this function to make sure the buffer has a
|
||||
// given length is Go) panicking; or C) accessing invalid memory.
|
||||
//return true
|
||||
}
|
||||
|
||||
// Return if the buffer contains enough characters.
|
||||
if parser.unread >= length {
|
||||
return true
|
||||
}
|
||||
|
||||
// Determine the input encoding if it is not known yet.
|
||||
if parser.encoding == yaml_ANY_ENCODING {
|
||||
if !yaml_parser_determine_encoding(parser) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Move the unread characters to the beginning of the buffer.
|
||||
buffer_len := len(parser.buffer)
|
||||
if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
|
||||
copy(parser.buffer, parser.buffer[parser.buffer_pos:])
|
||||
buffer_len -= parser.buffer_pos
|
||||
parser.buffer_pos = 0
|
||||
} else if parser.buffer_pos == buffer_len {
|
||||
buffer_len = 0
|
||||
parser.buffer_pos = 0
|
||||
}
|
||||
|
||||
// Open the whole buffer for writing, and cut it before returning.
|
||||
parser.buffer = parser.buffer[:cap(parser.buffer)]
|
||||
|
||||
// Fill the buffer until it has enough characters.
|
||||
first := true
|
||||
for parser.unread < length {
|
||||
|
||||
// Fill the raw buffer if necessary.
|
||||
if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
|
||||
if !yaml_parser_update_raw_buffer(parser) {
|
||||
parser.buffer = parser.buffer[:buffer_len]
|
||||
return false
|
||||
}
|
||||
}
|
||||
first = false
|
||||
|
||||
// Decode the raw buffer.
|
||||
inner:
|
||||
for parser.raw_buffer_pos != len(parser.raw_buffer) {
|
||||
var value rune
|
||||
var width int
|
||||
|
||||
raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
|
||||
|
||||
// Decode the next character.
|
||||
switch parser.encoding {
|
||||
case yaml_UTF8_ENCODING:
|
||||
// Decode a UTF-8 character. Check RFC 3629
|
||||
// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
|
||||
//
|
||||
// The following table (taken from the RFC) is used for
|
||||
// decoding.
|
||||
//
|
||||
// Char. number range | UTF-8 octet sequence
|
||||
// (hexadecimal) | (binary)
|
||||
// --------------------+------------------------------------
|
||||
// 0000 0000-0000 007F | 0xxxxxxx
|
||||
// 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
|
||||
// 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
|
||||
// 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
|
||||
//
|
||||
// Additionally, the characters in the range 0xD800-0xDFFF
|
||||
// are prohibited as they are reserved for use with UTF-16
|
||||
// surrogate pairs.
|
||||
|
||||
// Determine the length of the UTF-8 sequence.
|
||||
octet := parser.raw_buffer[parser.raw_buffer_pos]
|
||||
switch {
|
||||
case octet&0x80 == 0x00:
|
||||
width = 1
|
||||
case octet&0xE0 == 0xC0:
|
||||
width = 2
|
||||
case octet&0xF0 == 0xE0:
|
||||
width = 3
|
||||
case octet&0xF8 == 0xF0:
|
||||
width = 4
|
||||
default:
|
||||
// The leading octet is invalid.
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"invalid leading UTF-8 octet",
|
||||
parser.offset, int(octet))
|
||||
}
|
||||
|
||||
// Check if the raw buffer contains an incomplete character.
|
||||
if width > raw_unread {
|
||||
if parser.eof {
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"incomplete UTF-8 octet sequence",
|
||||
parser.offset, -1)
|
||||
}
|
||||
break inner
|
||||
}
|
||||
|
||||
// Decode the leading octet.
|
||||
switch {
|
||||
case octet&0x80 == 0x00:
|
||||
value = rune(octet & 0x7F)
|
||||
case octet&0xE0 == 0xC0:
|
||||
value = rune(octet & 0x1F)
|
||||
case octet&0xF0 == 0xE0:
|
||||
value = rune(octet & 0x0F)
|
||||
case octet&0xF8 == 0xF0:
|
||||
value = rune(octet & 0x07)
|
||||
default:
|
||||
value = 0
|
||||
}
|
||||
|
||||
// Check and decode the trailing octets.
|
||||
for k := 1; k < width; k++ {
|
||||
octet = parser.raw_buffer[parser.raw_buffer_pos+k]
|
||||
|
||||
// Check if the octet is valid.
|
||||
if (octet & 0xC0) != 0x80 {
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"invalid trailing UTF-8 octet",
|
||||
parser.offset+k, int(octet))
|
||||
}
|
||||
|
||||
// Decode the octet.
|
||||
value = (value << 6) + rune(octet&0x3F)
|
||||
}
|
||||
|
||||
// Check the length of the sequence against the value.
|
||||
switch {
|
||||
case width == 1:
|
||||
case width == 2 && value >= 0x80:
|
||||
case width == 3 && value >= 0x800:
|
||||
case width == 4 && value >= 0x10000:
|
||||
default:
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"invalid length of a UTF-8 sequence",
|
||||
parser.offset, -1)
|
||||
}
|
||||
|
||||
// Check the range of the value.
|
||||
if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"invalid Unicode character",
|
||||
parser.offset, int(value))
|
||||
}
|
||||
|
||||
case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
|
||||
var low, high int
|
||||
if parser.encoding == yaml_UTF16LE_ENCODING {
|
||||
low, high = 0, 1
|
||||
} else {
|
||||
low, high = 1, 0
|
||||
}
|
||||
|
||||
// The UTF-16 encoding is not as simple as one might
|
||||
// naively think. Check RFC 2781
|
||||
// (http://www.ietf.org/rfc/rfc2781.txt).
|
||||
//
|
||||
// Normally, two subsequent bytes describe a Unicode
|
||||
// character. However a special technique (called a
|
||||
// surrogate pair) is used for specifying character
|
||||
// values larger than 0xFFFF.
|
||||
//
|
||||
// A surrogate pair consists of two pseudo-characters:
|
||||
// high surrogate area (0xD800-0xDBFF)
|
||||
// low surrogate area (0xDC00-0xDFFF)
|
||||
//
|
||||
// The following formulas are used for decoding
|
||||
// and encoding characters using surrogate pairs:
|
||||
//
|
||||
// U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
|
||||
// U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
|
||||
// W1 = 110110yyyyyyyyyy
|
||||
// W2 = 110111xxxxxxxxxx
|
||||
//
|
||||
// where U is the character value, W1 is the high surrogate
|
||||
// area, W2 is the low surrogate area.
|
||||
|
||||
// Check for incomplete UTF-16 character.
|
||||
if raw_unread < 2 {
|
||||
if parser.eof {
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"incomplete UTF-16 character",
|
||||
parser.offset, -1)
|
||||
}
|
||||
break inner
|
||||
}
|
||||
|
||||
// Get the character.
|
||||
value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
|
||||
(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
|
||||
|
||||
// Check for unexpected low surrogate area.
|
||||
if value&0xFC00 == 0xDC00 {
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"unexpected low surrogate area",
|
||||
parser.offset, int(value))
|
||||
}
|
||||
|
||||
// Check for a high surrogate area.
|
||||
if value&0xFC00 == 0xD800 {
|
||||
width = 4
|
||||
|
||||
// Check for incomplete surrogate pair.
|
||||
if raw_unread < 4 {
|
||||
if parser.eof {
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"incomplete UTF-16 surrogate pair",
|
||||
parser.offset, -1)
|
||||
}
|
||||
break inner
|
||||
}
|
||||
|
||||
// Get the next character.
|
||||
value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
|
||||
(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
|
||||
|
||||
// Check for a low surrogate area.
|
||||
if value2&0xFC00 != 0xDC00 {
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"expected low surrogate area",
|
||||
parser.offset+2, int(value2))
|
||||
}
|
||||
|
||||
// Generate the value of the surrogate pair.
|
||||
value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
|
||||
} else {
|
||||
width = 2
|
||||
}
|
||||
|
||||
default:
|
||||
panic("impossible")
|
||||
}
|
||||
|
||||
// Check if the character is in the allowed range:
|
||||
// #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
|
||||
// | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
|
||||
// | [#x10000-#x10FFFF] (32 bit)
|
||||
switch {
|
||||
case value == 0x09:
|
||||
case value == 0x0A:
|
||||
case value == 0x0D:
|
||||
case value >= 0x20 && value <= 0x7E:
|
||||
case value == 0x85:
|
||||
case value >= 0xA0 && value <= 0xD7FF:
|
||||
case value >= 0xE000 && value <= 0xFFFD:
|
||||
case value >= 0x10000 && value <= 0x10FFFF:
|
||||
default:
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"control characters are not allowed",
|
||||
parser.offset, int(value))
|
||||
}
|
||||
|
||||
// Move the raw pointers.
|
||||
parser.raw_buffer_pos += width
|
||||
parser.offset += width
|
||||
|
||||
// Finally put the character into the buffer.
|
||||
if value <= 0x7F {
|
||||
// 0000 0000-0000 007F . 0xxxxxxx
|
||||
parser.buffer[buffer_len+0] = byte(value)
|
||||
buffer_len += 1
|
||||
} else if value <= 0x7FF {
|
||||
// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
|
||||
parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
|
||||
parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
|
||||
buffer_len += 2
|
||||
} else if value <= 0xFFFF {
|
||||
// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
|
||||
parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
|
||||
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
|
||||
parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
|
||||
buffer_len += 3
|
||||
} else {
|
||||
// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
|
||||
parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
|
||||
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
|
||||
parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
|
||||
parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
|
||||
buffer_len += 4
|
||||
}
|
||||
|
||||
parser.unread++
|
||||
}
|
||||
|
||||
// On EOF, put NUL into the buffer and return.
|
||||
if parser.eof {
|
||||
parser.buffer[buffer_len] = 0
|
||||
buffer_len++
|
||||
parser.unread++
|
||||
break
|
||||
}
|
||||
}
|
||||
// [Go] Read the documentation of this function above. To return true,
|
||||
// we need to have the given length in the buffer. Not doing that means
|
||||
// every single check that calls this function to make sure the buffer
|
||||
// has a given length is Go) panicking; or C) accessing invalid memory.
|
||||
// This happens here due to the EOF above breaking early.
|
||||
for buffer_len < length {
|
||||
parser.buffer[buffer_len] = 0
|
||||
buffer_len++
|
||||
}
|
||||
parser.buffer = parser.buffer[:buffer_len]
|
||||
return true
|
||||
}
|
288
vendor/github.com/zclconf/go-cty-yaml/resolve.go
generated
vendored
288
vendor/github.com/zclconf/go-cty-yaml/resolve.go
generated
vendored
@ -1,288 +0,0 @@
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
type resolveMapItem struct {
|
||||
value cty.Value
|
||||
tag string
|
||||
}
|
||||
|
||||
var resolveTable = make([]byte, 256)
|
||||
var resolveMap = make(map[string]resolveMapItem)
|
||||
|
||||
func init() {
|
||||
t := resolveTable
|
||||
t[int('+')] = 'S' // Sign
|
||||
t[int('-')] = 'S'
|
||||
for _, c := range "0123456789" {
|
||||
t[int(c)] = 'D' // Digit
|
||||
}
|
||||
for _, c := range "yYnNtTfFoO~" {
|
||||
t[int(c)] = 'M' // In map
|
||||
}
|
||||
t[int('.')] = '.' // Float (potentially in map)
|
||||
|
||||
var resolveMapList = []struct {
|
||||
v cty.Value
|
||||
tag string
|
||||
l []string
|
||||
}{
|
||||
{cty.True, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
|
||||
{cty.True, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
|
||||
{cty.True, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
|
||||
{cty.False, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
|
||||
{cty.False, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
|
||||
{cty.False, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
|
||||
{cty.NullVal(cty.DynamicPseudoType), yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
|
||||
{cty.PositiveInfinity, yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
|
||||
{cty.PositiveInfinity, yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
|
||||
{cty.NegativeInfinity, yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
|
||||
}
|
||||
|
||||
m := resolveMap
|
||||
for _, item := range resolveMapList {
|
||||
for _, s := range item.l {
|
||||
m[s] = resolveMapItem{item.v, item.tag}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const longTagPrefix = "tag:yaml.org,2002:"
|
||||
|
||||
func shortTag(tag string) string {
|
||||
// TODO This can easily be made faster and produce less garbage.
|
||||
if strings.HasPrefix(tag, longTagPrefix) {
|
||||
return "!!" + tag[len(longTagPrefix):]
|
||||
}
|
||||
return tag
|
||||
}
|
||||
|
||||
func longTag(tag string) string {
|
||||
if strings.HasPrefix(tag, "!!") {
|
||||
return longTagPrefix + tag[2:]
|
||||
}
|
||||
return tag
|
||||
}
|
||||
|
||||
func resolvableTag(tag string) bool {
|
||||
switch tag {
|
||||
case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG, yaml_BINARY_TAG:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
|
||||
|
||||
func (c *Converter) resolveScalar(tag string, src string, style yaml_scalar_style_t) (cty.Value, error) {
|
||||
if !resolvableTag(tag) {
|
||||
return cty.NilVal, fmt.Errorf("unsupported tag %q", tag)
|
||||
}
|
||||
|
||||
// Any data is accepted as a !!str or !!binary.
|
||||
// Otherwise, the prefix is enough of a hint about what it might be.
|
||||
hint := byte('N')
|
||||
if src != "" {
|
||||
hint = resolveTable[src[0]]
|
||||
}
|
||||
if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
|
||||
if style == yaml_SINGLE_QUOTED_SCALAR_STYLE || style == yaml_DOUBLE_QUOTED_SCALAR_STYLE {
|
||||
return cty.StringVal(src), nil
|
||||
}
|
||||
|
||||
// Handle things we can lookup in a map.
|
||||
if item, ok := resolveMap[src]; ok {
|
||||
return item.value, nil
|
||||
}
|
||||
|
||||
if tag == "" {
|
||||
for _, nan := range []string{".nan", ".NaN", ".NAN"} {
|
||||
if src == nan {
|
||||
// cty cannot represent NaN, so this is an error
|
||||
return cty.NilVal, fmt.Errorf("floating point NaN is not supported")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Base 60 floats are intentionally not supported.
|
||||
|
||||
switch hint {
|
||||
case 'M':
|
||||
// We've already checked the map above.
|
||||
|
||||
case '.':
|
||||
// Not in the map, so maybe a normal float.
|
||||
if numberVal, err := cty.ParseNumberVal(src); err == nil {
|
||||
return numberVal, nil
|
||||
}
|
||||
|
||||
case 'D', 'S':
|
||||
// Int, float, or timestamp.
|
||||
// Only try values as a timestamp if the value is unquoted or there's an explicit
|
||||
// !!timestamp tag.
|
||||
if tag == "" || tag == yaml_TIMESTAMP_TAG {
|
||||
t, ok := parseTimestamp(src)
|
||||
if ok {
|
||||
// cty has no timestamp type, but its functions stdlib
|
||||
// conventionally uses strings in an RFC3339 encoding
|
||||
// to represent time, so we'll follow that convention here.
|
||||
return cty.StringVal(t.Format(time.RFC3339)), nil
|
||||
}
|
||||
}
|
||||
|
||||
plain := strings.Replace(src, "_", "", -1)
|
||||
if numberVal, err := cty.ParseNumberVal(plain); err == nil {
|
||||
return numberVal, nil
|
||||
}
|
||||
if strings.HasPrefix(plain, "0b") || strings.HasPrefix(plain, "-0b") {
|
||||
tag = yaml_INT_TAG // will handle parsing below in our tag switch
|
||||
}
|
||||
default:
|
||||
panic(fmt.Sprintf("cannot resolve tag %q with source %q", tag, src))
|
||||
}
|
||||
}
|
||||
|
||||
if tag == "" && src == "<<" {
|
||||
return mergeMappingVal, nil
|
||||
}
|
||||
|
||||
switch tag {
|
||||
case yaml_STR_TAG, yaml_BINARY_TAG:
|
||||
// If it's binary then we want to keep the base64 representation, because
|
||||
// cty has no binary type, but we will check that it's actually base64.
|
||||
if tag == yaml_BINARY_TAG {
|
||||
_, err := base64.StdEncoding.DecodeString(src)
|
||||
if err != nil {
|
||||
return cty.NilVal, fmt.Errorf("cannot parse %q as %s: not valid base64", src, tag)
|
||||
}
|
||||
}
|
||||
return cty.StringVal(src), nil
|
||||
case yaml_BOOL_TAG:
|
||||
item, ok := resolveMap[src]
|
||||
if !ok || item.tag != yaml_BOOL_TAG {
|
||||
return cty.NilVal, fmt.Errorf("cannot parse %q as %s", src, tag)
|
||||
}
|
||||
return item.value, nil
|
||||
case yaml_FLOAT_TAG, yaml_INT_TAG:
|
||||
// Note: We don't actually check that a value tagged INT is a whole
|
||||
// number here. We could, but cty generally doesn't care about the
|
||||
// int/float distinction, so we'll just be generous and accept it.
|
||||
plain := strings.Replace(src, "_", "", -1)
|
||||
if numberVal, err := cty.ParseNumberVal(plain); err == nil { // handles decimal integers and floats
|
||||
return numberVal, nil
|
||||
}
|
||||
if intv, err := strconv.ParseInt(plain, 0, 64); err == nil { // handles 0x and 00 prefixes
|
||||
return cty.NumberIntVal(intv), nil
|
||||
}
|
||||
if uintv, err := strconv.ParseUint(plain, 0, 64); err == nil { // handles 0x and 00 prefixes
|
||||
return cty.NumberUIntVal(uintv), nil
|
||||
}
|
||||
if strings.HasPrefix(plain, "0b") {
|
||||
intv, err := strconv.ParseInt(plain[2:], 2, 64)
|
||||
if err == nil {
|
||||
return cty.NumberIntVal(intv), nil
|
||||
}
|
||||
uintv, err := strconv.ParseUint(plain[2:], 2, 64)
|
||||
if err == nil {
|
||||
return cty.NumberUIntVal(uintv), nil
|
||||
}
|
||||
} else if strings.HasPrefix(plain, "-0b") {
|
||||
intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
|
||||
if err == nil {
|
||||
return cty.NumberIntVal(intv), nil
|
||||
}
|
||||
}
|
||||
return cty.NilVal, fmt.Errorf("cannot parse %q as %s", src, tag)
|
||||
case yaml_TIMESTAMP_TAG:
|
||||
t, ok := parseTimestamp(src)
|
||||
if ok {
|
||||
// cty has no timestamp type, but its functions stdlib
|
||||
// conventionally uses strings in an RFC3339 encoding
|
||||
// to represent time, so we'll follow that convention here.
|
||||
return cty.StringVal(t.Format(time.RFC3339)), nil
|
||||
}
|
||||
return cty.NilVal, fmt.Errorf("cannot parse %q as %s", src, tag)
|
||||
case yaml_NULL_TAG:
|
||||
return cty.NullVal(cty.DynamicPseudoType), nil
|
||||
case "":
|
||||
return cty.StringVal(src), nil
|
||||
default:
|
||||
return cty.NilVal, fmt.Errorf("unsupported tag %q", tag)
|
||||
}
|
||||
}
|
||||
|
||||
// encodeBase64 encodes s as base64 that is broken up into multiple lines
|
||||
// as appropriate for the resulting length.
|
||||
func encodeBase64(s string) string {
|
||||
const lineLen = 70
|
||||
encLen := base64.StdEncoding.EncodedLen(len(s))
|
||||
lines := encLen/lineLen + 1
|
||||
buf := make([]byte, encLen*2+lines)
|
||||
in := buf[0:encLen]
|
||||
out := buf[encLen:]
|
||||
base64.StdEncoding.Encode(in, []byte(s))
|
||||
k := 0
|
||||
for i := 0; i < len(in); i += lineLen {
|
||||
j := i + lineLen
|
||||
if j > len(in) {
|
||||
j = len(in)
|
||||
}
|
||||
k += copy(out[k:], in[i:j])
|
||||
if lines > 1 {
|
||||
out[k] = '\n'
|
||||
k++
|
||||
}
|
||||
}
|
||||
return string(out[:k])
|
||||
}
|
||||
|
||||
// This is a subset of the formats allowed by the regular expression
|
||||
// defined at http://yaml.org/type/timestamp.html.
|
||||
var allowedTimestampFormats = []string{
|
||||
"2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields.
|
||||
"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
|
||||
"2006-1-2 15:4:5.999999999", // space separated with no time zone
|
||||
"2006-1-2", // date only
|
||||
// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
|
||||
// from the set of examples.
|
||||
}
|
||||
|
||||
// parseTimestamp parses s as a timestamp string and
|
||||
// returns the timestamp and reports whether it succeeded.
|
||||
// Timestamp formats are defined at http://yaml.org/type/timestamp.html
|
||||
func parseTimestamp(s string) (time.Time, bool) {
|
||||
// TODO write code to check all the formats supported by
|
||||
// http://yaml.org/type/timestamp.html instead of using time.Parse.
|
||||
|
||||
// Quick check: all date formats start with YYYY-.
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
if c := s[i]; c < '0' || c > '9' {
|
||||
break
|
||||
}
|
||||
}
|
||||
if i != 4 || i == len(s) || s[i] != '-' {
|
||||
return time.Time{}, false
|
||||
}
|
||||
for _, format := range allowedTimestampFormats {
|
||||
if t, err := time.Parse(format, s); err == nil {
|
||||
return t, true
|
||||
}
|
||||
}
|
||||
return time.Time{}, false
|
||||
}
|
||||
|
||||
type mergeMapping struct{}
|
||||
|
||||
var mergeMappingTy = cty.Capsule("merge mapping", reflect.TypeOf(mergeMapping{}))
|
||||
var mergeMappingVal = cty.CapsuleVal(mergeMappingTy, &mergeMapping{})
|
2696
vendor/github.com/zclconf/go-cty-yaml/scannerc.go
generated
vendored
2696
vendor/github.com/zclconf/go-cty-yaml/scannerc.go
generated
vendored
File diff suppressed because it is too large
Load Diff
26
vendor/github.com/zclconf/go-cty-yaml/writerc.go
generated
vendored
26
vendor/github.com/zclconf/go-cty-yaml/writerc.go
generated
vendored
@ -1,26 +0,0 @@
|
||||
package yaml
|
||||
|
||||
// Set the writer error and return false.
|
||||
func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
|
||||
emitter.error = yaml_WRITER_ERROR
|
||||
emitter.problem = problem
|
||||
return false
|
||||
}
|
||||
|
||||
// Flush the output buffer.
|
||||
func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
|
||||
if emitter.write_handler == nil {
|
||||
panic("write handler not set")
|
||||
}
|
||||
|
||||
// Check if the buffer is empty.
|
||||
if emitter.buffer_pos == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
|
||||
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
|
||||
}
|
||||
emitter.buffer_pos = 0
|
||||
return true
|
||||
}
|
215
vendor/github.com/zclconf/go-cty-yaml/yaml.go
generated
vendored
215
vendor/github.com/zclconf/go-cty-yaml/yaml.go
generated
vendored
@ -1,215 +0,0 @@
|
||||
// Package yaml can marshal and unmarshal cty values in YAML format.
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// Unmarshal reads the document found within the given source buffer
// and attempts to convert it into a value conforming to the given type
// constraint.
//
// This is an alias for Unmarshal on the predefined Converter in "Standard".
//
// An error is returned if the given source contains any YAML document
// delimiters.
func Unmarshal(src []byte, ty cty.Type) (cty.Value, error) {
	return Standard.Unmarshal(src, ty)
}

// Marshal serializes the given value into a YAML document, using a fixed
// mapping from cty types to YAML constructs.
//
// This is an alias for Marshal on the predefined Converter in "Standard".
//
// Note that unlike the function of the same name in the cty JSON package,
// this does not take a type constraint and therefore the YAML serialization
// cannot preserve late-bound type information in the serialization to be
// recovered from Unmarshal. Instead, any cty.DynamicPseudoType in the type
// constraint given to Unmarshal will be decoded as if the corresponding portion
// of the input were processed with ImpliedType to find a target type.
func Marshal(v cty.Value) ([]byte, error) {
	return Standard.Marshal(v)
}

// ImpliedType analyzes the given source code and returns a suitable type that
// it could be decoded into.
//
// For a converter that is using standard YAML rather than cty-specific custom
// tags, only a subset of cty types can be produced: strings, numbers, bools,
// tuple types, and object types.
//
// This is an alias for ImpliedType on the predefined Converter in "Standard".
func ImpliedType(src []byte) (cty.Type, error) {
	return Standard.ImpliedType(src)
}
|
||||
|
||||
func handleErr(err *error) {
|
||||
if v := recover(); v != nil {
|
||||
if e, ok := v.(yamlError); ok {
|
||||
*err = e.err
|
||||
} else {
|
||||
panic(v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type yamlError struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func fail(err error) {
|
||||
panic(yamlError{err})
|
||||
}
|
||||
|
||||
func failf(format string, args ...interface{}) {
|
||||
panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Maintain a mapping of keys to structure field indexes
|
||||
|
||||
// The code in this section was copied from mgo/bson.
|
||||
|
||||
// structInfo holds details for the serialization of fields of
|
||||
// a given struct.
|
||||
type structInfo struct {
|
||||
FieldsMap map[string]fieldInfo
|
||||
FieldsList []fieldInfo
|
||||
|
||||
// InlineMap is the number of the field in the struct that
|
||||
// contains an ,inline map, or -1 if there's none.
|
||||
InlineMap int
|
||||
}
|
||||
|
||||
type fieldInfo struct {
|
||||
Key string
|
||||
Num int
|
||||
OmitEmpty bool
|
||||
Flow bool
|
||||
// Id holds the unique field identifier, so we can cheaply
|
||||
// check for field duplicates without maintaining an extra map.
|
||||
Id int
|
||||
|
||||
// Inline holds the field index if the field is part of an inlined struct.
|
||||
Inline []int
|
||||
}
|
||||
|
||||
var structMap = make(map[reflect.Type]*structInfo)
|
||||
var fieldMapMutex sync.RWMutex
|
||||
|
||||
func getStructInfo(st reflect.Type) (*structInfo, error) {
|
||||
fieldMapMutex.RLock()
|
||||
sinfo, found := structMap[st]
|
||||
fieldMapMutex.RUnlock()
|
||||
if found {
|
||||
return sinfo, nil
|
||||
}
|
||||
|
||||
n := st.NumField()
|
||||
fieldsMap := make(map[string]fieldInfo)
|
||||
fieldsList := make([]fieldInfo, 0, n)
|
||||
inlineMap := -1
|
||||
for i := 0; i != n; i++ {
|
||||
field := st.Field(i)
|
||||
if field.PkgPath != "" && !field.Anonymous {
|
||||
continue // Private field
|
||||
}
|
||||
|
||||
info := fieldInfo{Num: i}
|
||||
|
||||
tag := field.Tag.Get("yaml")
|
||||
if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
|
||||
tag = string(field.Tag)
|
||||
}
|
||||
if tag == "-" {
|
||||
continue
|
||||
}
|
||||
|
||||
inline := false
|
||||
fields := strings.Split(tag, ",")
|
||||
if len(fields) > 1 {
|
||||
for _, flag := range fields[1:] {
|
||||
switch flag {
|
||||
case "omitempty":
|
||||
info.OmitEmpty = true
|
||||
case "flow":
|
||||
info.Flow = true
|
||||
case "inline":
|
||||
inline = true
|
||||
default:
|
||||
return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st))
|
||||
}
|
||||
}
|
||||
tag = fields[0]
|
||||
}
|
||||
|
||||
if inline {
|
||||
switch field.Type.Kind() {
|
||||
case reflect.Map:
|
||||
if inlineMap >= 0 {
|
||||
return nil, errors.New("Multiple ,inline maps in struct " + st.String())
|
||||
}
|
||||
if field.Type.Key() != reflect.TypeOf("") {
|
||||
return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
|
||||
}
|
||||
inlineMap = info.Num
|
||||
case reflect.Struct:
|
||||
sinfo, err := getStructInfo(field.Type)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, finfo := range sinfo.FieldsList {
|
||||
if _, found := fieldsMap[finfo.Key]; found {
|
||||
msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
|
||||
return nil, errors.New(msg)
|
||||
}
|
||||
if finfo.Inline == nil {
|
||||
finfo.Inline = []int{i, finfo.Num}
|
||||
} else {
|
||||
finfo.Inline = append([]int{i}, finfo.Inline...)
|
||||
}
|
||||
finfo.Id = len(fieldsList)
|
||||
fieldsMap[finfo.Key] = finfo
|
||||
fieldsList = append(fieldsList, finfo)
|
||||
}
|
||||
default:
|
||||
//return nil, errors.New("Option ,inline needs a struct value or map field")
|
||||
return nil, errors.New("Option ,inline needs a struct value field")
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if tag != "" {
|
||||
info.Key = tag
|
||||
} else {
|
||||
info.Key = strings.ToLower(field.Name)
|
||||
}
|
||||
|
||||
if _, found = fieldsMap[info.Key]; found {
|
||||
msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
|
||||
return nil, errors.New(msg)
|
||||
}
|
||||
|
||||
info.Id = len(fieldsList)
|
||||
fieldsList = append(fieldsList, info)
|
||||
fieldsMap[info.Key] = info
|
||||
}
|
||||
|
||||
sinfo = &structInfo{
|
||||
FieldsMap: fieldsMap,
|
||||
FieldsList: fieldsList,
|
||||
InlineMap: inlineMap,
|
||||
}
|
||||
|
||||
fieldMapMutex.Lock()
|
||||
structMap[st] = sinfo
|
||||
fieldMapMutex.Unlock()
|
||||
return sinfo, nil
|
||||
}
|
738
vendor/github.com/zclconf/go-cty-yaml/yamlh.go
generated
vendored
738
vendor/github.com/zclconf/go-cty-yaml/yamlh.go
generated
vendored
@ -1,738 +0,0 @@
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// The version directive data (the %YAML directive of a document).
type yaml_version_directive_t struct {
	major int8 // The major version number.
	minor int8 // The minor version number.
}

// The tag directive data (the %TAG directive of a document).
type yaml_tag_directive_t struct {
	handle []byte // The tag handle.
	prefix []byte // The tag prefix.
}

// yaml_encoding_t identifies the character encoding of the input stream.
type yaml_encoding_t int

// The stream encoding.
const (
	// Let the parser choose the encoding.
	yaml_ANY_ENCODING yaml_encoding_t = iota

	yaml_UTF8_ENCODING    // The default UTF-8 encoding.
	yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
	yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
)

// yaml_break_t identifies the line-break convention used for output.
type yaml_break_t int

// Line break types.
const (
	// Let the parser choose the break type.
	yaml_ANY_BREAK yaml_break_t = iota

	yaml_CR_BREAK   // Use CR for line breaks (Mac style).
	yaml_LN_BREAK   // Use LN for line breaks (Unix style).
	yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
)

// yaml_error_type_t classifies failures across the parser and emitter.
type yaml_error_type_t int

// Many bad things could happen with the parser and emitter.
const (
	// No error is produced.
	yaml_NO_ERROR yaml_error_type_t = iota

	yaml_MEMORY_ERROR   // Cannot allocate or reallocate a block of memory.
	yaml_READER_ERROR   // Cannot read or decode the input stream.
	yaml_SCANNER_ERROR  // Cannot scan the input stream.
	yaml_PARSER_ERROR   // Cannot parse the input stream.
	yaml_COMPOSER_ERROR // Cannot compose a YAML document.
	yaml_WRITER_ERROR   // Cannot write to the output stream.
	yaml_EMITTER_ERROR  // Cannot emit a YAML stream.
)

// The pointer position: a location within the input or output stream.
type yaml_mark_t struct {
	index  int // The position index.
	line   int // The position line.
	column int // The position column.
}
|
||||
|
||||
// Node Styles

// yaml_style_t is the common underlying type for the scalar, sequence
// and mapping style enumerations below.
type yaml_style_t int8

type yaml_scalar_style_t yaml_style_t

// Scalar styles.
const (
	// Let the emitter choose the style.
	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota

	yaml_PLAIN_SCALAR_STYLE         // The plain scalar style.
	yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
	yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
	yaml_LITERAL_SCALAR_STYLE       // The literal scalar style.
	yaml_FOLDED_SCALAR_STYLE        // The folded scalar style.
)

type yaml_sequence_style_t yaml_style_t

// Sequence styles.
const (
	// Let the emitter choose the style.
	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota

	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
)

type yaml_mapping_style_t yaml_style_t

// Mapping styles.
const (
	// Let the emitter choose the style.
	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota

	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
)
|
||||
|
||||
// Tokens

type yaml_token_type_t int

// Token types.
const (
	// An empty token.
	yaml_NO_TOKEN yaml_token_type_t = iota

	yaml_STREAM_START_TOKEN // A STREAM-START token.
	yaml_STREAM_END_TOKEN   // A STREAM-END token.

	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.

	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.

	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.

	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
	yaml_KEY_TOKEN         // A KEY token.
	yaml_VALUE_TOKEN       // A VALUE token.

	yaml_ALIAS_TOKEN  // An ALIAS token.
	yaml_ANCHOR_TOKEN // An ANCHOR token.
	yaml_TAG_TOKEN    // A TAG token.
	yaml_SCALAR_TOKEN // A SCALAR token.
)

// String returns the name of the token type, for debugging output.
// Unknown values map to "<unknown token>".
func (tt yaml_token_type_t) String() string {
	names := [...]string{
		yaml_NO_TOKEN:                   "yaml_NO_TOKEN",
		yaml_STREAM_START_TOKEN:         "yaml_STREAM_START_TOKEN",
		yaml_STREAM_END_TOKEN:           "yaml_STREAM_END_TOKEN",
		yaml_VERSION_DIRECTIVE_TOKEN:    "yaml_VERSION_DIRECTIVE_TOKEN",
		yaml_TAG_DIRECTIVE_TOKEN:        "yaml_TAG_DIRECTIVE_TOKEN",
		yaml_DOCUMENT_START_TOKEN:       "yaml_DOCUMENT_START_TOKEN",
		yaml_DOCUMENT_END_TOKEN:         "yaml_DOCUMENT_END_TOKEN",
		yaml_BLOCK_SEQUENCE_START_TOKEN: "yaml_BLOCK_SEQUENCE_START_TOKEN",
		yaml_BLOCK_MAPPING_START_TOKEN:  "yaml_BLOCK_MAPPING_START_TOKEN",
		yaml_BLOCK_END_TOKEN:            "yaml_BLOCK_END_TOKEN",
		yaml_FLOW_SEQUENCE_START_TOKEN:  "yaml_FLOW_SEQUENCE_START_TOKEN",
		yaml_FLOW_SEQUENCE_END_TOKEN:    "yaml_FLOW_SEQUENCE_END_TOKEN",
		yaml_FLOW_MAPPING_START_TOKEN:   "yaml_FLOW_MAPPING_START_TOKEN",
		yaml_FLOW_MAPPING_END_TOKEN:     "yaml_FLOW_MAPPING_END_TOKEN",
		yaml_BLOCK_ENTRY_TOKEN:          "yaml_BLOCK_ENTRY_TOKEN",
		yaml_FLOW_ENTRY_TOKEN:           "yaml_FLOW_ENTRY_TOKEN",
		yaml_KEY_TOKEN:                  "yaml_KEY_TOKEN",
		yaml_VALUE_TOKEN:                "yaml_VALUE_TOKEN",
		yaml_ALIAS_TOKEN:                "yaml_ALIAS_TOKEN",
		yaml_ANCHOR_TOKEN:               "yaml_ANCHOR_TOKEN",
		yaml_TAG_TOKEN:                  "yaml_TAG_TOKEN",
		yaml_SCALAR_TOKEN:               "yaml_SCALAR_TOKEN",
	}
	if tt >= 0 && int(tt) < len(names) {
		return names[tt]
	}
	return "<unknown token>"
}
|
||||
|
||||
// The token structure, as produced by the scanner.
type yaml_token_t struct {
	// The token type.
	typ yaml_token_type_t

	// The start/end of the token.
	start_mark, end_mark yaml_mark_t

	// The stream encoding (for yaml_STREAM_START_TOKEN).
	encoding yaml_encoding_t

	// The alias/anchor/scalar value or tag/tag directive handle
	// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
	value []byte

	// The tag suffix (for yaml_TAG_TOKEN).
	suffix []byte

	// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
	prefix []byte

	// The scalar style (for yaml_SCALAR_TOKEN).
	style yaml_scalar_style_t

	// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
	major, minor int8
}
|
||||
|
||||
// Events

type yaml_event_type_t int8

// Event types.
const (
	// An empty event.
	yaml_NO_EVENT yaml_event_type_t = iota

	yaml_STREAM_START_EVENT   // A STREAM-START event.
	yaml_STREAM_END_EVENT     // A STREAM-END event.
	yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
	yaml_DOCUMENT_END_EVENT   // A DOCUMENT-END event.
	yaml_ALIAS_EVENT          // An ALIAS event.
	yaml_SCALAR_EVENT         // A SCALAR event.
	yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
	yaml_SEQUENCE_END_EVENT   // A SEQUENCE-END event.
	yaml_MAPPING_START_EVENT  // A MAPPING-START event.
	yaml_MAPPING_END_EVENT    // A MAPPING-END event.
)

// eventStrings maps each event type to a short human-readable name.
var eventStrings = []string{
	yaml_NO_EVENT:             "none",
	yaml_STREAM_START_EVENT:   "stream start",
	yaml_STREAM_END_EVENT:     "stream end",
	yaml_DOCUMENT_START_EVENT: "document start",
	yaml_DOCUMENT_END_EVENT:   "document end",
	yaml_ALIAS_EVENT:          "alias",
	yaml_SCALAR_EVENT:         "scalar",
	yaml_SEQUENCE_START_EVENT: "sequence start",
	yaml_SEQUENCE_END_EVENT:   "sequence end",
	yaml_MAPPING_START_EVENT:  "mapping start",
	yaml_MAPPING_END_EVENT:    "mapping end",
}

// String returns a human-readable name for the event type, or a
// descriptive fallback for values outside the known range.
func (e yaml_event_type_t) String() string {
	if i := int(e); i >= 0 && i < len(eventStrings) {
		return eventStrings[i]
	}
	return fmt.Sprintf("unknown event %d", e)
}
|
||||
|
||||
// The event structure, as produced by the parser and consumed by the
// emitter.
type yaml_event_t struct {

	// The event type.
	typ yaml_event_type_t

	// The start and end of the event.
	start_mark, end_mark yaml_mark_t

	// The document encoding (for yaml_STREAM_START_EVENT).
	encoding yaml_encoding_t

	// The version directive (for yaml_DOCUMENT_START_EVENT).
	version_directive *yaml_version_directive_t

	// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
	tag_directives []yaml_tag_directive_t

	// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
	anchor []byte

	// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
	tag []byte

	// The scalar value (for yaml_SCALAR_EVENT).
	value []byte

	// Is the document start/end indicator implicit, or the tag optional?
	// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
	implicit bool

	// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
	quoted_implicit bool

	// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
	style yaml_style_t
}

// The accessors below reinterpret the generic style field according to
// the event kind.
func (e *yaml_event_t) scalar_style() yaml_scalar_style_t     { return yaml_scalar_style_t(e.style) }
func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
func (e *yaml_event_t) mapping_style() yaml_mapping_style_t   { return yaml_mapping_style_t(e.style) }
|
||||
|
||||
// Nodes

// Well-known YAML tag URIs from the YAML 1.1 core schema.
const (
	yaml_NULL_TAG      = "tag:yaml.org,2002:null"      // The tag !!null with the only possible value: null.
	yaml_BOOL_TAG      = "tag:yaml.org,2002:bool"      // The tag !!bool with the values: true and false.
	yaml_STR_TAG       = "tag:yaml.org,2002:str"       // The tag !!str for string values.
	yaml_INT_TAG       = "tag:yaml.org,2002:int"       // The tag !!int for integer values.
	yaml_FLOAT_TAG     = "tag:yaml.org,2002:float"     // The tag !!float for float values.
	yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.

	yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
	yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.

	// Not in original libyaml.
	yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
	yaml_MERGE_TAG  = "tag:yaml.org,2002:merge"

	yaml_DEFAULT_SCALAR_TAG   = yaml_STR_TAG // The default scalar tag is !!str.
	yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
	yaml_DEFAULT_MAPPING_TAG  = yaml_MAP_TAG // The default mapping tag is !!map.
)

type yaml_node_type_t int

// Node types.
const (
	// An empty node.
	yaml_NO_NODE yaml_node_type_t = iota

	yaml_SCALAR_NODE   // A scalar node.
	yaml_SEQUENCE_NODE // A sequence node.
	yaml_MAPPING_NODE  // A mapping node.
)

// An element of a sequence node: the id of the item's node.
type yaml_node_item_t int

// An element of a mapping node: node ids for the key and value.
type yaml_node_pair_t struct {
	key   int // The key of the element.
	value int // The value of the element.
}
|
||||
|
||||
// The node structure. Only the member matching typ is meaningful.
type yaml_node_t struct {
	typ yaml_node_type_t // The node type.
	tag []byte           // The node tag.

	// The node data.

	// The scalar parameters (for yaml_SCALAR_NODE).
	scalar struct {
		value  []byte              // The scalar value.
		length int                 // The length of the scalar value.
		style  yaml_scalar_style_t // The scalar style.
	}

	// The sequence parameters (for yaml_SEQUENCE_NODE).
	sequence struct {
		items_data []yaml_node_item_t    // The stack of sequence items.
		style      yaml_sequence_style_t // The sequence style.
	}

	// The mapping parameters (for yaml_MAPPING_NODE).
	mapping struct {
		pairs_data  []yaml_node_pair_t   // The stack of mapping pairs (key, value).
		pairs_start *yaml_node_pair_t    // The beginning of the stack.
		pairs_end   *yaml_node_pair_t    // The end of the stack.
		pairs_top   *yaml_node_pair_t    // The top of the stack.
		style       yaml_mapping_style_t // The mapping style.
	}

	start_mark yaml_mark_t // The beginning of the node.
	end_mark   yaml_mark_t // The end of the node.

}

// The document structure.
type yaml_document_t struct {

	// The document nodes.
	nodes []yaml_node_t

	// The version directive.
	version_directive *yaml_version_directive_t

	// The list of tag directives.
	tag_directives_data  []yaml_tag_directive_t
	tag_directives_start int // The beginning of the tag directives list.
	tag_directives_end   int // The end of the tag directives list.

	start_implicit int // Is the document start indicator implicit?
	end_implicit   int // Is the document end indicator implicit?

	// The start/end of the document.
	start_mark, end_mark yaml_mark_t
}
|
||||
|
||||
// The prototype of a read handler.
//
// The read handler is called when the parser needs to read more bytes from the
// source. The handler should write not more than size bytes to the buffer.
// The number of written bytes should be set to the size_read variable.
//
// [in,out] data A pointer to an application data specified by
// yaml_parser_set_input().
// [out] buffer The buffer to write the data from the source.
// [in] size The size of the buffer.
// [out] size_read The actual number of bytes read from the source.
//
// On success, the handler should return 1. If the handler failed,
// the returned value should be 0. On EOF, the handler should set the
// size_read to 0 and return 1.
//
// NOTE(review): the prose above is inherited from libyaml's C API; in
// this Go port the handler simply returns (bytes read, error).
type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)

// This structure holds information about a potential simple key.
type yaml_simple_key_t struct {
	possible     bool        // Is a simple key possible?
	required     bool        // Is a simple key required?
	token_number int         // The number of the token.
	mark         yaml_mark_t // The position mark.
}
|
||||
|
||||
// The states of the parser.
type yaml_parser_state_t int

const (
	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota

	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
	yaml_PARSE_END_STATE                               // Expect nothing.
)

// String returns the identifier of the parser state, for debugging
// output. Unknown values map to "<unknown parser state>".
func (ps yaml_parser_state_t) String() string {
	names := [...]string{
		yaml_PARSE_STREAM_START_STATE:                      "yaml_PARSE_STREAM_START_STATE",
		yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:           "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE",
		yaml_PARSE_DOCUMENT_START_STATE:                    "yaml_PARSE_DOCUMENT_START_STATE",
		yaml_PARSE_DOCUMENT_CONTENT_STATE:                  "yaml_PARSE_DOCUMENT_CONTENT_STATE",
		yaml_PARSE_DOCUMENT_END_STATE:                      "yaml_PARSE_DOCUMENT_END_STATE",
		yaml_PARSE_BLOCK_NODE_STATE:                        "yaml_PARSE_BLOCK_NODE_STATE",
		yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE",
		yaml_PARSE_FLOW_NODE_STATE:                         "yaml_PARSE_FLOW_NODE_STATE",
		yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:        "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE",
		yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:              "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE",
		yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:         "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE",
		yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:           "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE",
		yaml_PARSE_BLOCK_MAPPING_KEY_STATE:                 "yaml_PARSE_BLOCK_MAPPING_KEY_STATE",
		yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:               "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE",
		yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:         "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE",
		yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:               "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE",
		yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:   "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE",
		yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE",
		yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:   "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE",
		yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:            "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE",
		yaml_PARSE_FLOW_MAPPING_KEY_STATE:                  "yaml_PARSE_FLOW_MAPPING_KEY_STATE",
		yaml_PARSE_FLOW_MAPPING_VALUE_STATE:                "yaml_PARSE_FLOW_MAPPING_VALUE_STATE",
		yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:          "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE",
		yaml_PARSE_END_STATE:                               "yaml_PARSE_END_STATE",
	}
	if ps >= 0 && int(ps) < len(names) {
		return names[ps]
	}
	return "<unknown parser state>"
}
|
||||
|
||||
// This structure holds aliases data: one entry per anchor seen while
// composing, mapping the anchor name to the node it refers to.
type yaml_alias_data_t struct {
	anchor []byte      // The anchor.
	index  int         // The node id.
	mark   yaml_mark_t // The anchor mark.
}
|
||||
|
||||
// The parser structure.
//
// All members are internal. Manage the structure using the
// yaml_parser_ family of functions.
type yaml_parser_t struct {

	// Error handling

	error yaml_error_type_t // Error type.

	problem string // Error description.

	// The byte about which the problem occurred.
	problem_offset int
	problem_value  int
	problem_mark   yaml_mark_t

	// The error context.
	context      string
	context_mark yaml_mark_t

	// Reader stuff

	read_handler yaml_read_handler_t // Read handler.

	input_reader io.Reader // File input data.
	input        []byte    // String input data.
	input_pos    int

	eof bool // EOF flag

	buffer     []byte // The working buffer.
	buffer_pos int    // The current position of the buffer.

	unread int // The number of unread characters in the buffer.

	raw_buffer     []byte // The raw buffer.
	raw_buffer_pos int    // The current position of the raw buffer.

	encoding yaml_encoding_t // The input encoding.

	offset int         // The offset of the current position (in bytes).
	mark   yaml_mark_t // The mark of the current position.

	// Scanner stuff

	stream_start_produced bool // Have we started to scan the input stream?
	stream_end_produced   bool // Have we reached the end of the input stream?

	flow_level int // The number of unclosed '[' and '{' indicators.

	tokens          []yaml_token_t // The tokens queue.
	tokens_head     int            // The head of the tokens queue.
	tokens_parsed   int            // The number of tokens fetched from the queue.
	token_available bool           // Does the tokens queue contain a token ready for dequeueing.

	indent  int   // The current indentation level.
	indents []int // The indentation levels stack.

	simple_key_allowed bool                // May a simple key occur at the current position?
	simple_keys        []yaml_simple_key_t // The stack of simple keys.

	// Parser stuff

	state          yaml_parser_state_t    // The current parser state.
	states         []yaml_parser_state_t  // The parser states stack.
	marks          []yaml_mark_t          // The stack of marks.
	tag_directives []yaml_tag_directive_t // The list of TAG directives.

	// Dumper stuff

	aliases []yaml_alias_data_t // The alias data.

	document *yaml_document_t // The currently parsed document.
}
|
||||
|
||||
// Emitter Definitions

// The prototype of a write handler.
//
// The write handler is called when the emitter needs to flush the accumulated
// characters to the output. The handler should write @a size bytes of the
// @a buffer to the output.
//
// @param[in,out] data A pointer to an application data specified by
// yaml_emitter_set_output().
// @param[in] buffer The buffer with bytes to be written.
// @param[in] size The size of the buffer.
//
// @returns On success, the handler should return @c 1. If the handler failed,
// the returned value should be @c 0.
//
// NOTE(review): the prose above is inherited from libyaml's C API; in
// this Go port the handler returns nil on success and an error on failure.
type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
|
||||
|
||||
type yaml_emitter_state_t int
|
||||
|
||||
// The emitter states.
|
||||
const (
|
||||
// Expect STREAM-START.
|
||||
yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
|
||||
|
||||
yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
|
||||
yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
|
||||
yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
|
||||
yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
|
||||
yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
|
||||
yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
|
||||
yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
|
||||
yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
|
||||
yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
|
||||
yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
|
||||
yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
|
||||
yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
|
||||
yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
|
||||
yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
|
||||
yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
|
||||
yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
|
||||
yaml_EMIT_END_STATE // Expect nothing.
|
||||
)
|
||||
|
||||
// The emitter structure.
|
||||
//
|
||||
// All members are internal. Manage the structure using the @c yaml_emitter_
|
||||
// family of functions.
|
||||
type yaml_emitter_t struct {
|
||||
|
||||
// Error handling
|
||||
|
||||
error yaml_error_type_t // Error type.
|
||||
problem string // Error description.
|
||||
|
||||
// Writer stuff
|
||||
|
||||
write_handler yaml_write_handler_t // Write handler.
|
||||
|
||||
output_buffer *[]byte // String output data.
|
||||
output_writer io.Writer // File output data.
|
||||
|
||||
buffer []byte // The working buffer.
|
||||
buffer_pos int // The current position of the buffer.
|
||||
|
||||
raw_buffer []byte // The raw buffer.
|
||||
raw_buffer_pos int // The current position of the buffer.
|
||||
|
||||
encoding yaml_encoding_t // The stream encoding.
|
||||
|
||||
// Emitter stuff
|
||||
|
||||
canonical bool // If the output is in the canonical style?
|
||||
best_indent int // The number of indentation spaces.
|
||||
best_width int // The preferred width of the output lines.
|
||||
unicode bool // Allow unescaped non-ASCII characters?
|
||||
line_break yaml_break_t // The preferred line break.
|
||||
|
||||
state yaml_emitter_state_t // The current emitter state.
|
||||
states []yaml_emitter_state_t // The stack of states.
|
||||
|
||||
events []yaml_event_t // The event queue.
|
||||
events_head int // The head of the event queue.
|
||||
|
||||
indents []int // The stack of indentation levels.
|
||||
|
||||
tag_directives []yaml_tag_directive_t // The list of tag directives.
|
||||
|
||||
indent int // The current indentation level.
|
||||
|
||||
flow_level int // The current flow level.
|
||||
|
||||
root_context bool // Is it the document root context?
|
||||
sequence_context bool // Is it a sequence context?
|
||||
mapping_context bool // Is it a mapping context?
|
||||
simple_key_context bool // Is it a simple mapping key context?
|
||||
|
||||
line int // The current line.
|
||||
column int // The current column.
|
||||
whitespace bool // If the last character was a whitespace?
|
||||
indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
|
||||
open_ended bool // If an explicit document end is required?
|
||||
|
||||
// Anchor analysis.
|
||||
anchor_data struct {
|
||||
anchor []byte // The anchor value.
|
||||
alias bool // Is it an alias?
|
||||
}
|
||||
|
||||
// Tag analysis.
|
||||
tag_data struct {
|
||||
handle []byte // The tag handle.
|
||||
suffix []byte // The tag suffix.
|
||||
}
|
||||
|
||||
// Scalar analysis.
|
||||
scalar_data struct {
|
||||
value []byte // The scalar value.
|
||||
multiline bool // Does the scalar contain line breaks?
|
||||
flow_plain_allowed bool // Can the scalar be expessed in the flow plain style?
|
||||
block_plain_allowed bool // Can the scalar be expressed in the block plain style?
|
||||
single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
|
||||
block_allowed bool // Can the scalar be expressed in the literal or folded styles?
|
||||
style yaml_scalar_style_t // The output style.
|
||||
}
|
||||
|
||||
// Dumper stuff
|
||||
|
||||
opened bool // If the stream was already opened?
|
||||
closed bool // If the stream was already closed?
|
||||
|
||||
// The information associated with the document nodes.
|
||||
anchors *struct {
|
||||
references int // The number of references.
|
||||
anchor int // The anchor id.
|
||||
serialized bool // If the node has been emitted?
|
||||
}
|
||||
|
||||
last_anchor_id int // The last assigned anchor id.
|
||||
|
||||
document *yaml_document_t // The currently emitted document.
|
||||
}
|
173
vendor/github.com/zclconf/go-cty-yaml/yamlprivateh.go
generated
vendored
173
vendor/github.com/zclconf/go-cty-yaml/yamlprivateh.go
generated
vendored
@ -1,173 +0,0 @@
|
||||
package yaml
|
||||
|
||||
const (
|
||||
// The size of the input raw buffer.
|
||||
input_raw_buffer_size = 512
|
||||
|
||||
// The size of the input buffer.
|
||||
// It should be possible to decode the whole raw buffer.
|
||||
input_buffer_size = input_raw_buffer_size * 3
|
||||
|
||||
// The size of the output buffer.
|
||||
output_buffer_size = 128
|
||||
|
||||
// The size of the output raw buffer.
|
||||
// It should be possible to encode the whole output buffer.
|
||||
output_raw_buffer_size = (output_buffer_size*2 + 2)
|
||||
|
||||
// The size of other stacks and queues.
|
||||
initial_stack_size = 16
|
||||
initial_queue_size = 16
|
||||
initial_string_size = 16
|
||||
)
|
||||
|
||||
// Check if the character at the specified position is an alphabetical
|
||||
// character, a digit, '_', or '-'.
|
||||
func is_alpha(b []byte, i int) bool {
|
||||
return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
|
||||
}
|
||||
|
||||
// Check if the character at the specified position is a digit.
|
||||
func is_digit(b []byte, i int) bool {
|
||||
return b[i] >= '0' && b[i] <= '9'
|
||||
}
|
||||
|
||||
// Get the value of a digit.
|
||||
func as_digit(b []byte, i int) int {
|
||||
return int(b[i]) - '0'
|
||||
}
|
||||
|
||||
// Check if the character at the specified position is a hex-digit.
|
||||
func is_hex(b []byte, i int) bool {
|
||||
return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
|
||||
}
|
||||
|
||||
// Get the value of a hex-digit.
|
||||
func as_hex(b []byte, i int) int {
|
||||
bi := b[i]
|
||||
if bi >= 'A' && bi <= 'F' {
|
||||
return int(bi) - 'A' + 10
|
||||
}
|
||||
if bi >= 'a' && bi <= 'f' {
|
||||
return int(bi) - 'a' + 10
|
||||
}
|
||||
return int(bi) - '0'
|
||||
}
|
||||
|
||||
// Check if the character is ASCII.
|
||||
func is_ascii(b []byte, i int) bool {
|
||||
return b[i] <= 0x7F
|
||||
}
|
||||
|
||||
// Check if the character at the start of the buffer can be printed unescaped.
|
||||
func is_printable(b []byte, i int) bool {
|
||||
return ((b[i] == 0x0A) || // . == #x0A
|
||||
(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
|
||||
(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
|
||||
(b[i] > 0xC2 && b[i] < 0xED) ||
|
||||
(b[i] == 0xED && b[i+1] < 0xA0) ||
|
||||
(b[i] == 0xEE) ||
|
||||
(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
|
||||
!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
|
||||
!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
|
||||
}
|
||||
|
||||
// Check if the character at the specified position is NUL.
|
||||
func is_z(b []byte, i int) bool {
|
||||
return b[i] == 0x00
|
||||
}
|
||||
|
||||
// Check if the beginning of the buffer is a BOM.
|
||||
func is_bom(b []byte, i int) bool {
|
||||
return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
|
||||
}
|
||||
|
||||
// Check if the character at the specified position is space.
|
||||
func is_space(b []byte, i int) bool {
|
||||
return b[i] == ' '
|
||||
}
|
||||
|
||||
// Check if the character at the specified position is tab.
|
||||
func is_tab(b []byte, i int) bool {
|
||||
return b[i] == '\t'
|
||||
}
|
||||
|
||||
// Check if the character at the specified position is blank (space or tab).
|
||||
func is_blank(b []byte, i int) bool {
|
||||
//return is_space(b, i) || is_tab(b, i)
|
||||
return b[i] == ' ' || b[i] == '\t'
|
||||
}
|
||||
|
||||
// Check if the character at the specified position is a line break.
|
||||
func is_break(b []byte, i int) bool {
|
||||
return (b[i] == '\r' || // CR (#xD)
|
||||
b[i] == '\n' || // LF (#xA)
|
||||
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
|
||||
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
|
||||
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
|
||||
}
|
||||
|
||||
func is_crlf(b []byte, i int) bool {
|
||||
return b[i] == '\r' && b[i+1] == '\n'
|
||||
}
|
||||
|
||||
// Check if the character is a line break or NUL.
|
||||
func is_breakz(b []byte, i int) bool {
|
||||
//return is_break(b, i) || is_z(b, i)
|
||||
return ( // is_break:
|
||||
b[i] == '\r' || // CR (#xD)
|
||||
b[i] == '\n' || // LF (#xA)
|
||||
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
|
||||
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
|
||||
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
|
||||
// is_z:
|
||||
b[i] == 0)
|
||||
}
|
||||
|
||||
// Check if the character is a line break, space, or NUL.
|
||||
func is_spacez(b []byte, i int) bool {
|
||||
//return is_space(b, i) || is_breakz(b, i)
|
||||
return ( // is_space:
|
||||
b[i] == ' ' ||
|
||||
// is_breakz:
|
||||
b[i] == '\r' || // CR (#xD)
|
||||
b[i] == '\n' || // LF (#xA)
|
||||
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
|
||||
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
|
||||
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
|
||||
b[i] == 0)
|
||||
}
|
||||
|
||||
// Check if the character is a line break, space, tab, or NUL.
|
||||
func is_blankz(b []byte, i int) bool {
|
||||
//return is_blank(b, i) || is_breakz(b, i)
|
||||
return ( // is_blank:
|
||||
b[i] == ' ' || b[i] == '\t' ||
|
||||
// is_breakz:
|
||||
b[i] == '\r' || // CR (#xD)
|
||||
b[i] == '\n' || // LF (#xA)
|
||||
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
|
||||
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
|
||||
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
|
||||
b[i] == 0)
|
||||
}
|
||||
|
||||
// Determine the width of the character.
|
||||
func width(b byte) int {
|
||||
// Don't replace these by a switch without first
|
||||
// confirming that it is being inlined.
|
||||
if b&0x80 == 0x00 {
|
||||
return 1
|
||||
}
|
||||
if b&0xE0 == 0xC0 {
|
||||
return 2
|
||||
}
|
||||
if b&0xF0 == 0xE0 {
|
||||
return 3
|
||||
}
|
||||
if b&0xF8 == 0xF0 {
|
||||
return 4
|
||||
}
|
||||
return 0
|
||||
|
||||
}
|
14
vendor/github.com/zclconf/go-cty/cty/msgpack/doc.go
generated
vendored
14
vendor/github.com/zclconf/go-cty/cty/msgpack/doc.go
generated
vendored
@ -1,14 +0,0 @@
|
||||
// Package msgpack provides functions for serializing cty values in the
|
||||
// msgpack encoding, and decoding them again.
|
||||
//
|
||||
// If the same type information is provided both at encoding and decoding time
|
||||
// then values can be round-tripped without loss, except for capsule types
|
||||
// which are not currently supported.
|
||||
//
|
||||
// If any unknown values are passed to Marshal then they will be represented
|
||||
// using a msgpack extension with type code zero, which is understood by
|
||||
// the Unmarshal function within this package but will not be understood by
|
||||
// a generic (non-cty-aware) msgpack decoder. Ensure that no unknown values
|
||||
// are used if interoperability with other msgpack implementations is
|
||||
// required.
|
||||
package msgpack
|
31
vendor/github.com/zclconf/go-cty/cty/msgpack/dynamic.go
generated
vendored
31
vendor/github.com/zclconf/go-cty/cty/msgpack/dynamic.go
generated
vendored
@ -1,31 +0,0 @@
|
||||
package msgpack
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/vmihailenco/msgpack/v4"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
type dynamicVal struct {
|
||||
Value cty.Value
|
||||
Path cty.Path
|
||||
}
|
||||
|
||||
func (dv *dynamicVal) MarshalMsgpack() ([]byte, error) {
|
||||
// Rather than defining a msgpack-specific serialization of types,
|
||||
// instead we use the existing JSON serialization.
|
||||
typeJSON, err := dv.Value.Type().MarshalJSON()
|
||||
if err != nil {
|
||||
return nil, dv.Path.NewErrorf("failed to serialize type: %s", err)
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
enc := msgpack.NewEncoder(&buf)
|
||||
enc.EncodeArrayLen(2)
|
||||
enc.EncodeBytes(typeJSON)
|
||||
err = marshal(dv.Value, dv.Value.Type(), dv.Path, enc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
8
vendor/github.com/zclconf/go-cty/cty/msgpack/infinity.go
generated
vendored
8
vendor/github.com/zclconf/go-cty/cty/msgpack/infinity.go
generated
vendored
@ -1,8 +0,0 @@
|
||||
package msgpack
|
||||
|
||||
import (
|
||||
"math"
|
||||
)
|
||||
|
||||
var negativeInfinity = math.Inf(-1)
|
||||
var positiveInfinity = math.Inf(1)
|
212
vendor/github.com/zclconf/go-cty/cty/msgpack/marshal.go
generated
vendored
212
vendor/github.com/zclconf/go-cty/cty/msgpack/marshal.go
generated
vendored
@ -1,212 +0,0 @@
|
||||
package msgpack
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math/big"
|
||||
"sort"
|
||||
|
||||
"github.com/vmihailenco/msgpack/v4"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
"github.com/zclconf/go-cty/cty/convert"
|
||||
)
|
||||
|
||||
// Marshal produces a msgpack serialization of the given value that
|
||||
// can be decoded into the given type later using Unmarshal.
|
||||
//
|
||||
// The given value must conform to the given type, or an error will
|
||||
// be returned.
|
||||
func Marshal(val cty.Value, ty cty.Type) ([]byte, error) {
|
||||
errs := val.Type().TestConformance(ty)
|
||||
if errs != nil {
|
||||
// Attempt a conversion
|
||||
var err error
|
||||
val, err = convert.Convert(val, ty)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// From this point onward, val can be assumed to be conforming to t.
|
||||
|
||||
var path cty.Path
|
||||
var buf bytes.Buffer
|
||||
enc := msgpack.NewEncoder(&buf)
|
||||
enc.UseCompactEncoding(true)
|
||||
|
||||
err := marshal(val, ty, path, enc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func marshal(val cty.Value, ty cty.Type, path cty.Path, enc *msgpack.Encoder) error {
|
||||
if val.IsMarked() {
|
||||
return path.NewErrorf("value has marks, so it cannot be serialized")
|
||||
}
|
||||
|
||||
// If we're going to decode as DynamicPseudoType then we need to save
|
||||
// dynamic type information to recover the real type.
|
||||
if ty == cty.DynamicPseudoType && val.Type() != cty.DynamicPseudoType {
|
||||
return marshalDynamic(val, path, enc)
|
||||
}
|
||||
|
||||
if !val.IsKnown() {
|
||||
err := enc.Encode(unknownVal)
|
||||
if err != nil {
|
||||
return path.NewError(err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if val.IsNull() {
|
||||
err := enc.EncodeNil()
|
||||
if err != nil {
|
||||
return path.NewError(err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// The caller should've guaranteed that the given val is conformant with
|
||||
// the given type ty, so we'll proceed under that assumption here.
|
||||
switch {
|
||||
case ty.IsPrimitiveType():
|
||||
switch ty {
|
||||
case cty.String:
|
||||
err := enc.EncodeString(val.AsString())
|
||||
if err != nil {
|
||||
return path.NewError(err)
|
||||
}
|
||||
return nil
|
||||
case cty.Number:
|
||||
var err error
|
||||
switch {
|
||||
case val.RawEquals(cty.PositiveInfinity):
|
||||
err = enc.EncodeFloat64(positiveInfinity)
|
||||
case val.RawEquals(cty.NegativeInfinity):
|
||||
err = enc.EncodeFloat64(negativeInfinity)
|
||||
default:
|
||||
bf := val.AsBigFloat()
|
||||
if iv, acc := bf.Int64(); acc == big.Exact {
|
||||
err = enc.EncodeInt(iv)
|
||||
} else if fv, acc := bf.Float64(); acc == big.Exact {
|
||||
err = enc.EncodeFloat64(fv)
|
||||
} else {
|
||||
err = enc.EncodeString(bf.Text('f', -1))
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return path.NewError(err)
|
||||
}
|
||||
return nil
|
||||
case cty.Bool:
|
||||
err := enc.EncodeBool(val.True())
|
||||
if err != nil {
|
||||
return path.NewError(err)
|
||||
}
|
||||
return nil
|
||||
default:
|
||||
panic("unsupported primitive type")
|
||||
}
|
||||
case ty.IsListType(), ty.IsSetType():
|
||||
enc.EncodeArrayLen(val.LengthInt())
|
||||
ety := ty.ElementType()
|
||||
it := val.ElementIterator()
|
||||
path := append(path, nil) // local override of 'path' with extra element
|
||||
for it.Next() {
|
||||
ek, ev := it.Element()
|
||||
path[len(path)-1] = cty.IndexStep{
|
||||
Key: ek,
|
||||
}
|
||||
err := marshal(ev, ety, path, enc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
case ty.IsMapType():
|
||||
enc.EncodeMapLen(val.LengthInt())
|
||||
ety := ty.ElementType()
|
||||
it := val.ElementIterator()
|
||||
path := append(path, nil) // local override of 'path' with extra element
|
||||
for it.Next() {
|
||||
ek, ev := it.Element()
|
||||
path[len(path)-1] = cty.IndexStep{
|
||||
Key: ek,
|
||||
}
|
||||
var err error
|
||||
err = marshal(ek, ek.Type(), path, enc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = marshal(ev, ety, path, enc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
case ty.IsTupleType():
|
||||
etys := ty.TupleElementTypes()
|
||||
it := val.ElementIterator()
|
||||
path := append(path, nil) // local override of 'path' with extra element
|
||||
i := 0
|
||||
enc.EncodeArrayLen(len(etys))
|
||||
for it.Next() {
|
||||
ety := etys[i]
|
||||
ek, ev := it.Element()
|
||||
path[len(path)-1] = cty.IndexStep{
|
||||
Key: ek,
|
||||
}
|
||||
err := marshal(ev, ety, path, enc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
i++
|
||||
}
|
||||
return nil
|
||||
case ty.IsObjectType():
|
||||
atys := ty.AttributeTypes()
|
||||
path := append(path, nil) // local override of 'path' with extra element
|
||||
|
||||
names := make([]string, 0, len(atys))
|
||||
for k := range atys {
|
||||
names = append(names, k)
|
||||
}
|
||||
sort.Strings(names)
|
||||
|
||||
enc.EncodeMapLen(len(names))
|
||||
|
||||
for _, k := range names {
|
||||
aty := atys[k]
|
||||
av := val.GetAttr(k)
|
||||
path[len(path)-1] = cty.GetAttrStep{
|
||||
Name: k,
|
||||
}
|
||||
var err error
|
||||
err = marshal(cty.StringVal(k), cty.String, path, enc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = marshal(av, aty, path, enc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
case ty.IsCapsuleType():
|
||||
return path.NewErrorf("capsule types not supported for msgpack encoding")
|
||||
default:
|
||||
// should never happen
|
||||
return path.NewErrorf("cannot msgpack-serialize %s", ty.FriendlyName())
|
||||
}
|
||||
}
|
||||
|
||||
// marshalDynamic adds an extra wrapping object containing dynamic type
|
||||
// information for the given value.
|
||||
func marshalDynamic(val cty.Value, path cty.Path, enc *msgpack.Encoder) error {
|
||||
dv := dynamicVal{
|
||||
Value: val,
|
||||
Path: path,
|
||||
}
|
||||
return enc.Encode(&dv)
|
||||
}
|
167
vendor/github.com/zclconf/go-cty/cty/msgpack/type_implied.go
generated
vendored
167
vendor/github.com/zclconf/go-cty/cty/msgpack/type_implied.go
generated
vendored
@ -1,167 +0,0 @@
|
||||
package msgpack
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/vmihailenco/msgpack/v4"
|
||||
msgpackcodes "github.com/vmihailenco/msgpack/v4/codes"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// ImpliedType returns the cty Type implied by the structure of the given
|
||||
// msgpack-compliant buffer. This function implements the default type mapping
|
||||
// behavior used when decoding arbitrary msgpack without explicit cty Type
|
||||
// information.
|
||||
//
|
||||
// The rules are as follows:
|
||||
//
|
||||
// msgpack strings, numbers and bools map to their equivalent primitive type in
|
||||
// cty.
|
||||
//
|
||||
// msgpack maps become cty object types, with the attributes defined by the
|
||||
// map keys and the types of their values.
|
||||
//
|
||||
// msgpack arrays become cty tuple types, with the elements defined by the
|
||||
// types of the array members.
|
||||
//
|
||||
// Any nulls are typed as DynamicPseudoType, so callers of this function
|
||||
// must be prepared to deal with this. Callers that do not wish to deal with
|
||||
// dynamic typing should not use this function and should instead describe
|
||||
// their required types explicitly with a cty.Type instance when decoding.
|
||||
//
|
||||
// Any unknown values are similarly typed as DynamicPseudoType, because these
|
||||
// do not carry type information on the wire.
|
||||
//
|
||||
// Any parse errors will be returned as an error, and the type will be the
|
||||
// invalid value cty.NilType.
|
||||
func ImpliedType(buf []byte) (cty.Type, error) {
|
||||
r := bytes.NewReader(buf)
|
||||
dec := msgpack.NewDecoder(r)
|
||||
|
||||
ty, err := impliedType(dec)
|
||||
if err != nil {
|
||||
return cty.NilType, err
|
||||
}
|
||||
|
||||
// We must now be at the end of the buffer
|
||||
err = dec.Skip()
|
||||
if err != io.EOF {
|
||||
return ty, fmt.Errorf("extra bytes after msgpack value")
|
||||
}
|
||||
|
||||
return ty, nil
|
||||
}
|
||||
|
||||
func impliedType(dec *msgpack.Decoder) (cty.Type, error) {
|
||||
// If this function returns with a nil error then it must have already
|
||||
// consumed the next value from the decoder, since when called recursively
|
||||
// the caller will be expecting to find a following value here.
|
||||
|
||||
code, err := dec.PeekCode()
|
||||
if err != nil {
|
||||
return cty.NilType, err
|
||||
}
|
||||
|
||||
switch {
|
||||
|
||||
case code == msgpackcodes.Nil || msgpackcodes.IsExt(code):
|
||||
err := dec.Skip()
|
||||
return cty.DynamicPseudoType, err
|
||||
|
||||
case code == msgpackcodes.True || code == msgpackcodes.False:
|
||||
_, err := dec.DecodeBool()
|
||||
return cty.Bool, err
|
||||
|
||||
case msgpackcodes.IsFixedNum(code):
|
||||
_, err := dec.DecodeInt64()
|
||||
return cty.Number, err
|
||||
|
||||
case code == msgpackcodes.Int8 || code == msgpackcodes.Int16 || code == msgpackcodes.Int32 || code == msgpackcodes.Int64:
|
||||
_, err := dec.DecodeInt64()
|
||||
return cty.Number, err
|
||||
|
||||
case code == msgpackcodes.Uint8 || code == msgpackcodes.Uint16 || code == msgpackcodes.Uint32 || code == msgpackcodes.Uint64:
|
||||
_, err := dec.DecodeUint64()
|
||||
return cty.Number, err
|
||||
|
||||
case code == msgpackcodes.Float || code == msgpackcodes.Double:
|
||||
_, err := dec.DecodeFloat64()
|
||||
return cty.Number, err
|
||||
|
||||
case msgpackcodes.IsString(code):
|
||||
_, err := dec.DecodeString()
|
||||
return cty.String, err
|
||||
|
||||
case msgpackcodes.IsFixedMap(code) || code == msgpackcodes.Map16 || code == msgpackcodes.Map32:
|
||||
return impliedObjectType(dec)
|
||||
|
||||
case msgpackcodes.IsFixedArray(code) || code == msgpackcodes.Array16 || code == msgpackcodes.Array32:
|
||||
return impliedTupleType(dec)
|
||||
|
||||
default:
|
||||
return cty.NilType, fmt.Errorf("unsupported msgpack code %#v", code)
|
||||
}
|
||||
}
|
||||
|
||||
func impliedObjectType(dec *msgpack.Decoder) (cty.Type, error) {
|
||||
// If we get in here then we've already peeked the next code and know
|
||||
// it's some sort of map.
|
||||
l, err := dec.DecodeMapLen()
|
||||
if err != nil {
|
||||
return cty.DynamicPseudoType, nil
|
||||
}
|
||||
|
||||
var atys map[string]cty.Type
|
||||
|
||||
for i := 0; i < l; i++ {
|
||||
// Read the map key first. We require maps to be strings, but msgpack
|
||||
// doesn't so we're prepared to error here if not.
|
||||
k, err := dec.DecodeString()
|
||||
if err != nil {
|
||||
return cty.DynamicPseudoType, err
|
||||
}
|
||||
|
||||
aty, err := impliedType(dec)
|
||||
if err != nil {
|
||||
return cty.DynamicPseudoType, err
|
||||
}
|
||||
|
||||
if atys == nil {
|
||||
atys = make(map[string]cty.Type)
|
||||
}
|
||||
atys[k] = aty
|
||||
}
|
||||
|
||||
if len(atys) == 0 {
|
||||
return cty.EmptyObject, nil
|
||||
}
|
||||
|
||||
return cty.Object(atys), nil
|
||||
}
|
||||
|
||||
func impliedTupleType(dec *msgpack.Decoder) (cty.Type, error) {
|
||||
// If we get in here then we've already peeked the next code and know
|
||||
// it's some sort of array.
|
||||
l, err := dec.DecodeArrayLen()
|
||||
if err != nil {
|
||||
return cty.DynamicPseudoType, nil
|
||||
}
|
||||
|
||||
if l == 0 {
|
||||
return cty.EmptyTuple, nil
|
||||
}
|
||||
|
||||
etys := make([]cty.Type, l)
|
||||
|
||||
for i := 0; i < l; i++ {
|
||||
ety, err := impliedType(dec)
|
||||
if err != nil {
|
||||
return cty.DynamicPseudoType, err
|
||||
}
|
||||
etys[i] = ety
|
||||
}
|
||||
|
||||
return cty.Tuple(etys), nil
|
||||
}
|
16
vendor/github.com/zclconf/go-cty/cty/msgpack/unknown.go
generated
vendored
16
vendor/github.com/zclconf/go-cty/cty/msgpack/unknown.go
generated
vendored
@ -1,16 +0,0 @@
|
||||
package msgpack
|
||||
|
||||
type unknownType struct{}
|
||||
|
||||
var unknownVal = unknownType{}
|
||||
|
||||
// unknownValBytes is the raw bytes of the msgpack fixext1 value we
|
||||
// write to represent an unknown value. It's an extension value of
|
||||
// type zero whose value is irrelevant. Since it's irrelevant, we
|
||||
// set it to a single byte whose value is also zero, since that's
|
||||
// the most compact possible representation.
|
||||
var unknownValBytes = []byte{0xd4, 0, 0}
|
||||
|
||||
func (uv unknownType) MarshalMsgpack() ([]byte, error) {
|
||||
return unknownValBytes, nil
|
||||
}
|
334
vendor/github.com/zclconf/go-cty/cty/msgpack/unmarshal.go
generated
vendored
334
vendor/github.com/zclconf/go-cty/cty/msgpack/unmarshal.go
generated
vendored
@ -1,334 +0,0 @@
|
||||
package msgpack
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/vmihailenco/msgpack/v4"
|
||||
msgpackCodes "github.com/vmihailenco/msgpack/v4/codes"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// Unmarshal interprets the given bytes as a msgpack-encoded cty Value of
|
||||
// the given type, returning the result.
|
||||
//
|
||||
// If an error is returned, the error is written with a hypothetical
|
||||
// end-user that wrote the msgpack file as its audience, using cty type
|
||||
// system concepts rather than Go type system concepts.
|
||||
func Unmarshal(b []byte, ty cty.Type) (cty.Value, error) {
|
||||
r := bytes.NewReader(b)
|
||||
dec := msgpack.NewDecoder(r)
|
||||
|
||||
var path cty.Path
|
||||
return unmarshal(dec, ty, path)
|
||||
}
|
||||
|
||||
func unmarshal(dec *msgpack.Decoder, ty cty.Type, path cty.Path) (cty.Value, error) {
|
||||
peek, err := dec.PeekCode()
|
||||
if err != nil {
|
||||
return cty.DynamicVal, path.NewError(err)
|
||||
}
|
||||
if msgpackCodes.IsExt(peek) {
|
||||
// We just assume _all_ extensions are unknown values,
|
||||
// since we don't have any other extensions.
|
||||
dec.Skip() // skip what we've peeked
|
||||
return cty.UnknownVal(ty), nil
|
||||
}
|
||||
if ty == cty.DynamicPseudoType {
|
||||
return unmarshalDynamic(dec, path)
|
||||
}
|
||||
if peek == msgpackCodes.Nil {
|
||||
dec.Skip() // skip what we've peeked
|
||||
return cty.NullVal(ty), nil
|
||||
}
|
||||
|
||||
switch {
|
||||
case ty.IsPrimitiveType():
|
||||
val, err := unmarshalPrimitive(dec, ty, path)
|
||||
if err != nil {
|
||||
return cty.NilVal, err
|
||||
}
|
||||
return val, nil
|
||||
case ty.IsListType():
|
||||
return unmarshalList(dec, ty.ElementType(), path)
|
||||
case ty.IsSetType():
|
||||
return unmarshalSet(dec, ty.ElementType(), path)
|
||||
case ty.IsMapType():
|
||||
return unmarshalMap(dec, ty.ElementType(), path)
|
||||
case ty.IsTupleType():
|
||||
return unmarshalTuple(dec, ty.TupleElementTypes(), path)
|
||||
case ty.IsObjectType():
|
||||
return unmarshalObject(dec, ty.AttributeTypes(), path)
|
||||
default:
|
||||
return cty.NilVal, path.NewErrorf("unsupported type %s", ty.FriendlyName())
|
||||
}
|
||||
}
|
||||
|
||||
func unmarshalPrimitive(dec *msgpack.Decoder, ty cty.Type, path cty.Path) (cty.Value, error) {
|
||||
switch ty {
|
||||
case cty.Bool:
|
||||
rv, err := dec.DecodeBool()
|
||||
if err != nil {
|
||||
return cty.DynamicVal, path.NewErrorf("bool is required")
|
||||
}
|
||||
return cty.BoolVal(rv), nil
|
||||
case cty.Number:
|
||||
// Marshal will try int and float first, if the value can be
|
||||
// losslessly represented in these encodings, and then fall
|
||||
// back on a string if the number is too large or too precise.
|
||||
peek, err := dec.PeekCode()
|
||||
if err != nil {
|
||||
return cty.DynamicVal, path.NewErrorf("number is required")
|
||||
}
|
||||
|
||||
if msgpackCodes.IsFixedNum(peek) {
|
||||
rv, err := dec.DecodeInt64()
|
||||
if err != nil {
|
||||
return cty.DynamicVal, path.NewErrorf("number is required")
|
||||
}
|
||||
return cty.NumberIntVal(rv), nil
|
||||
}
|
||||
|
||||
switch peek {
|
||||
case msgpackCodes.Int8, msgpackCodes.Int16, msgpackCodes.Int32, msgpackCodes.Int64:
|
||||
rv, err := dec.DecodeInt64()
|
||||
if err != nil {
|
||||
return cty.DynamicVal, path.NewErrorf("number is required")
|
||||
}
|
||||
return cty.NumberIntVal(rv), nil
|
||||
case msgpackCodes.Uint8, msgpackCodes.Uint16, msgpackCodes.Uint32, msgpackCodes.Uint64:
|
||||
rv, err := dec.DecodeUint64()
|
||||
if err != nil {
|
||||
return cty.DynamicVal, path.NewErrorf("number is required")
|
||||
}
|
||||
return cty.NumberUIntVal(rv), nil
|
||||
case msgpackCodes.Float, msgpackCodes.Double:
|
||||
rv, err := dec.DecodeFloat64()
|
||||
if err != nil {
|
||||
return cty.DynamicVal, path.NewErrorf("number is required")
|
||||
}
|
||||
return cty.NumberFloatVal(rv), nil
|
||||
default:
|
||||
rv, err := dec.DecodeString()
|
||||
if err != nil {
|
||||
return cty.DynamicVal, path.NewErrorf("number is required")
|
||||
}
|
||||
v, err := cty.ParseNumberVal(rv)
|
||||
if err != nil {
|
||||
return cty.DynamicVal, path.NewErrorf("number is required")
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
case cty.String:
|
||||
rv, err := dec.DecodeString()
|
||||
if err != nil {
|
||||
return cty.DynamicVal, path.NewErrorf("string is required")
|
||||
}
|
||||
return cty.StringVal(rv), nil
|
||||
default:
|
||||
// should never happen
|
||||
panic("unsupported primitive type")
|
||||
}
|
||||
}
|
||||
|
||||
func unmarshalList(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) {
|
||||
length, err := dec.DecodeArrayLen()
|
||||
if err != nil {
|
||||
return cty.DynamicVal, path.NewErrorf("a list is required")
|
||||
}
|
||||
|
||||
switch {
|
||||
case length < 0:
|
||||
return cty.NullVal(cty.List(ety)), nil
|
||||
case length == 0:
|
||||
return cty.ListValEmpty(ety), nil
|
||||
}
|
||||
|
||||
vals := make([]cty.Value, 0, length)
|
||||
path = append(path, nil)
|
||||
for i := 0; i < length; i++ {
|
||||
path[len(path)-1] = cty.IndexStep{
|
||||
Key: cty.NumberIntVal(int64(i)),
|
||||
}
|
||||
|
||||
val, err := unmarshal(dec, ety, path)
|
||||
if err != nil {
|
||||
return cty.DynamicVal, err
|
||||
}
|
||||
|
||||
vals = append(vals, val)
|
||||
}
|
||||
|
||||
return cty.ListVal(vals), nil
|
||||
}
|
||||
|
||||
func unmarshalSet(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) {
|
||||
length, err := dec.DecodeArrayLen()
|
||||
if err != nil {
|
||||
return cty.DynamicVal, path.NewErrorf("a set is required")
|
||||
}
|
||||
|
||||
switch {
|
||||
case length < 0:
|
||||
return cty.NullVal(cty.Set(ety)), nil
|
||||
case length == 0:
|
||||
return cty.SetValEmpty(ety), nil
|
||||
}
|
||||
|
||||
vals := make([]cty.Value, 0, length)
|
||||
path = append(path, nil)
|
||||
for i := 0; i < length; i++ {
|
||||
path[len(path)-1] = cty.IndexStep{
|
||||
Key: cty.NumberIntVal(int64(i)),
|
||||
}
|
||||
|
||||
val, err := unmarshal(dec, ety, path)
|
||||
if err != nil {
|
||||
return cty.DynamicVal, err
|
||||
}
|
||||
|
||||
vals = append(vals, val)
|
||||
}
|
||||
|
||||
return cty.SetVal(vals), nil
|
||||
}
|
||||
|
||||
func unmarshalMap(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) {
|
||||
length, err := dec.DecodeMapLen()
|
||||
if err != nil {
|
||||
return cty.DynamicVal, path.NewErrorf("a map is required")
|
||||
}
|
||||
|
||||
switch {
|
||||
case length < 0:
|
||||
return cty.NullVal(cty.Map(ety)), nil
|
||||
case length == 0:
|
||||
return cty.MapValEmpty(ety), nil
|
||||
}
|
||||
|
||||
vals := make(map[string]cty.Value, length)
|
||||
path = append(path, nil)
|
||||
for i := 0; i < length; i++ {
|
||||
key, err := dec.DecodeString()
|
||||
if err != nil {
|
||||
path[:len(path)-1].NewErrorf("non-string key in map")
|
||||
}
|
||||
|
||||
path[len(path)-1] = cty.IndexStep{
|
||||
Key: cty.StringVal(key),
|
||||
}
|
||||
|
||||
val, err := unmarshal(dec, ety, path)
|
||||
if err != nil {
|
||||
return cty.DynamicVal, err
|
||||
}
|
||||
|
||||
vals[key] = val
|
||||
}
|
||||
|
||||
return cty.MapVal(vals), nil
|
||||
}
|
||||
|
||||
func unmarshalTuple(dec *msgpack.Decoder, etys []cty.Type, path cty.Path) (cty.Value, error) {
|
||||
length, err := dec.DecodeArrayLen()
|
||||
if err != nil {
|
||||
return cty.DynamicVal, path.NewErrorf("a tuple is required")
|
||||
}
|
||||
|
||||
switch {
|
||||
case length < 0:
|
||||
return cty.NullVal(cty.Tuple(etys)), nil
|
||||
case length == 0:
|
||||
return cty.TupleVal(nil), nil
|
||||
case length != len(etys):
|
||||
return cty.DynamicVal, path.NewErrorf("a tuple of length %d is required", len(etys))
|
||||
}
|
||||
|
||||
vals := make([]cty.Value, 0, length)
|
||||
path = append(path, nil)
|
||||
for i := 0; i < length; i++ {
|
||||
path[len(path)-1] = cty.IndexStep{
|
||||
Key: cty.NumberIntVal(int64(i)),
|
||||
}
|
||||
ety := etys[i]
|
||||
|
||||
val, err := unmarshal(dec, ety, path)
|
||||
if err != nil {
|
||||
return cty.DynamicVal, err
|
||||
}
|
||||
|
||||
vals = append(vals, val)
|
||||
}
|
||||
|
||||
return cty.TupleVal(vals), nil
|
||||
}
|
||||
|
||||
func unmarshalObject(dec *msgpack.Decoder, atys map[string]cty.Type, path cty.Path) (cty.Value, error) {
|
||||
length, err := dec.DecodeMapLen()
|
||||
if err != nil {
|
||||
return cty.DynamicVal, path.NewErrorf("an object is required")
|
||||
}
|
||||
|
||||
switch {
|
||||
case length < 0:
|
||||
return cty.NullVal(cty.Object(atys)), nil
|
||||
case length == 0:
|
||||
return cty.ObjectVal(nil), nil
|
||||
case length != len(atys):
|
||||
return cty.DynamicVal, path.NewErrorf("an object with %d attributes is required (%d given)",
|
||||
len(atys), length)
|
||||
}
|
||||
|
||||
vals := make(map[string]cty.Value, length)
|
||||
path = append(path, nil)
|
||||
for i := 0; i < length; i++ {
|
||||
key, err := dec.DecodeString()
|
||||
if err != nil {
|
||||
return cty.DynamicVal, path[:len(path)-1].NewErrorf("all keys must be strings")
|
||||
}
|
||||
|
||||
path[len(path)-1] = cty.IndexStep{
|
||||
Key: cty.StringVal(key),
|
||||
}
|
||||
aty, exists := atys[key]
|
||||
if !exists {
|
||||
return cty.DynamicVal, path.NewErrorf("unsupported attribute")
|
||||
}
|
||||
|
||||
val, err := unmarshal(dec, aty, path)
|
||||
if err != nil {
|
||||
return cty.DynamicVal, err
|
||||
}
|
||||
|
||||
vals[key] = val
|
||||
}
|
||||
|
||||
return cty.ObjectVal(vals), nil
|
||||
}
|
||||
|
||||
func unmarshalDynamic(dec *msgpack.Decoder, path cty.Path) (cty.Value, error) {
|
||||
length, err := dec.DecodeArrayLen()
|
||||
if err != nil {
|
||||
return cty.DynamicVal, path.NewError(err)
|
||||
}
|
||||
|
||||
switch {
|
||||
case length == -1:
|
||||
return cty.NullVal(cty.DynamicPseudoType), nil
|
||||
case length != 2:
|
||||
return cty.DynamicVal, path.NewErrorf(
|
||||
"dynamic value array must have exactly two elements",
|
||||
)
|
||||
}
|
||||
|
||||
typeJSON, err := dec.DecodeBytes()
|
||||
if err != nil {
|
||||
return cty.DynamicVal, path.NewError(err)
|
||||
}
|
||||
var ty cty.Type
|
||||
err = (&ty).UnmarshalJSON(typeJSON)
|
||||
if err != nil {
|
||||
return cty.DynamicVal, path.NewError(err)
|
||||
}
|
||||
|
||||
return unmarshal(dec, ty, path)
|
||||
}
|
Reference in New Issue
Block a user