Merge pull request #1620 from terraform-providers/paddy_update_vendor

Update schema to 0.11.7.
Paddy 2018-06-11 13:08:54 -07:00 committed by GitHub
commit f6f062d696
49 changed files with 1242 additions and 373 deletions

View File

@ -231,7 +231,10 @@ func (r *Resource) Count() (int, error) {
v, err := strconv.ParseInt(count, 0, 0)
if err != nil {
return 0, err
return 0, fmt.Errorf(
"cannot parse %q as an integer",
count,
)
}
return int(v), nil

View File

@ -3,6 +3,7 @@ package module
import (
"io/ioutil"
"os"
"path/filepath"
"github.com/hashicorp/go-getter"
)
@ -37,13 +38,10 @@ func GetCopy(dst, src string) error {
if err != nil {
return err
}
// FIXME: This isn't completely safe. Creating and removing our temp path
// exposes where to race to inject files.
if err := os.RemoveAll(tmpDir); err != nil {
return err
}
defer os.RemoveAll(tmpDir)
tmpDir = filepath.Join(tmpDir, "module")
// Get to that temporary dir
if err := getter.Get(tmpDir, src); err != nil {
return err

View File

@ -67,7 +67,7 @@ func (t *Tree) validateProviderAlias() error {
// We didn't find the alias, error!
err = multierror.Append(err, fmt.Errorf(
"module %s: provider alias must be defined by the module or a parent: %s",
"module %s: provider alias must be defined by the module: %s",
strings.Join(pv.Path, "."), k))
}
}

View File

@ -18,6 +18,11 @@ func UniqueId() string {
return PrefixedUniqueId(UniqueIdPrefix)
}
// UniqueIDSuffixLength is the string length of the suffix generated by
// PrefixedUniqueId. This can be used by length validation functions to
// ensure prefixes are the correct length for the target field.
const UniqueIDSuffixLength = 26
// Helper for a resource to generate a unique identifier w/ given prefix
//
// After the prefix, the ID consists of an incrementing 26 digit value (to match
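
As a usage note: the new constant lets providers validate user-supplied prefixes against a field's length limit. A minimal sketch, assuming a hypothetical 64-character name limit and the helper/resource import path:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/resource"
)

// maxNameLength is a hypothetical API field limit, for illustration only.
const maxNameLength = 64

// validateNamePrefix checks that a prefix leaves room for the
// 26-character suffix appended by PrefixedUniqueId.
func validateNamePrefix(prefix string) error {
	if len(prefix)+resource.UniqueIDSuffixLength > maxNameLength {
		return fmt.Errorf("prefix %q is too long: %d + %d suffix exceeds %d",
			prefix, len(prefix), resource.UniqueIDSuffixLength, maxNameLength)
	}
	return nil
}

func main() {
	fmt.Println(validateNamePrefix("web-server-")) // <nil>
}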

View File

@ -310,6 +310,11 @@ type TestStep struct {
// no-op plans
PlanOnly bool
// PreventDiskCleanup can be set to true for testing terraform modules which
// require access to disk at runtime. Note that this will leave files in the
// temp folder
PreventDiskCleanup bool
// PreventPostDestroyRefresh can be set to true for cases where data sources
// are tested alongside real resources
PreventPostDestroyRefresh bool
@ -564,6 +569,7 @@ func Test(t TestT, c TestCase) {
Config: lastStep.Config,
Check: c.CheckDestroy,
Destroy: true,
PreventDiskCleanup: lastStep.PreventDiskCleanup,
PreventPostDestroyRefresh: c.PreventPostDestroyRefresh,
}
@ -730,9 +736,7 @@ func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r
return nil
}
func testModule(
opts terraform.ContextOpts,
step TestStep) (*module.Tree, error) {
func testModule(opts terraform.ContextOpts, step TestStep) (*module.Tree, error) {
if step.PreConfig != nil {
step.PreConfig()
}
@ -742,7 +746,12 @@ func testModule(
return nil, fmt.Errorf(
"Error creating temporary directory for config: %s", err)
}
defer os.RemoveAll(cfgPath)
if step.PreventDiskCleanup {
log.Printf("[INFO] Skipping defer os.RemoveAll call")
} else {
defer os.RemoveAll(cfgPath)
}
// Write the configuration
cfgF, err := os.Create(filepath.Join(cfgPath, "main.tf"))
@ -1135,6 +1144,10 @@ func modulePrimaryInstanceState(s *terraform.State, ms *terraform.ModuleState, n
// given resource name in a given module path.
func modulePathPrimaryInstanceState(s *terraform.State, mp []string, name string) (*terraform.InstanceState, error) {
ms := s.ModuleByPath(mp)
if ms == nil {
return nil, fmt.Errorf("No module found at: %s", mp)
}
return modulePrimaryInstanceState(s, ms, name)
}
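
A sketch of how an acceptance test might opt into the new flag; the provider wiring and config fixture here are hypothetical:

package example

import (
	"testing"

	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"
)

// testAccProviders is assumed to be wired up by the provider's test setup.
var testAccProviders map[string]terraform.ResourceProvider

// testAccExampleThingConfig is a hypothetical fixture.
const testAccExampleThingConfig = `
resource "example_thing" "test" {}
`

func TestAccExampleThing_keepFiles(t *testing.T) {
	resource.Test(t, resource.TestCase{
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			{
				Config: testAccExampleThingConfig,
				// Leave the rendered config in the temp folder; the
				// module under test reads from disk at apply time.
				PreventDiskCleanup: true,
			},
		},
	})
}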

View File

@ -74,7 +74,7 @@ func RetryableError(err error) *RetryError {
return &RetryError{Err: err, Retryable: true}
}
// NonRetryableError is a helper to create a RetryError that's _not)_ retryable
// NonRetryableError is a helper to create a RetryError that's _not_ retryable
// from a given error.
func NonRetryableError(err error) *RetryError {
if err == nil {
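
For context, the corrected helpers pair up inside resource.Retry like this; the polling callback is a placeholder:

package example

import (
	"fmt"
	"time"

	"github.com/hashicorp/terraform/helper/resource"
)

// waitForThing polls a hypothetical API until the object is ready,
// retrying transient "pending" states and aborting on anything else.
func waitForThing(getState func() (string, error)) error {
	return resource.Retry(5*time.Minute, func() *resource.RetryError {
		state, err := getState()
		if err != nil {
			// Permanent failure: stop retrying immediately.
			return resource.NonRetryableError(err)
		}
		if state == "pending" {
			// Transient state: keep retrying until the timeout elapses.
			return resource.RetryableError(fmt.Errorf("still pending"))
		}
		return nil
	})
}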

View File

@ -126,6 +126,8 @@ func addrToSchema(addr []string, schemaMap map[string]*Schema) []*Schema {
switch v := current.Elem.(type) {
case ValueType:
current = &Schema{Type: v}
case *Schema:
current, _ = current.Elem.(*Schema)
default:
// maps default to string values. This is all we can have
// if this is nested in another list or map.
@ -249,11 +251,10 @@ func readObjectField(
}
// convert map values to the proper primitive type based on schema.Elem
func mapValuesToPrimitive(m map[string]interface{}, schema *Schema) error {
elemType := TypeString
if et, ok := schema.Elem.(ValueType); ok {
elemType = et
func mapValuesToPrimitive(k string, m map[string]interface{}, schema *Schema) error {
elemType, err := getValueType(k, schema)
if err != nil {
return err
}
switch elemType {

View File

@ -206,7 +206,7 @@ func (r *ConfigFieldReader) readMap(k string, schema *Schema) (FieldReadResult,
panic(fmt.Sprintf("unknown type: %#v", mraw))
}
err := mapValuesToPrimitive(result, schema)
err := mapValuesToPrimitive(k, result, schema)
if err != nil {
return FieldReadResult{}, nil
}

View File

@ -122,7 +122,8 @@ func (r *DiffFieldReader) readMap(
result[k] = v.New
}
err = mapValuesToPrimitive(result, schema)
key := address[len(address)-1]
err = mapValuesToPrimitive(key, result, schema)
if err != nil {
return FieldReadResult{}, nil
}

View File

@ -61,7 +61,7 @@ func (r *MapFieldReader) readMap(k string, schema *Schema) (FieldReadResult, err
return true
})
err := mapValuesToPrimitive(result, schema)
err := mapValuesToPrimitive(k, result, schema)
if err != nil {
return FieldReadResult{}, nil
}

View File

@ -59,7 +59,7 @@ type Provider struct {
meta interface{}
// a mutex is required because TestReset can directly repalce the stopCtx
// a mutex is required because TestReset can directly replace the stopCtx
stopMu sync.Mutex
stopCtx context.Context
stopCtxCancel context.CancelFunc

View File

@ -492,6 +492,12 @@ func (r *Resource) Data(s *terraform.InstanceState) *ResourceData {
panic(err)
}
// load the Resource timeouts
result.timeouts = r.Timeouts
if result.timeouts == nil {
result.timeouts = &ResourceTimeout{}
}
// Set the schema version to latest by default
result.meta = map[string]interface{}{
"schema_version": strconv.Itoa(r.SchemaVersion),

View File

@ -366,6 +366,13 @@ func (d *ResourceData) State() *terraform.InstanceState {
func (d *ResourceData) Timeout(key string) time.Duration {
key = strings.ToLower(key)
// System default of 20 minutes
defaultTimeout := 20 * time.Minute
if d.timeouts == nil {
return defaultTimeout
}
var timeout *time.Duration
switch key {
case TimeoutCreate:
@ -386,8 +393,7 @@ func (d *ResourceData) Timeout(key string) time.Duration {
return *d.timeouts.Default
}
// Return system default of 20 minutes
return 20 * time.Minute
return defaultTimeout
}
func (d *ResourceData) init() {
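
A sketch of how a resource's Create function typically consumes the timeout, with the nil guard above covering resources that never configure one; the refresh function is a placeholder:

package example

import (
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"
)

func resourceExampleCreate(d *schema.ResourceData, meta interface{}) error {
	// Falls back to the 20-minute system default when no timeout is
	// configured on the resource or declared in the schema.
	stateConf := &resource.StateChangeConf{
		Pending: []string{"creating"},
		Target:  []string{"ready"},
		Timeout: d.Timeout(schema.TimeoutCreate),
		Refresh: func() (interface{}, string, error) {
			// Placeholder: poll the real API here.
			return struct{}{}, "ready", nil
		},
	}
	_, err := stateConf.WaitForState()
	return err
}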

View File

@ -135,6 +135,10 @@ type ResourceDiff struct {
// diff does not get re-run on keys that were not touched, or diffs that were
// just removed (re-running on the latter would just roll back the removal).
updatedKeys map[string]bool
// Tracks which keys were flagged as forceNew. These keys are not saved in
// newWriter, but we need to track them so that they can be re-diffed later.
forcedNewKeys map[string]bool
}
// newResourceDiff creates a new ResourceDiff instance.
@ -193,17 +197,30 @@ func newResourceDiff(schema map[string]*Schema, config *terraform.ResourceConfig
}
d.updatedKeys = make(map[string]bool)
d.forcedNewKeys = make(map[string]bool)
return d
}
// UpdatedKeys returns the keys that were updated by this ResourceDiff run.
// These are the only keys that a diff should be re-calculated for.
//
// This is the combined result of keys whose diff values were updated or
// cleared, and keys that were flagged to be re-diffed as a result of
// ForceNew.
func (d *ResourceDiff) UpdatedKeys() []string {
var s []string
for k := range d.updatedKeys {
s = append(s, k)
}
for k := range d.forcedNewKeys {
for _, l := range s {
if k == l {
break
}
}
s = append(s, k)
}
return s
}
@ -223,9 +240,11 @@ func (d *ResourceDiff) Clear(key string) error {
func (d *ResourceDiff) clear(key string) error {
// Check the schema to make sure that this key exists first.
if _, ok := d.schema[key]; !ok {
schemaL := addrToSchema(strings.Split(key, "."), d.schema)
if len(schemaL) == 0 {
return fmt.Errorf("%s is not a valid key", key)
}
for k := range d.diff.Attributes {
if strings.HasPrefix(k, key) {
delete(d.diff.Attributes, k)
@ -234,6 +253,19 @@ func (d *ResourceDiff) clear(key string) error {
return nil
}
// GetChangedKeysPrefix helps to implement Resource.CustomizeDiff
// where we need to act on all nested fields
// without calling out each one separately.
func (d *ResourceDiff) GetChangedKeysPrefix(prefix string) []string {
keys := make([]string, 0)
for k := range d.diff.Attributes {
if strings.HasPrefix(k, prefix) {
keys = append(keys, k)
}
}
return keys
}
// diffChange helps to implement resourceDiffer and derives its change values
// from ResourceDiff's own change data, in addition to existing diff, config, and state.
func (d *ResourceDiff) diffChange(key string) (interface{}, interface{}, bool, bool, bool) {
@ -242,7 +274,7 @@ func (d *ResourceDiff) diffChange(key string) (interface{}, interface{}, bool, b
if !old.Exists {
old.Value = nil
}
if !new.Exists {
if !new.Exists || d.removed(key) {
new.Value = nil
}
@ -309,9 +341,23 @@ func (d *ResourceDiff) ForceNew(key string) error {
return fmt.Errorf("ForceNew: No changes for %s", key)
}
_, new := d.GetChange(key)
d.schema[key].ForceNew = true
return d.setDiff(key, new, false)
keyParts := strings.Split(key, ".")
var schema *Schema
schemaL := addrToSchema(keyParts, d.schema)
if len(schemaL) > 0 {
schema = schemaL[len(schemaL)-1]
} else {
return fmt.Errorf("ForceNew: %s is not a valid key", key)
}
schema.ForceNew = true
// Flag this for a re-diff. Don't save any values to guarantee that existing
// diffs aren't messed with, as this gets messy when dealing with complex
// structures, zero values, etc.
d.forcedNewKeys[keyParts[0]] = true
return nil
}
// Get hands off to ResourceData.Get.
@ -352,6 +398,29 @@ func (d *ResourceDiff) GetOk(key string) (interface{}, bool) {
return r.Value, exists
}
// GetOkExists functions the same way as GetOkExists within ResourceData, but
// it also checks the new diff levels to provide data consistent with the
// current state of the customized diff.
//
// This is nearly the same function as GetOk, yet it does not check
// for the zero value of the attribute's type. This allows attributes
// without a default to be fully checked for a literal assignment,
// regardless of the zero value for that type.
func (d *ResourceDiff) GetOkExists(key string) (interface{}, bool) {
r := d.get(strings.Split(key, "."), "newDiff")
exists := r.Exists && !r.Computed
return r.Value, exists
}
// NewValueKnown returns true if the new value for the given key is available
// as its final value at diff time. If the return value is false, this means
// either the value is based on interpolation that was unavailable at diff
// time, or that the value was explicitly marked as computed by SetNewComputed.
func (d *ResourceDiff) NewValueKnown(key string) bool {
r := d.get(strings.Split(key, "."), "newDiff")
return !r.Computed
}
// HasChange checks to see if there is a change between state and the diff, or
// in the overridden diff.
func (d *ResourceDiff) HasChange(key string) bool {
@ -400,6 +469,16 @@ func (d *ResourceDiff) getChange(key string) (getResult, getResult, bool) {
return old, new, false
}
// removed checks to see if the key is present in the existing, pre-customized
// diff and if it was marked as NewRemoved.
func (d *ResourceDiff) removed(k string) bool {
diff, ok := d.diff.Attributes[k]
if !ok {
return false
}
return diff.NewRemoved
}
// get performs the appropriate multi-level reader logic for ResourceDiff,
// starting at source. Refer to newResourceDiff for the level order.
func (d *ResourceDiff) get(addr []string, source string) getResult {
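
Taken together, a provider's CustomizeDiff function might exercise the new helpers like this; the attribute names are hypothetical:

package example

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/schema"
)

func customizeExampleDiff(d *schema.ResourceDiff, meta interface{}) error {
	// Force replacement for every nested storage option that changed.
	for _, key := range d.GetChangedKeysPrefix("storage_option") {
		if err := d.ForceNew(key); err != nil {
			return err
		}
	}
	// Only validate "size" once its final value is known at plan time.
	if d.NewValueKnown("size") {
		// GetOkExists distinguishes "explicitly set to zero" from "unset".
		if v, ok := d.GetOkExists("size"); ok && v.(int) == 0 {
			return fmt.Errorf("size must be greater than 0")
		}
	}
	return nil
}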

View File

@ -395,7 +395,7 @@ func (m *schemaMap) DeepCopy() schemaMap {
if err != nil {
panic(err)
}
return copy.(schemaMap)
return *copy.(*schemaMap)
}
// Diff returns the diff for a resource given the schema map,
@ -427,6 +427,13 @@ func (m schemaMap) Diff(
}
}
// Remove any nil diffs just to keep things clean
for k, v := range result.Attributes {
if v == nil {
delete(result.Attributes, k)
}
}
// If this is a non-destroy diff, call any custom diff logic that has been
// defined.
if !result.DestroyTainted && customizeDiff != nil {
@ -521,13 +528,6 @@ func (m schemaMap) Diff(
result = result2
}
// Remove any nil diffs just to keep things clean
for k, v := range result.Attributes {
if v == nil {
delete(result.Attributes, k)
}
}
// Go through and detect all of the ComputedWhens now that we've
// finished the diff.
// TODO
@ -1270,9 +1270,9 @@ func (m schemaMap) validateConflictingAttributes(
}
for _, conflicting_key := range schema.ConflictsWith {
if value, ok := c.Get(conflicting_key); ok {
if _, ok := c.Get(conflicting_key); ok {
return fmt.Errorf(
"%q: conflicts with %s (%#v)", k, conflicting_key, value)
"%q: conflicts with %s", k, conflicting_key)
}
}
@ -1461,13 +1461,10 @@ func getValueType(k string, schema *Schema) (ValueType, error) {
return vt, nil
}
// If a Schema is provided to a Map, we use the Type of that schema
// as the type for each element in the Map.
if s, ok := schema.Elem.(*Schema); ok {
if s.Elem == nil {
return TypeString, nil
}
if vt, ok := s.Elem.(ValueType); ok {
return vt, nil
}
return s.Type, nil
}
if _, ok := schema.Elem.(*Resource); ok {

View File

@ -1,11 +1,13 @@
package validation
import (
"bytes"
"fmt"
"net"
"reflect"
"regexp"
"strings"
"time"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/helper/structure"
@ -179,6 +181,51 @@ func CIDRNetwork(min, max int) schema.SchemaValidateFunc {
}
}
// SingleIP returns a SchemaValidateFunc which tests if the provided value
// is of type string, and in valid single IP notation
func SingleIP() schema.SchemaValidateFunc {
return func(i interface{}, k string) (s []string, es []error) {
v, ok := i.(string)
if !ok {
es = append(es, fmt.Errorf("expected type of %s to be string", k))
return
}
ip := net.ParseIP(v)
if ip == nil {
es = append(es, fmt.Errorf(
"expected %s to contain a valid IP, got: %s", k, v))
}
return
}
}
// IPRange returns a SchemaValidateFunc which tests if the provided value
// is of type string, and in valid IP range notation
func IPRange() schema.SchemaValidateFunc {
return func(i interface{}, k string) (s []string, es []error) {
v, ok := i.(string)
if !ok {
es = append(es, fmt.Errorf("expected type of %s to be string", k))
return
}
ips := strings.Split(v, "-")
if len(ips) != 2 {
es = append(es, fmt.Errorf(
"expected %s to contain a valid IP range, got: %s", k, v))
return
}
ip1 := net.ParseIP(ips[0])
ip2 := net.ParseIP(ips[1])
if ip1 == nil || ip2 == nil || bytes.Compare(ip1, ip2) > 0 {
es = append(es, fmt.Errorf(
"expected %s to contain a valid IP range, got: %s", k, v))
}
return
}
}
// ValidateJsonString is a SchemaValidateFunc which tests to make sure the
// supplied string is valid JSON.
func ValidateJsonString(v interface{}, k string) (ws []string, errors []error) {
@ -210,3 +257,12 @@ func ValidateRegexp(v interface{}, k string) (ws []string, errors []error) {
}
return
}
// ValidateRFC3339TimeString is a ValidateFunc that ensures a string parses
// as time.RFC3339 format
func ValidateRFC3339TimeString(v interface{}, k string) (ws []string, errors []error) {
if _, err := time.Parse(time.RFC3339, v.(string)); err != nil {
errors = append(errors, fmt.Errorf("%q: invalid RFC3339 timestamp", k))
}
return
}
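
A sketch of how a provider schema might attach the new validators; the attribute names are hypothetical:

package example

import (
	"github.com/hashicorp/terraform/helper/schema"
	"github.com/hashicorp/terraform/helper/validation"
)

var exampleSchema = map[string]*schema.Schema{
	"gateway_ip": {
		Type:         schema.TypeString,
		Optional:     true,
		ValidateFunc: validation.SingleIP(),
	},
	"dhcp_range": {
		Type:         schema.TypeString,
		Optional:     true,
		ValidateFunc: validation.IPRange(), // e.g. "10.0.0.2-10.0.0.254"
	},
	"expires_at": {
		Type:     schema.TypeString,
		Optional: true,
		// Not a constructor: this one is itself a ValidateFunc.
		ValidateFunc: validation.ValidateRFC3339TimeString,
	},
}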

View File

@ -0,0 +1,18 @@
package httpclient
import (
"net/http"
cleanhttp "github.com/hashicorp/go-cleanhttp"
)
// New returns the DefaultPooledClient from the cleanhttp
// package that will also send a Terraform User-Agent string.
func New() *http.Client {
cli := cleanhttp.DefaultPooledClient()
cli.Transport = &userAgentRoundTripper{
userAgent: UserAgentString(),
inner: cli.Transport,
}
return cli
}
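
Several call sites later in this diff (plugin discovery, the registry client) switch from cleanhttp.DefaultPooledClient() to this constructor. A minimal sketch of direct use; the URL is illustrative:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/httpclient"
)

func main() {
	client := httpclient.New()
	// Requests made with this client carry the Terraform User-Agent
	// header added by the wrapping round tripper.
	resp, err := client.Get("https://releases.hashicorp.com")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}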

View File

@ -0,0 +1,40 @@
package httpclient
import (
"fmt"
"log"
"net/http"
"os"
"strings"
"github.com/hashicorp/terraform/version"
)
const userAgentFormat = "Terraform/%s"
const uaEnvVar = "TF_APPEND_USER_AGENT"
func UserAgentString() string {
ua := fmt.Sprintf(userAgentFormat, version.Version)
if add := os.Getenv(uaEnvVar); add != "" {
add = strings.TrimSpace(add)
if len(add) > 0 {
ua += " " + add
log.Printf("[DEBUG] Using modified User-Agent: %s", ua)
}
}
return ua
}
type userAgentRoundTripper struct {
inner http.RoundTripper
userAgent string
}
func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
if _, ok := req.Header["User-Agent"]; !ok {
req.Header.Set("User-Agent", rt.userAgent)
}
return rt.inner.RoundTrip(req)
}
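
A sketch of the append behavior; the printed value assumes the 0.11.7 version set elsewhere in this diff:

package main

import (
	"fmt"
	"os"

	"github.com/hashicorp/terraform/httpclient"
)

func main() {
	os.Setenv("TF_APPEND_USER_AGENT", "my-ci-pipeline/1.0")
	// Prints something like: Terraform/0.11.7 my-ci-pipeline/1.0
	fmt.Println(httpclient.UserAgentString())
}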

View File

@ -95,7 +95,7 @@ func findPluginPaths(kind string, dirs []string) []string {
continue
}
log.Printf("[WARNING] found legacy %s %q", kind, fullName)
log.Printf("[WARN] found legacy %s %q", kind, fullName)
ret = append(ret, filepath.Clean(absPath))
}

View File

@ -15,9 +15,9 @@ import (
"golang.org/x/net/html"
cleanhttp "github.com/hashicorp/go-cleanhttp"
getter "github.com/hashicorp/go-getter"
multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/terraform/httpclient"
"github.com/mitchellh/cli"
)
@ -33,7 +33,19 @@ const protocolVersionHeader = "x-terraform-protocol-version"
var releaseHost = "https://releases.hashicorp.com"
var httpClient = cleanhttp.DefaultPooledClient()
var httpClient *http.Client
func init() {
httpClient = httpclient.New()
httpGetter := &getter.HttpGetter{
Client: httpClient,
Netrc: true,
}
getter.Getters["http"] = httpGetter
getter.Getters["https"] = httpGetter
}
// An Installer maintains a local cache of plugins by downloading plugins
// from an online repository.
@ -369,7 +381,7 @@ func checkPlugin(url string, pluginProtocolVersion uint) bool {
if proto == "" {
// The header isn't present, but we don't make this error fatal since
// the latest version will probably work.
log.Printf("[WARNING] missing %s from: %s", protocolVersionHeader, url)
log.Printf("[WARN] missing %s from: %s", protocolVersionHeader, url)
return true
}

View File

@ -11,7 +11,7 @@ import (
"strings"
"time"
cleanhttp "github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/terraform/httpclient"
"github.com/hashicorp/terraform/registry/regsrc"
"github.com/hashicorp/terraform/registry/response"
"github.com/hashicorp/terraform/svchost"
@ -51,11 +51,11 @@ func NewClient(services *disco.Disco, creds auth.CredentialsSource, client *http
services.SetCredentialsSource(creds)
if client == nil {
client = cleanhttp.DefaultPooledClient()
client = httpclient.New()
client.Timeout = requestTimeout
}
services.Transport = client.Transport.(*http.Transport)
services.Transport = client.Transport
return &Client{
client: client,
@ -67,6 +67,9 @@ func NewClient(services *disco.Disco, creds auth.CredentialsSource, client *http
// Discover queries the host, and returns the URL for the registry.
func (c *Client) Discover(host svchost.Hostname) *url.URL {
service := c.services.DiscoverServiceURL(host, serviceID)
if service == nil {
return nil
}
if !strings.HasSuffix(service.Path, "/") {
service.Path += "/"
}
@ -112,7 +115,7 @@ func (c *Client) Versions(module *regsrc.Module) (*response.ModuleVersions, erro
case http.StatusOK:
// OK
case http.StatusNotFound:
return nil, fmt.Errorf("module %q not found", module.String())
return nil, &errModuleNotFound{addr: module}
default:
return nil, fmt.Errorf("error looking up module versions: %s", resp.Status)
}
@ -140,7 +143,7 @@ func (c *Client) addRequestCreds(host svchost.Hostname, req *http.Request) {
creds, err := c.creds.ForHost(host)
if err != nil {
log.Printf("[WARNING] Failed to get credentials for %s: %s (ignoring)", host, err)
log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", host, err)
return
}

View File

@ -0,0 +1,23 @@
package registry
import (
"fmt"
"github.com/hashicorp/terraform/registry/regsrc"
)
type errModuleNotFound struct {
addr *regsrc.Module
}
func (e *errModuleNotFound) Error() string {
return fmt.Sprintf("module %s not found", e.addr)
}
// IsModuleNotFound returns true only if the given error is a "module not found"
// error. This allows callers to recognize this particular error condition
// as distinct from operational errors such as poor network connectivity.
func IsModuleNotFound(err error) bool {
_, ok := err.(*errModuleNotFound)
return ok
}
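
A sketch of a caller branching on the new error type instead of matching message text; the wrapper function is hypothetical:

package example

import (
	"log"

	"github.com/hashicorp/terraform/registry"
	"github.com/hashicorp/terraform/registry/regsrc"
	"github.com/hashicorp/terraform/registry/response"
)

// lookupVersions treats "not found" as a non-fatal condition, distinct
// from operational errors such as poor network connectivity.
func lookupVersions(c *registry.Client, module *regsrc.Module) (*response.ModuleVersions, error) {
	versions, err := c.Versions(module)
	if registry.IsModuleNotFound(err) {
		log.Printf("[DEBUG] module %s not found in registry", module)
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return versions, nil
}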

View File

@ -8,7 +8,6 @@ package disco
import (
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
@ -20,17 +19,15 @@ import (
cleanhttp "github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/terraform/svchost"
"github.com/hashicorp/terraform/svchost/auth"
"github.com/hashicorp/terraform/version"
)
const (
discoPath = "/.well-known/terraform.json"
maxRedirects = 3 // arbitrary-but-small number to prevent runaway redirect loops
discoTimeout = 4 * time.Second // arbitrary-but-small time limit to prevent UI "hangs" during discovery
maxDiscoDocBytes = 1 * 1024 * 1024 // 1MB - to prevent abusive services from using loads of our memory
maxRedirects = 3 // arbitrary-but-small number to prevent runaway redirect loops
discoTimeout = 11 * time.Second // arbitrary-but-small time limit to prevent UI "hangs" during discovery
maxDiscoDocBytes = 1 * 1024 * 1024 // 1MB - to prevent abusive services from using loads of our memory
)
var userAgent = fmt.Sprintf("Terraform/%s (service discovery)", version.String())
var httpTransport = cleanhttp.DefaultPooledTransport() // overridden during tests, to skip TLS verification
// Disco is the main type in this package, which allows discovery on given
@ -40,9 +37,9 @@ type Disco struct {
hostCache map[svchost.Hostname]Host
credsSrc auth.CredentialsSource
// Transport is a custom http.Transport to use.
// Transport is a custom http.RoundTripper to use.
// A package default is used if this is nil.
Transport *http.Transport
Transport http.RoundTripper
}
func NewDisco() *Disco {
@ -142,13 +139,9 @@ func (d *Disco) discover(host svchost.Hostname) Host {
},
}
var header = http.Header{}
header.Set("User-Agent", userAgent)
req := &http.Request{
Method: "GET",
URL: discoURL,
Header: header,
}
if d.credsSrc != nil {
@ -158,7 +151,7 @@ func (d *Disco) discover(host svchost.Hostname) Host {
creds.PrepareRequest(req) // alters req to include credentials
}
} else {
log.Printf("[WARNING] Failed to get credentials for %s: %s (ignoring)", host, err)
log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", host, err)
}
}
@ -170,11 +163,11 @@ func (d *Disco) discover(host svchost.Hostname) Host {
resp, err := client.Do(req)
if err != nil {
log.Printf("[WARNING] Failed to request discovery document: %s", err)
log.Printf("[WARN] Failed to request discovery document: %s", err)
return ret // empty
}
if resp.StatusCode != 200 {
log.Printf("[WARNING] Failed to request discovery document: %s", resp.Status)
log.Printf("[WARN] Failed to request discovery document: %s", resp.Status)
return ret // empty
}
@ -185,7 +178,7 @@ func (d *Disco) discover(host svchost.Hostname) Host {
contentType := resp.Header.Get("Content-Type")
mediaType, _, err := mime.ParseMediaType(contentType)
if err != nil {
log.Printf("[WARNING] Discovery URL has malformed Content-Type %q", contentType)
log.Printf("[WARN] Discovery URL has malformed Content-Type %q", contentType)
return ret // empty
}
if mediaType != "application/json" {
@ -197,7 +190,7 @@ func (d *Disco) discover(host svchost.Hostname) Host {
if resp.ContentLength > maxDiscoDocBytes {
// Size limit here is not a contractual requirement and so we may
// adjust it over time if we find a different limit is warranted.
log.Printf("[WARNING] Discovery doc response is too large (got %d bytes; limit %d)", resp.ContentLength, maxDiscoDocBytes)
log.Printf("[WARN] Discovery doc response is too large (got %d bytes; limit %d)", resp.ContentLength, maxDiscoDocBytes)
return ret // empty
}
@ -208,14 +201,14 @@ func (d *Disco) discover(host svchost.Hostname) Host {
servicesBytes, err := ioutil.ReadAll(lr)
if err != nil {
log.Printf("[WARNING] Error reading discovery document body: %s", err)
log.Printf("[WARN] Error reading discovery document body: %s", err)
return ret // empty
}
var services map[string]interface{}
err = json.Unmarshal(servicesBytes, &services)
if err != nil {
log.Printf("[WARNING] Failed to decode discovery document as a JSON object: %s", err)
log.Printf("[WARN] Failed to decode discovery document as a JSON object: %s", err)
return ret // empty
}
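
Since Transport is now an http.RoundTripper rather than a concrete *http.Transport, callers can wrap it; a sketch with a hypothetical logging wrapper:

package main

import (
	"log"
	"net/http"

	"github.com/hashicorp/terraform/svchost/disco"
)

// loggingTripper is a hypothetical wrapper; any http.RoundTripper now
// satisfies the relaxed Transport field.
type loggingTripper struct {
	inner http.RoundTripper
}

func (l *loggingTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	log.Printf("[DEBUG] discovery request: %s", req.URL)
	return l.inner.RoundTrip(req)
}

func main() {
	d := disco.NewDisco()
	d.Transport = &loggingTripper{inner: http.DefaultTransport}
	_ = d
}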

View File

@ -487,6 +487,13 @@ func (c *Context) Input(mode InputMode) error {
func (c *Context) Apply() (*State, error) {
defer c.acquireRun("apply")()
// Check there are no empty target parameter values
for _, target := range c.targets {
if target == "" {
return nil, fmt.Errorf("Target parameter must not have empty value")
}
}
// Copy our own state
c.state = c.state.DeepCopy()
@ -524,6 +531,13 @@ func (c *Context) Apply() (*State, error) {
func (c *Context) Plan() (*Plan, error) {
defer c.acquireRun("plan")()
// Check there are no empty target parameter values
for _, target := range c.targets {
if target == "" {
return nil, fmt.Errorf("Target parameter must not have empty value")
}
}
p := &Plan{
Module: c.module,
Vars: c.variables,

View File

@ -396,11 +396,6 @@ type ResourceAttrDiff struct {
Type DiffAttrType
}
// Modified returns the inequality of Old and New for this attr
func (d *ResourceAttrDiff) Modified() bool {
return d.Old != d.New
}
// Empty returns true if the diff for this attr is neutral
func (d *ResourceAttrDiff) Empty() bool {
return d.Old == d.New && !d.NewComputed && !d.NewRemoved

View File

@ -227,11 +227,8 @@ func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
state.Tainted = true
}
if n.Error != nil {
*n.Error = multierror.Append(*n.Error, err)
} else {
return nil, err
}
*n.Error = multierror.Append(*n.Error, err)
return nil, err
}
{

View File

@ -256,13 +256,15 @@ func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error {
containers := groupContainers(diff)
keep := map[string]bool{}
for _, v := range containers {
if v.keepDiff() {
if v.keepDiff(ignorableAttrKeys) {
// At least one key has changes, so list all the sibling keys
// to keep in the diff if any values have changed
// to keep in the diff
for k := range v {
if v[k].Modified() {
keep[k] = true
}
keep[k] = true
// this key may have been added by the user to ignore, but
// if it's a subkey in a container, we need to un-ignore it
// to keep the complete container.
delete(ignorableAttrKeys, k)
}
}
}
@ -294,10 +296,17 @@ func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error {
// a group of key-*ResourceAttrDiff pairs from the same flatmapped container
type flatAttrDiff map[string]*ResourceAttrDiff
// we need to keep all keys if any of them have a diff
func (f flatAttrDiff) keepDiff() bool {
for _, v := range f {
if !v.Empty() && !v.NewComputed {
// we need to keep all keys if any of them have a diff that's not ignored
func (f flatAttrDiff) keepDiff(ignoreChanges map[string]bool) bool {
for k, v := range f {
ignore := false
for attr := range ignoreChanges {
if strings.HasPrefix(k, attr) {
ignore = true
}
}
if !v.Empty() && !v.NewComputed && !ignore {
return true
}
}

View File

@ -144,8 +144,10 @@ func (n *EvalValidateProvisioner) validateConnConfig(connConfig *ResourceConfig)
// For type=ssh only (enforced in ssh communicator)
PrivateKey interface{} `mapstructure:"private_key"`
HostKey interface{} `mapstructure:"host_key"`
Agent interface{} `mapstructure:"agent"`
BastionHost interface{} `mapstructure:"bastion_host"`
BastionHostKey interface{} `mapstructure:"bastion_host_key"`
BastionPort interface{} `mapstructure:"bastion_port"`
BastionUser interface{} `mapstructure:"bastion_user"`
BastionPassword interface{} `mapstructure:"bastion_password"`
@ -155,6 +157,7 @@ func (n *EvalValidateProvisioner) validateConnConfig(connConfig *ResourceConfig)
// For type=winrm only (enforced in winrm communicator)
HTTPS interface{} `mapstructure:"https"`
Insecure interface{} `mapstructure:"insecure"`
NTLM interface{} `mapstructure:"use_ntlm"`
CACert interface{} `mapstructure:"cacert"`
}

View File

@ -119,11 +119,19 @@ func (b *ApplyGraphBuilder) Steps() []GraphTransformer {
// Connect references so ordering is correct
&ReferenceTransformer{},
// Reverse the edges to outputs and locals, so that
// Handle destroy time transformations for output and local values.
// Reverse the edges from outputs and locals, so that
// interpolations don't fail during destroy.
// Create a destroy node for outputs to remove them from the state.
// Prune unreferenced values, which may have interpolations that can't
// be resolved.
GraphTransformIf(
func() bool { return b.Destroy },
&DestroyValueReferenceTransformer{},
GraphTransformMulti(
&DestroyValueReferenceTransformer{},
&DestroyOutputTransformer{},
&PruneUnusedValuesTransformer{},
),
),
// Add the node to fix the state count boundaries

View File

@ -518,6 +518,16 @@ func (i *Interpolater) computeResourceVariable(
return &v, err
}
// special case for the "id" field which is usually also an attribute
if v.Field == "id" && r.Primary.ID != "" {
// This is usually pulled from the attributes, but is sometimes missing
// during destroy. We can return the ID field in this case.
// FIXME: there should only be one ID to rule them all.
log.Printf("[WARN] resource %s missing 'id' attribute", v.ResourceId())
v, err := hil.InterfaceToVariable(r.Primary.ID)
return &v, err
}
// computed list or map attribute
_, isList = r.Primary.Attributes[v.Field+".#"]
_, isMap = r.Primary.Attributes[v.Field+".%"]
@ -655,6 +665,11 @@ func (i *Interpolater) computeResourceMultiVariable(
continue
}
if v.Field == "id" && r.Primary.ID != "" {
log.Printf("[WARN] resource %s missing 'id' attribute", v.ResourceId())
values = append(values, r.Primary.ID)
}
// computed list or map attribute
_, isList := r.Primary.Attributes[v.Field+".#"]
_, isMap := r.Primary.Attributes[v.Field+".%"]
@ -774,7 +789,8 @@ func (i *Interpolater) resourceCountMax(
// If we're NOT applying, then we assume we can read the count
// from the state. Plan and so on may not have any state yet so
// we do a full interpolation.
if i.Operation != walkApply {
// Don't forget walkDestroy, which is a special case of walkApply
if !(i.Operation == walkApply || i.Operation == walkDestroy) {
if cr == nil {
return 0, nil
}
@ -805,7 +821,13 @@ func (i *Interpolater) resourceCountMax(
// use "cr.Count()" but that doesn't work if the count is interpolated
// and we can't guarantee that so we instead depend on the state.
max := -1
for k, _ := range ms.Resources {
for k, s := range ms.Resources {
// This resource may have been just removed, in which case the Primary
// may be nil, or just empty.
if s == nil || s.Primary == nil || len(s.Primary.Attributes) == 0 {
continue
}
// Get the index number for this resource
index := ""
if k == id {

View File

@ -59,38 +59,8 @@ func (n *NodeLocal) References() []string {
// GraphNodeEvalable
func (n *NodeLocal) EvalTree() EvalNode {
return &EvalSequence{
Nodes: []EvalNode{
&EvalOpFilter{
Ops: []walkOperation{
walkInput,
walkValidate,
walkRefresh,
walkPlan,
walkApply,
},
Node: &EvalSequence{
Nodes: []EvalNode{
&EvalLocal{
Name: n.Config.Name,
Value: n.Config.RawConfig,
},
},
},
},
&EvalOpFilter{
Ops: []walkOperation{
walkPlanDestroy,
walkDestroy,
},
Node: &EvalSequence{
Nodes: []EvalNode{
&EvalDeleteLocal{
Name: n.Config.Name,
},
},
},
},
},
return &EvalLocal{
Name: n.Config.Name,
Value: n.Config.RawConfig,
}
}

View File

@ -83,19 +83,71 @@ func (n *NodeApplyableOutput) EvalTree() EvalNode {
},
},
&EvalOpFilter{
Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkValidate},
Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkValidate, walkDestroy, walkPlanDestroy},
Node: &EvalWriteOutput{
Name: n.Config.Name,
Sensitive: n.Config.Sensitive,
Value: n.Config.RawConfig,
},
},
&EvalOpFilter{
Ops: []walkOperation{walkDestroy, walkPlanDestroy},
Node: &EvalDeleteOutput{
Name: n.Config.Name,
},
},
},
}
}
// NodeDestroyableOutput represents an output that is "destroyable":
// its application will remove the output from the state.
type NodeDestroyableOutput struct {
PathValue []string
Config *config.Output // Config is the output in the config
}
func (n *NodeDestroyableOutput) Name() string {
result := fmt.Sprintf("output.%s (destroy)", n.Config.Name)
if len(n.PathValue) > 1 {
result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
}
return result
}
// GraphNodeSubPath
func (n *NodeDestroyableOutput) Path() []string {
return n.PathValue
}
// RemovableIfNotTargeted
func (n *NodeDestroyableOutput) RemoveIfNotTargeted() bool {
// We need to add this so that this node will be removed if
// it isn't targeted or a dependency of a target.
return true
}
// This will keep the destroy node in the graph if its corresponding output
// node is also in the destroy graph.
func (n *NodeDestroyableOutput) TargetDownstream(targetedDeps, untargetedDeps *dag.Set) bool {
return true
}
// GraphNodeReferencer
func (n *NodeDestroyableOutput) References() []string {
var result []string
result = append(result, n.Config.DependsOn...)
result = append(result, ReferencesFromConfig(n.Config.RawConfig)...)
for _, v := range result {
split := strings.Split(v, "/")
for i, s := range split {
split[i] = s + ".destroy"
}
result = append(result, strings.Join(split, "/"))
}
return result
}
// GraphNodeEvalable
func (n *NodeDestroyableOutput) EvalTree() EvalNode {
return &EvalDeleteOutput{
Name: n.Config.Name,
}
}

View File

@ -124,6 +124,27 @@ func (n *NodeApplyableResource) evalTreeDataResource(
Then: EvalNoop{},
},
// Normally we interpolate count as a preparation step before
// a DynamicExpand, but an apply graph has pre-expanded nodes
// and so the count would otherwise never be interpolated.
//
// This is redundant when there are multiple instances created
// from the same config (count > 1) but harmless since the
// underlying structures have mutexes to make this concurrency-safe.
//
// In most cases this isn't actually needed because we dealt with
// all of the counts during the plan walk, but we do it here
// for completeness because other code assumes that the
// final count is always available during interpolation.
//
// Here we are just populating the interpolated value in-place
// inside this RawConfig object, like we would in
// NodeAbstractCountResource.
&EvalInterpolate{
Config: n.Config.RawCount,
ContinueOnErr: true,
},
// We need to re-interpolate the config here, rather than
// just using the diff's values directly, because we've
// potentially learned more variable values during the
@ -236,6 +257,28 @@ func (n *NodeApplyableResource) evalTreeManagedResource(
},
},
// Normally we interpolate count as a preparation step before
// a DynamicExpand, but an apply graph has pre-expanded nodes
// and so the count would otherwise never be interpolated.
//
// This is redundant when there are multiple instances created
// from the same config (count > 1) but harmless since the
// underlying structures have mutexes to make this concurrency-safe.
//
// In most cases this isn't actually needed because we dealt with
// all of the counts during the plan walk, but we need to do this
// in order to support interpolation of resource counts from
// apply-time-interpolated expressions, such as those in
// "provisioner" blocks.
//
// Here we are just populating the interpolated value in-place
// inside this RawConfig object, like we would in
// NodeAbstractCountResource.
&EvalInterpolate{
Config: n.Config.RawCount,
ContinueOnErr: true,
},
&EvalInterpolate{
Config: n.Config.RawConfig.Copy(),
Resource: resource,

View File

@ -213,10 +213,16 @@ func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResourceNoState(
// Determine the dependencies for the state.
stateDeps := n.StateReferences()
// n.Config can be nil if the config and state don't match
var raw *config.RawConfig
if n.Config != nil {
raw = n.Config.RawConfig.Copy()
}
return &EvalSequence{
Nodes: []EvalNode{
&EvalInterpolate{
Config: n.Config.RawConfig.Copy(),
Config: raw,
Resource: resource,
Output: &resourceConfig,
},

View File

@ -117,7 +117,7 @@ func (p *Plan) contextOpts(base *ContextOpts) (*ContextOpts, error) {
// the state, there is little chance that these aren't actually equal.
// Log the error condition for reference, but continue with the state
// we have.
log.Println("[WARNING] Plan state and ContextOpts state are not equal")
log.Println("[WARN] Plan state and ContextOpts state are not equal")
}
thisVersion := version.String()

View File

@ -9,6 +9,7 @@ import (
"io"
"io/ioutil"
"log"
"os"
"reflect"
"sort"
"strconv"
@ -16,10 +17,10 @@ import (
"sync"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/go-version"
"github.com/hashicorp/terraform/config"
"github.com/mitchellh/copystructure"
"github.com/satori/go.uuid"
tfversion "github.com/hashicorp/terraform/version"
)
@ -706,7 +707,11 @@ func (s *State) EnsureHasLineage() {
func (s *State) ensureHasLineage() {
if s.Lineage == "" {
s.Lineage = uuid.NewV4().String()
lineage, err := uuid.GenerateUUID()
if err != nil {
panic(fmt.Errorf("Failed to generate lineage: %v", err))
}
s.Lineage = lineage
log.Printf("[DEBUG] New state was assigned lineage %q\n", s.Lineage)
} else {
log.Printf("[TRACE] Preserving existing state lineage %q\n", s.Lineage)
@ -1872,13 +1877,21 @@ var ErrNoState = errors.New("no state")
// ReadState reads a state structure out of a reader in the format that
// was written by WriteState.
func ReadState(src io.Reader) (*State, error) {
buf := bufio.NewReader(src)
if _, err := buf.Peek(1); err != nil {
// the error is either io.EOF or "invalid argument", and both are from
// an empty state.
// check for a nil file specifically, since that produces a platform
// specific error if we try to use it in a bufio.Reader.
if f, ok := src.(*os.File); ok && f == nil {
return nil, ErrNoState
}
buf := bufio.NewReader(src)
if _, err := buf.Peek(1); err != nil {
if err == io.EOF {
return nil, ErrNoState
}
return nil, err
}
if err := testForV0State(buf); err != nil {
return nil, err
}

View File

@ -1,7 +1,10 @@
package terraform
import (
"log"
"github.com/hashicorp/terraform/config/module"
"github.com/hashicorp/terraform/dag"
)
// OutputTransformer is a GraphTransformer that adds all the outputs
@ -41,11 +44,6 @@ func (t *OutputTransformer) transform(g *Graph, m *module.Tree) error {
// Add all outputs here
for _, o := range os {
// Build the node.
//
// NOTE: For now this is just an "applyable" output. As we build
// new graph builders for the other operations I suspect we'll
// find a way to parameterize this, require new transforms, etc.
node := &NodeApplyableOutput{
PathValue: normalizeModulePath(m.Path()),
Config: o,
@ -57,3 +55,41 @@ func (t *OutputTransformer) transform(g *Graph, m *module.Tree) error {
return nil
}
// DestroyOutputTransformer is a GraphTransformer that adds nodes to delete
// outputs during destroy. We need to do this to ensure that no stale outputs
// are ever left in the state.
type DestroyOutputTransformer struct {
}
func (t *DestroyOutputTransformer) Transform(g *Graph) error {
for _, v := range g.Vertices() {
output, ok := v.(*NodeApplyableOutput)
if !ok {
continue
}
// create the destroy node for this output
node := &NodeDestroyableOutput{
PathValue: output.PathValue,
Config: output.Config,
}
log.Printf("[TRACE] creating %s", node.Name())
g.Add(node)
deps, err := g.Descendents(v)
if err != nil {
return err
}
// the destroy node must depend on the eval node
deps.Add(v)
for _, d := range deps.List() {
log.Printf("[TRACE] %s depends on %s", node.Name(), dag.VertexName(d))
g.Connect(dag.BasicEdge(node, d))
}
}
return nil
}

View File

@ -77,15 +77,14 @@ func (t *ReferenceTransformer) Transform(g *Graph) error {
}
// DestroyReferenceTransformer is a GraphTransformer that reverses the edges
// for nodes that depend on an Output or Local value. Output and local nodes are
// removed during destroy, so anything which depends on them must be evaluated
// first. These can't be interpolated during destroy, so the stored value must
// be used anyway hence they don't need to be re-evaluated.
// for locals and outputs that depend on other nodes which will be
// removed during destroy. If a destroy node is evaluated before the local or
// output value, it will be removed from the state, and the later interpolation
// will fail.
type DestroyValueReferenceTransformer struct{}
func (t *DestroyValueReferenceTransformer) Transform(g *Graph) error {
vs := g.Vertices()
for _, v := range vs {
switch v.(type) {
case *NodeApplyableOutput, *NodeLocal:
@ -94,13 +93,62 @@ func (t *DestroyValueReferenceTransformer) Transform(g *Graph) error {
continue
}
// reverse any incoming edges so that the value is removed last
for _, e := range g.EdgesTo(v) {
source := e.Source()
log.Printf("[TRACE] output dep: %s", dag.VertexName(source))
// reverse any outgoing edges so that the value is evaluated first.
for _, e := range g.EdgesFrom(v) {
target := e.Target()
// only destroy nodes will be evaluated in reverse
if _, ok := target.(GraphNodeDestroyer); !ok {
continue
}
log.Printf("[TRACE] output dep: %s", dag.VertexName(target))
g.RemoveEdge(e)
g.Connect(&DestroyEdge{S: v, T: source})
g.Connect(&DestroyEdge{S: target, T: v})
}
}
return nil
}
// PruneUnusedValuesTransformer is a GraphTransformer that removes local and
// output values which are not referenced in the graph. Since outputs and
// locals always need to be evaluated, if they reference a resource that is not
// available in the state the interpolation could fail.
type PruneUnusedValuesTransformer struct{}
func (t *PruneUnusedValuesTransformer) Transform(g *Graph) error {
// this might need multiple runs in order to ensure that pruning a value
// doesn't affect a previously checked value.
for removed := 0; ; removed = 0 {
for _, v := range g.Vertices() {
switch v.(type) {
case *NodeApplyableOutput, *NodeLocal:
// OK
default:
continue
}
dependants := g.UpEdges(v)
switch dependants.Len() {
case 0:
// nothing at all depends on this
g.Remove(v)
removed++
case 1:
// because an output's destroy node always depends on the output,
// we need to check for the case of a single destroy node.
d := dependants.List()[0]
if _, ok := d.(*NodeDestroyableOutput); ok {
g.Remove(v)
removed++
}
}
}
if removed == 0 {
break
}
}

View File

@ -217,6 +217,12 @@ func filterPartialOutputs(v interface{}, targetedNodes *dag.Set, g *Graph) bool
if _, ok := d.(*NodeCountBoundary); ok {
continue
}
if !targetedNodes.Include(d) {
// this one is going to be removed, so it doesn't count
continue
}
// as soon as we see a real dependency, we mark this as
// non-removable
return true

View File

@ -1,16 +1,13 @@
package terraform
import (
"fmt"
"runtime"
"github.com/hashicorp/terraform/version"
"github.com/hashicorp/terraform/httpclient"
)
// The standard Terraform User-Agent format
const UserAgent = "Terraform %s (%s)"
// Generate a UserAgent string
//
// Deprecated: Use httpclient.UserAgentString if you are setting your
// own User-Agent header.
func UserAgentString() string {
return fmt.Sprintf(UserAgent, version.String(), runtime.Version())
return httpclient.UserAgentString()
}

View File

@ -11,12 +11,12 @@ import (
)
// The main version number that is being run at the moment.
const Version = "0.11.3"
const Version = "0.11.7"
// A pre-release marker for the version. If this is "" (empty string)
// then it means that it is a final release. Otherwise, this is a pre-release
// such as "dev" (in development), "beta", "rc1", etc.
var Prerelease = "dev"
var Prerelease = ""
// SemVer is an instance of version.Version. This has the secondary
// benefit of verifying during tests and init time that our version is a

View File

@ -156,9 +156,13 @@ func (w *walker) Exit(l reflectwalk.Location) error {
}
switch l {
case reflectwalk.Array:
fallthrough
case reflectwalk.Map:
fallthrough
case reflectwalk.Slice:
w.replacePointerMaybe()
// Pop map off our container
w.cs = w.cs[:len(w.cs)-1]
case reflectwalk.MapValue:
@ -171,16 +175,27 @@ func (w *walker) Exit(l reflectwalk.Location) error {
// or in this case never adds it. We need to create a properly typed
// zero value so that this key can be set.
if !mv.IsValid() {
mv = reflect.Zero(m.Type().Elem())
mv = reflect.Zero(m.Elem().Type().Elem())
}
m.Elem().SetMapIndex(mk, mv)
case reflectwalk.ArrayElem:
// Pop off the value and the index and set it on the array
v := w.valPop()
i := w.valPop().Interface().(int)
if v.IsValid() {
a := w.cs[len(w.cs)-1]
ae := a.Elem().Index(i) // storing array as pointer on stack - so need Elem() call
if ae.CanSet() {
ae.Set(v)
}
}
m.SetMapIndex(mk, mv)
case reflectwalk.SliceElem:
// Pop off the value and the index and set it on the slice
v := w.valPop()
i := w.valPop().Interface().(int)
if v.IsValid() {
s := w.cs[len(w.cs)-1]
se := s.Index(i)
se := s.Elem().Index(i)
if se.CanSet() {
se.Set(v)
}
@ -220,9 +235,9 @@ func (w *walker) Map(m reflect.Value) error {
// Create the map. If the map itself is nil, then just make a nil map
var newMap reflect.Value
if m.IsNil() {
newMap = reflect.Indirect(reflect.New(m.Type()))
newMap = reflect.New(m.Type())
} else {
newMap = reflect.MakeMap(m.Type())
newMap = wrapPtr(reflect.MakeMap(m.Type()))
}
w.cs = append(w.cs, newMap)
@ -287,9 +302,9 @@ func (w *walker) Slice(s reflect.Value) error {
var newS reflect.Value
if s.IsNil() {
newS = reflect.Indirect(reflect.New(s.Type()))
newS = reflect.New(s.Type())
} else {
newS = reflect.MakeSlice(s.Type(), s.Len(), s.Cap())
newS = wrapPtr(reflect.MakeSlice(s.Type(), s.Len(), s.Cap()))
}
w.cs = append(w.cs, newS)
@ -309,6 +324,31 @@ func (w *walker) SliceElem(i int, elem reflect.Value) error {
return nil
}
func (w *walker) Array(a reflect.Value) error {
if w.ignoring() {
return nil
}
w.lock(a)
newA := reflect.New(a.Type())
w.cs = append(w.cs, newA)
w.valPush(newA)
return nil
}
func (w *walker) ArrayElem(i int, elem reflect.Value) error {
if w.ignoring() {
return nil
}
// We don't write the array here because elem might still be
// arbitrarily complex. Just record the index and continue on.
w.valPush(reflect.ValueOf(i))
return nil
}
func (w *walker) Struct(s reflect.Value) error {
if w.ignoring() {
return nil
@ -326,7 +366,10 @@ func (w *walker) Struct(s reflect.Value) error {
return err
}
v = reflect.ValueOf(dup)
// We need to put a pointer to the value on the value stack,
// so allocate a new pointer and set it.
v = reflect.New(s.Type())
reflect.Indirect(v).Set(reflect.ValueOf(dup))
} else {
// No copier, we copy ourselves and allow reflectwalk to guide
// us deeper into the structure for copying.
@ -405,6 +448,23 @@ func (w *walker) replacePointerMaybe() {
}
v := w.valPop()
// If the expected type is a pointer to an interface of any depth,
// such as *interface{}, **interface{}, etc., then we need to convert
// the value "v" from *CONCRETE to *interface{} so types match for
// Set.
//
// For example, if v is of type *Foo where Foo is a struct, v would become
// *interface{} instead. This only happens if we have an interface expectation
// at this depth.
//
// For more info, see GH-16
if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)]; ok && iType.Kind() == reflect.Interface {
y := reflect.New(iType) // Create *interface{}
y.Elem().Set(reflect.Indirect(v)) // Assign "Foo" to interface{} (dereferenced)
v = y // v is now typed *interface{} (where *v = Foo)
}
for i := 1; i < w.ps[w.depth]; i++ {
if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok {
iface := reflect.New(iType).Elem()
@ -475,3 +535,14 @@ func (w *walker) lock(v reflect.Value) {
locker.Lock()
w.locks[w.depth] = locker
}
// wrapPtr is a helper that takes v and always make it *v. copystructure
// stores things internally as pointers until the last moment before unwrapping
func wrapPtr(v reflect.Value) reflect.Value {
if !v.IsValid() {
return v
}
vPtr := reflect.New(v.Type())
vPtr.Elem().Set(v)
return vPtr
}
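
With Array and ArrayElem handled, fixed-size arrays now survive a deep copy; a minimal sketch:

package main

import (
	"fmt"

	"github.com/mitchellh/copystructure"
)

type Config struct {
	Name  string
	Ports [3]int // a fixed-size array, which the walker can now copy
}

func main() {
	original := Config{Name: "web", Ports: [3]int{80, 443, 8080}}
	dup, err := copystructure.Copy(original)
	if err != nil {
		panic(err)
	}
	copied := dup.(Config)
	copied.Ports[0] = 8000 // mutating the copy leaves the original intact
	fmt.Println(original.Ports, copied.Ports) // [80 443 8080] [8000 443 8080]
}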

View File

@ -1,4 +1,4 @@
# mapstructure
# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure)
mapstructure is a Go library for decoding generic map values to structures
and vice versa, while providing helpful error handling.

View File

@ -38,12 +38,6 @@ func DecodeHookExec(
raw DecodeHookFunc,
from reflect.Type, to reflect.Type,
data interface{}) (interface{}, error) {
// Build our arguments that reflect expects
argVals := make([]reflect.Value, 3)
argVals[0] = reflect.ValueOf(from)
argVals[1] = reflect.ValueOf(to)
argVals[2] = reflect.ValueOf(data)
switch f := typedDecodeHook(raw).(type) {
case DecodeHookFuncType:
return f(from, to, data)
@ -121,6 +115,30 @@ func StringToTimeDurationHookFunc() DecodeHookFunc {
}
}
// StringToTimeHookFunc returns a DecodeHookFunc that converts
// strings to time.Time.
func StringToTimeHookFunc(layout string) DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{}) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t != reflect.TypeOf(time.Time{}) {
return data, nil
}
// Convert it by parsing
return time.Parse(layout, data.(string))
}
}
// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to
// the decoder.
//
// Note that this is significantly different from the WeaklyTypedInput option
// of the DecoderConfig.
func WeaklyTypedHook(
f reflect.Kind,
t reflect.Kind,
@ -132,9 +150,8 @@ func WeaklyTypedHook(
case reflect.Bool:
if dataVal.Bool() {
return "1", nil
} else {
return "0", nil
}
return "0", nil
case reflect.Float32:
return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
case reflect.Int:
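
A sketch of the new time hook wired into a decoder:

package main

import (
	"fmt"
	"time"

	"github.com/mitchellh/mapstructure"
)

type Event struct {
	Name string
	At   time.Time
}

func main() {
	input := map[string]interface{}{
		"Name": "deploy",
		"At":   "2018-06-11T13:08:54-07:00",
	}

	var event Event
	decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		// Parse RFC3339 strings into time.Time fields.
		DecodeHook: mapstructure.StringToTimeHookFunc(time.RFC3339),
		Result:     &event,
	})
	if err != nil {
		panic(err)
	}
	if err := decoder.Decode(input); err != nil {
		panic(err)
	}
	fmt.Println(event.At.Year()) // 2018
}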

View File

@ -1,5 +1,5 @@
// The mapstructure package exposes functionality to convert an
// arbitrary map[string]interface{} into a native Go structure.
// Package mapstructure exposes functionality to convert an arbitrary
// map[string]interface{} into a native Go structure.
//
// The Go structure can be arbitrarily complex, containing slices,
// other structs, etc. and the decoder will properly decode nested
@ -32,7 +32,12 @@ import (
// both.
type DecodeHookFunc interface{}
// DecodeHookFuncType is a DecodeHookFunc which has complete information about
// the source and target types.
type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the
// source and target types.
type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
// DecoderConfig is the configuration that is used to create a new decoder
@ -109,12 +114,12 @@ type Metadata struct {
Unused []string
}
// Decode takes a map and uses reflection to convert it into the
// given Go native structure. val must be a pointer to a struct.
func Decode(m interface{}, rawVal interface{}) error {
// Decode takes an input structure and uses reflection to translate it to
// the output structure. output must be a pointer to a map or struct.
func Decode(input interface{}, output interface{}) error {
config := &DecoderConfig{
Metadata: nil,
Result: rawVal,
Result: output,
}
decoder, err := NewDecoder(config)
@ -122,7 +127,7 @@ func Decode(m interface{}, rawVal interface{}) error {
return err
}
return decoder.Decode(m)
return decoder.Decode(input)
}
// WeakDecode is the same as Decode but is shorthand to enable
@ -142,6 +147,40 @@ func WeakDecode(input, output interface{}) error {
return decoder.Decode(input)
}
// DecodeMetadata is the same as Decode, but is shorthand to
// enable metadata collection. See DecoderConfig for more info.
func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error {
config := &DecoderConfig{
Metadata: metadata,
Result: output,
}
decoder, err := NewDecoder(config)
if err != nil {
return err
}
return decoder.Decode(input)
}
// WeakDecodeMetadata is the same as Decode, but is shorthand to
// enable both WeaklyTypedInput and metadata collection. See
// DecoderConfig for more info.
func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error {
config := &DecoderConfig{
Metadata: metadata,
Result: output,
WeaklyTypedInput: true,
}
decoder, err := NewDecoder(config)
if err != nil {
return err
}
return decoder.Decode(input)
}
// NewDecoder returns a new decoder for the given configuration. Once
// a decoder has been returned, the same configuration must not be used
// again.
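
A sketch of the new metadata shorthand:

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Person struct {
	Name string
	Age  int
}

func main() {
	input := map[string]interface{}{
		"Name":  "Paddy",
		"Age":   30,
		"Email": "ignored@example.com", // no matching struct field
	}

	var person Person
	var md mapstructure.Metadata
	if err := mapstructure.DecodeMetadata(input, &person, &md); err != nil {
		panic(err)
	}
	fmt.Println(md.Keys)   // decoded keys, e.g. [Name Age]
	fmt.Println(md.Unused) // keys with no matching field: [Email]
}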
@ -179,68 +218,81 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) {
// Decode decodes the given raw interface to the target pointer specified
// by the configuration.
func (d *Decoder) Decode(raw interface{}) error {
return d.decode("", raw, reflect.ValueOf(d.config.Result).Elem())
func (d *Decoder) Decode(input interface{}) error {
return d.decode("", input, reflect.ValueOf(d.config.Result).Elem())
}
// Decodes an unknown data type into a specific reflection value.
func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error {
if data == nil {
// If the data is nil, then we don't set anything.
func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error {
if input == nil {
// If the data is nil, then we don't set anything, unless ZeroFields is set
// to true.
if d.config.ZeroFields {
outVal.Set(reflect.Zero(outVal.Type()))
if d.config.Metadata != nil && name != "" {
d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
}
}
return nil
}
dataVal := reflect.ValueOf(data)
if !dataVal.IsValid() {
// If the data value is invalid, then we just set the value
inputVal := reflect.ValueOf(input)
if !inputVal.IsValid() {
// If the input value is invalid, then we just set the value
// to be the zero value.
val.Set(reflect.Zero(val.Type()))
outVal.Set(reflect.Zero(outVal.Type()))
if d.config.Metadata != nil && name != "" {
d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
}
return nil
}
if d.config.DecodeHook != nil {
// We have a DecodeHook, so let's pre-process the data.
// We have a DecodeHook, so let's pre-process the input.
var err error
data, err = DecodeHookExec(
input, err = DecodeHookExec(
d.config.DecodeHook,
dataVal.Type(), val.Type(), data)
inputVal.Type(), outVal.Type(), input)
if err != nil {
return fmt.Errorf("error decoding '%s': %s", name, err)
}
}
var err error
dataKind := getKind(val)
switch dataKind {
inputKind := getKind(outVal)
switch inputKind {
case reflect.Bool:
err = d.decodeBool(name, data, val)
err = d.decodeBool(name, input, outVal)
case reflect.Interface:
err = d.decodeBasic(name, data, val)
err = d.decodeBasic(name, input, outVal)
case reflect.String:
err = d.decodeString(name, data, val)
err = d.decodeString(name, input, outVal)
case reflect.Int:
err = d.decodeInt(name, data, val)
err = d.decodeInt(name, input, outVal)
case reflect.Uint:
err = d.decodeUint(name, data, val)
err = d.decodeUint(name, input, outVal)
case reflect.Float32:
err = d.decodeFloat(name, data, val)
err = d.decodeFloat(name, input, outVal)
case reflect.Struct:
err = d.decodeStruct(name, data, val)
err = d.decodeStruct(name, input, outVal)
case reflect.Map:
err = d.decodeMap(name, data, val)
err = d.decodeMap(name, input, outVal)
case reflect.Ptr:
err = d.decodePtr(name, data, val)
err = d.decodePtr(name, input, outVal)
case reflect.Slice:
err = d.decodeSlice(name, data, val)
err = d.decodeSlice(name, input, outVal)
case reflect.Array:
err = d.decodeArray(name, input, outVal)
case reflect.Func:
err = d.decodeFunc(name, data, val)
err = d.decodeFunc(name, input, outVal)
default:
// If we reached this point then we weren't able to decode it
return fmt.Errorf("%s: unsupported type: %s", name, dataKind)
return fmt.Errorf("%s: unsupported type: %s", name, inputKind)
}
// If we reached here, then we successfully decoded SOMETHING, so
// mark the key as used if we're tracking metadata.
// mark the key as used if we're tracking metainput.
if d.config.Metadata != nil && name != "" {
d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
}
@ -251,6 +303,9 @@ func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error
// This decodes a basic type (bool, int, string, etc.) and sets the
// value to "data" of that type.
func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
if val.IsValid() && val.Elem().IsValid() {
return d.decode(name, data, val.Elem())
}
dataVal := reflect.ValueOf(data)
if !dataVal.IsValid() {
dataVal = reflect.Zero(val.Type())
@ -287,12 +342,22 @@ func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value)
val.SetString(strconv.FormatUint(dataVal.Uint(), 10))
case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64))
	case dataKind == reflect.Slice && d.config.WeaklyTypedInput,
		dataKind == reflect.Array && d.config.WeaklyTypedInput:
		dataType := dataVal.Type()
		elemKind := dataType.Elem().Kind()
switch elemKind {
case reflect.Uint8:
var uints []uint8
if dataKind == reflect.Array {
uints = make([]uint8, dataVal.Len(), dataVal.Len())
for i := range uints {
uints[i] = dataVal.Index(i).Interface().(uint8)
}
} else {
uints = dataVal.Interface().([]uint8)
}
val.SetString(string(uints))
default:
converted = false
}
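
The widened case above extends the weakly typed byte-to-string conversion from byte slices to byte arrays. A small sketch of the slice form, assuming WeaklyTypedInput; the field name and input are illustrative:

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

func main() {
	var out struct{ S string }
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		WeaklyTypedInput: true, // without this, []uint8 -> string is a type error
		Result:           &out,
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(map[string]interface{}{"S": []uint8("hello")}); err != nil {
		panic(err)
	}
	fmt.Println(out.S) // hello
}
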
@@ -436,7 +501,7 @@ func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value)
case dataKind == reflect.Uint:
val.SetFloat(float64(dataVal.Uint()))
case dataKind == reflect.Float32:
		val.SetFloat(dataVal.Float())
case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
if dataVal.Bool() {
val.SetFloat(1)
@@ -482,34 +547,50 @@ func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) er
valMap = reflect.MakeMap(mapType)
}
	// Check input type and based on the input type jump to the proper func
	dataVal := reflect.Indirect(reflect.ValueOf(data))
	switch dataVal.Kind() {
	case reflect.Map:
		return d.decodeMapFromMap(name, dataVal, val, valMap)

	case reflect.Struct:
		return d.decodeMapFromStruct(name, dataVal, val, valMap)

	case reflect.Array, reflect.Slice:
		if d.config.WeaklyTypedInput {
			return d.decodeMapFromSlice(name, dataVal, val, valMap)
		}

		fallthrough

	default:
		return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
	}
}
func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
// Special case for BC reasons (covered by tests)
if dataVal.Len() == 0 {
val.Set(valMap)
return nil
}
for i := 0; i < dataVal.Len(); i++ {
err := d.decode(
fmt.Sprintf("%s[%d]", name, i),
dataVal.Index(i).Interface(), val)
if err != nil {
return err
}
}
return nil
}
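
decodeMapFromSlice decodes every element into the same target map, so keys from later elements overwrite earlier ones. A sketch of the weakly typed path it serves; the values are illustrative:

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

func main() {
	var out map[string]interface{}
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		WeaklyTypedInput: true, // required for slice-of-maps -> map
		Result:           &out,
	})
	if err != nil {
		panic(err)
	}
	input := []map[string]interface{}{
		{"a": 1},
		{"a": 2, "b": 3},
	}
	if err := dec.Decode(input); err != nil {
		panic(err)
	}
	fmt.Println(out) // a is 2 (overwritten by the second element), b is 3
}
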
func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
valType := val.Type()
valKeyType := valType.Key()
valElemType := valType.Elem()
// Accumulate errors
errors := make([]string, 0)
@@ -546,22 +627,106 @@ func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) er
return nil
}
func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
typ := dataVal.Type()
for i := 0; i < typ.NumField(); i++ {
// Get the StructField first since this is a cheap operation. If the
// field is unexported, then ignore it.
f := typ.Field(i)
if f.PkgPath != "" {
continue
}
// Next get the actual value of this field and verify it is assignable
// to the map value.
v := dataVal.Field(i)
if !v.Type().AssignableTo(valMap.Type().Elem()) {
return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem())
}
tagValue := f.Tag.Get(d.config.TagName)
tagParts := strings.Split(tagValue, ",")
// Determine the name of the key in the map
keyName := f.Name
if tagParts[0] != "" {
if tagParts[0] == "-" {
continue
}
keyName = tagParts[0]
}
// If "squash" is specified in the tag, we squash the field down.
squash := false
for _, tag := range tagParts[1:] {
if tag == "squash" {
squash = true
break
}
}
if squash && v.Kind() != reflect.Struct {
return fmt.Errorf("cannot squash non-struct type '%s'", v.Type())
}
switch v.Kind() {
// this is an embedded struct, so handle it differently
case reflect.Struct:
x := reflect.New(v.Type())
x.Elem().Set(v)
vType := valMap.Type()
vKeyType := vType.Key()
vElemType := vType.Elem()
mType := reflect.MapOf(vKeyType, vElemType)
vMap := reflect.MakeMap(mType)
err := d.decode(keyName, x.Interface(), vMap)
if err != nil {
return err
}
if squash {
for _, k := range vMap.MapKeys() {
valMap.SetMapIndex(k, vMap.MapIndex(k))
}
} else {
valMap.SetMapIndex(reflect.ValueOf(keyName), vMap)
}
default:
valMap.SetMapIndex(reflect.ValueOf(keyName), v)
}
}
if val.CanAddr() {
val.Set(valMap)
}
return nil
}
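
decodeMapFromStruct is what drives struct-to-map decoding, honoring the configured tag name, the "-" skip marker, and ",squash" flattening handled above. A compact sketch; the types and values are illustrative:

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Inner struct {
	City string `mapstructure:"city"`
}

type Person struct {
	Name   string `mapstructure:"name"`
	Secret string `mapstructure:"-"`       // skipped entirely
	Inner  Inner  `mapstructure:",squash"` // keys merged into the outer map
}

func main() {
	var out map[string]interface{}
	in := Person{Name: "a", Secret: "x", Inner: Inner{City: "b"}}
	if err := mapstructure.Decode(in, &out); err != nil {
		panic(err)
	}
	fmt.Println(out) // contains name and city; Secret never appears
}
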
func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error {
// Create an element of the concrete (non pointer) type and decode
// into that. Then set the value of the pointer to this type.
valType := val.Type()
valElemType := valType.Elem()
	if val.CanSet() {
		realVal := val
		if realVal.IsNil() || d.config.ZeroFields {
			realVal = reflect.New(valElemType)
		}

		if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil {
			return err
		}

		val.Set(realVal)
	} else {
		if err := d.decode(name, data, reflect.Indirect(val)); err != nil {
			return err
		}
	}
return nil
}
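
The common, settable case of decodePtr allocates the element type with reflect.New, decodes into it, and stores the pointer; the new else branch covers pointers that cannot be replaced. A tiny sketch of the settable case; the field name is illustrative:

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

func main() {
	var out struct{ Name *string }
	if err := mapstructure.Decode(map[string]interface{}{"Name": "a"}, &out); err != nil {
		panic(err)
	}
	// decodePtr allocated the *string and decoded "a" into it.
	fmt.Println(*out.Name) // a
}
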
@@ -597,7 +762,8 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value)
val.Set(reflect.MakeSlice(sliceType, 0, 0))
return nil
}
case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8:
return d.decodeSlice(name, []byte(dataVal.String()), val)
// All other types we try to convert to the slice type
// and "lift" it into it. i.e. a string becomes a string slice.
default:
@@ -605,7 +771,6 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value)
return d.decodeSlice(name, []interface{}{data}, val)
}
}
return fmt.Errorf(
"'%s': source data must be an array or slice, got %s", name, dataValKind)
@@ -642,6 +807,73 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value)
return nil
}
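
The new String case above lets a string decode directly into a []byte field when weak typing is enabled. A short sketch; the field name is illustrative:

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

func main() {
	var out struct{ Raw []byte }
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		WeaklyTypedInput: true, // string -> []byte sits in the weak-typing branch
		Result:           &out,
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(map[string]interface{}{"Raw": "hi"}); err != nil {
		panic(err)
	}
	fmt.Println(out.Raw) // [104 105]
}
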
func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error {
dataVal := reflect.Indirect(reflect.ValueOf(data))
dataValKind := dataVal.Kind()
valType := val.Type()
valElemType := valType.Elem()
arrayType := reflect.ArrayOf(valType.Len(), valElemType)
valArray := val
if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields {
// Check input type
if dataValKind != reflect.Array && dataValKind != reflect.Slice {
if d.config.WeaklyTypedInput {
switch {
// Empty maps turn into empty arrays
case dataValKind == reflect.Map:
if dataVal.Len() == 0 {
val.Set(reflect.Zero(arrayType))
return nil
}
// All other types we try to convert to the array type
// and "lift" it into it. i.e. a string becomes a string array.
default:
// Just re-try this function with data as a slice.
return d.decodeArray(name, []interface{}{data}, val)
}
}
return fmt.Errorf(
"'%s': source data must be an array or slice, got %s", name, dataValKind)
}
if dataVal.Len() > arrayType.Len() {
return fmt.Errorf(
"'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len())
}
// Make a new array to hold our result, same size as the original data.
valArray = reflect.New(arrayType).Elem()
}
// Accumulate any errors
errors := make([]string, 0)
for i := 0; i < dataVal.Len(); i++ {
currentData := dataVal.Index(i).Interface()
currentField := valArray.Index(i)
fieldName := fmt.Sprintf("%s[%d]", name, i)
if err := d.decode(fieldName, currentData, currentField); err != nil {
errors = appendErrors(errors, err)
}
}
// Finally, set the value to the array we built up
val.Set(valArray)
// If there were errors, we return those
if len(errors) > 0 {
return &Error{errors}
}
return nil
}
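
decodeArray is new in this update: a slice or array source decodes into a fixed-size array target, erroring when the source is longer than the target. A sketch; the field and input are illustrative:

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

func main() {
	var out struct{ Tags [3]string }
	input := map[string]interface{}{"Tags": []string{"a", "b"}}
	if err := mapstructure.Decode(input, &out); err != nil {
		panic(err)
	}
	// A 2-element source fits a [3]string target; the last slot stays zero.
	// A 4-element source would return the "length less or equal" error above.
	fmt.Println(out.Tags) // [a b ]
}
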
func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error {
dataVal := reflect.Indirect(reflect.ValueOf(data))
@@ -681,7 +913,11 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
// Compile the list of all the fields that we're going to be decoding
// from all the structs.
	type field struct {
		field reflect.StructField
		val   reflect.Value
	}
	fields := []field{}
for len(structs) > 0 {
structVal := structs[0]
structs = structs[1:]
@@ -707,20 +943,22 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
errors = appendErrors(errors,
fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind))
} else {
				structs = append(structs, structVal.FieldByName(fieldType.Name))
}
continue
}
// Normal struct field, store it away
			fields = append(fields, field{fieldType, structVal.Field(i)})
}
}
for _, f := range fields {
field, fieldValue := f.field, f.val
fieldName := field.Name
		tagValue := field.Tag.Get(d.config.TagName)
tagValue = strings.SplitN(tagValue, ",", 2)[0]
if tagValue != "" {
fieldName = tagValue
@@ -755,14 +993,14 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
// Delete the key we're using from the unused map so we stop tracking
delete(dataValKeysUnused, rawMapKey.Interface())
		if !fieldValue.IsValid() {
// This should never happen
panic("field is not valid")
}
// If we can't set the field, then it is unexported or something,
// and we just continue onwards.
		if !fieldValue.CanSet() {
continue
}
@@ -772,7 +1010,7 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
fieldName = fmt.Sprintf("%s.%s", name, fieldName)
}
		if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil {
errors = appendErrors(errors, err)
}
}

View File

@@ -11,6 +11,8 @@ const (
MapValue
Slice
SliceElem
Array
ArrayElem
Struct
StructField
WalkLoc

View File

@@ -1,15 +1,15 @@
// Code generated by "stringer -type=Location location.go"; DO NOT EDIT.
package reflectwalk
import "fmt"
const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemArrayArrayElemStructStructFieldWalkLoc"
var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 40, 49, 55, 66, 73}
func (i Location) String() string {
	if i >= Location(len(_Location_index)-1) {
return fmt.Sprintf("Location(%d)", i)
}
return _Location_name[_Location_index[i]:_Location_index[i+1]]

View File

@@ -39,6 +39,13 @@ type SliceWalker interface {
SliceElem(int, reflect.Value) error
}
// ArrayWalker implementations are able to handle array elements found
// within complex structures.
type ArrayWalker interface {
Array(reflect.Value) error
ArrayElem(int, reflect.Value) error
}
// StructWalker is an interface that has methods that are called for
// structs when a Walk is done.
type StructWalker interface {
@@ -65,6 +72,7 @@ type PointerWalker interface {
// SkipEntry can be returned from walk functions to skip walking
// the value of this field. This is only valid in the following functions:
//
// - Struct: skips all fields from being walked
// - StructField: skips walking the struct value
//
var SkipEntry = errors.New("skip this entry")
@@ -179,6 +187,9 @@ func walk(v reflect.Value, w interface{}) (err error) {
case reflect.Struct:
err = walkStruct(v, w)
return
case reflect.Array:
err = walkArray(v, w)
return
default:
panic("unsupported type: " + k.String())
}
@@ -286,48 +297,99 @@ func walkSlice(v reflect.Value, w interface{}) (err error) {
return nil
}
func walkArray(v reflect.Value, w interface{}) (err error) {
ew, ok := w.(EnterExitWalker)
if ok {
ew.Enter(Array)
}
if aw, ok := w.(ArrayWalker); ok {
if err := aw.Array(v); err != nil {
return err
}
}
for i := 0; i < v.Len(); i++ {
elem := v.Index(i)
if aw, ok := w.(ArrayWalker); ok {
if err := aw.ArrayElem(i, elem); err != nil {
return err
}
}
ew, ok := w.(EnterExitWalker)
if ok {
ew.Enter(ArrayElem)
}
if err := walk(elem, w); err != nil {
return err
}
if ok {
ew.Exit(ArrayElem)
}
}
ew, ok = w.(EnterExitWalker)
if ok {
ew.Exit(Array)
}
return nil
}
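
walkArray mirrors walkSlice: Enter(Array), one Array callback, then an ArrayElem callback plus a recursive walk per element. A runnable sketch of a walker implementing the new ArrayWalker interface; the printer type is illustrative:

package main

import (
	"fmt"
	"reflect"

	"github.com/mitchellh/reflectwalk"
)

// arrayPrinter implements ArrayWalker; reflectwalk calls Array once per
// array value and ArrayElem once per element, as walkArray above shows.
type arrayPrinter struct{}

func (arrayPrinter) Array(v reflect.Value) error {
	fmt.Println("array of len", v.Len())
	return nil
}

func (arrayPrinter) ArrayElem(i int, v reflect.Value) error {
	fmt.Printf("  [%d] = %v\n", i, v.Interface())
	return nil
}

func main() {
	if err := reflectwalk.Walk([3]int{7, 8, 9}, arrayPrinter{}); err != nil {
		panic(err)
	}
}
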
func walkStruct(v reflect.Value, w interface{}) (err error) {
ew, ewok := w.(EnterExitWalker)
if ewok {
ew.Enter(Struct)
}
skip := false
if sw, ok := w.(StructWalker); ok {
		err = sw.Struct(v)
		if err == SkipEntry {
			skip = true
			err = nil
		}
		if err != nil {
			return
		}
	}

	if !skip {
		vt := v.Type()
		for i := 0; i < vt.NumField(); i++ {
			sf := vt.Field(i)
			f := v.FieldByIndex([]int{i})

			if sw, ok := w.(StructWalker); ok {
				err = sw.StructField(sf, f)

				// SkipEntry just pretends this field doesn't even exist
				if err == SkipEntry {
					continue
				}

				if err != nil {
					return
				}
			}

			ew, ok := w.(EnterExitWalker)
			if ok {
				ew.Enter(StructField)
			}

			err = walk(f, w)
			if err != nil {
				return
			}

			if ok {
				ew.Exit(StructField)
			}
		}
	}
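
With the change above, returning SkipEntry from Struct (not just StructField) now skips every field of that struct. A small sketch; the types and the type-based filter are illustrative:

package main

import (
	"fmt"
	"reflect"

	"github.com/mitchellh/reflectwalk"
)

type Hidden struct{ X int }

type Outer struct {
	A int
	H Hidden
}

type skipHidden struct{}

func (skipHidden) Struct(v reflect.Value) error {
	if v.Type() == reflect.TypeOf(Hidden{}) {
		return reflectwalk.SkipEntry // skips all of Hidden's fields
	}
	return nil
}

func (skipHidden) StructField(sf reflect.StructField, v reflect.Value) error {
	fmt.Println("visiting", sf.Name)
	return nil
}

func main() {
	// Prints "visiting A" and "visiting H", but never descends into Hidden.
	if err := reflectwalk.Walk(Outer{}, skipHidden{}); err != nil {
		panic(err)
	}
}
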

vendor/vendor.json vendored
View File

@@ -537,236 +537,242 @@
"revisionTime": "2015-06-09T07:04:31Z"
},
{
"checksumSHA1": "fJMQji3y8ywlelHtnF3UIvct/Eo=",
"checksumSHA1": "D2qVXjDywJu6wLj/4NCTsFnRrvw=",
"path": "github.com/hashicorp/terraform/config",
"revision": "a6008b8a48a749c7c167453b9cf55ffd572b9a5d",
"revisionTime": "2018-01-09T23:13:33Z",
"revision": "41e50bd32a8825a84535e353c3674af8ce799161",
"revisionTime": "2018-04-10T16:50:42Z",
"version": "v0.11.2",
"versionExact": "v0.11.2"
},
{
"checksumSHA1": "WzQP2WfiCYlaALKZVqEFsxZsG1o=",
"path": "github.com/hashicorp/terraform/config/configschema",
"revision": "a6008b8a48a749c7c167453b9cf55ffd572b9a5d",
"revisionTime": "2018-01-09T23:13:33Z",
"revision": "41e50bd32a8825a84535e353c3674af8ce799161",
"revisionTime": "2018-04-10T16:50:42Z",
"version": "v0.11.2",
"versionExact": "v0.11.2"
},
{
"checksumSHA1": "3V7300kyZF+AGy/cOKV0+P6M3LY=",
"path": "github.com/hashicorp/terraform/config/hcl2shim",
"revision": "a6008b8a48a749c7c167453b9cf55ffd572b9a5d",
"revisionTime": "2018-01-09T23:13:33Z",
"revision": "41e50bd32a8825a84535e353c3674af8ce799161",
"revisionTime": "2018-04-10T16:50:42Z",
"version": "v0.11.2",
"versionExact": "v0.11.2"
},
{
"checksumSHA1": "7+cYlhS0+Z/xYUzYQft8Wibs1GA=",
"checksumSHA1": "HayBWvFE+t9aERoz9kpE2MODurk=",
"path": "github.com/hashicorp/terraform/config/module",
"revision": "a6008b8a48a749c7c167453b9cf55ffd572b9a5d",
"revisionTime": "2018-01-09T23:13:33Z",
"revision": "41e50bd32a8825a84535e353c3674af8ce799161",
"revisionTime": "2018-04-10T16:50:42Z",
"version": "v0.11.2",
"versionExact": "v0.11.2"
},
{
"checksumSHA1": "mPbjVPD2enEey45bP4M83W2AxlY=",
"path": "github.com/hashicorp/terraform/dag",
"revision": "a6008b8a48a749c7c167453b9cf55ffd572b9a5d",
"revisionTime": "2018-01-09T23:13:33Z",
"revision": "41e50bd32a8825a84535e353c3674af8ce799161",
"revisionTime": "2018-04-10T16:50:42Z",
"version": "v0.11.2",
"versionExact": "v0.11.2"
},
{
"checksumSHA1": "P8gNPDuOzmiK4Lz9xG7OBy4Rlm8=",
"path": "github.com/hashicorp/terraform/flatmap",
"revision": "a6008b8a48a749c7c167453b9cf55ffd572b9a5d",
"revisionTime": "2018-01-09T23:13:33Z",
"revision": "41e50bd32a8825a84535e353c3674af8ce799161",
"revisionTime": "2018-04-10T16:50:42Z",
"version": "v0.11.2",
"versionExact": "v0.11.2"
},
{
"checksumSHA1": "zx5DLo5aV0xDqxGTzSibXg7HHAA=",
"path": "github.com/hashicorp/terraform/helper/acctest",
"revision": "a6008b8a48a749c7c167453b9cf55ffd572b9a5d",
"revisionTime": "2018-01-09T23:13:33Z",
"revision": "41e50bd32a8825a84535e353c3674af8ce799161",
"revisionTime": "2018-04-10T16:50:42Z",
"version": "v0.11.2",
"versionExact": "v0.11.2"
},
{
"checksumSHA1": "uT6Q9RdSRAkDjyUgQlJ2XKJRab4=",
"path": "github.com/hashicorp/terraform/helper/config",
"revision": "a6008b8a48a749c7c167453b9cf55ffd572b9a5d",
"revisionTime": "2018-01-09T23:13:33Z",
"revision": "41e50bd32a8825a84535e353c3674af8ce799161",
"revisionTime": "2018-04-10T16:50:42Z",
"version": "v0.11.2",
"versionExact": "v0.11.2"
},
{
"checksumSHA1": "qVmQPoZmJ2w2OnaxIheWfuwun6g=",
"path": "github.com/hashicorp/terraform/helper/customdiff",
"revision": "a6008b8a48a749c7c167453b9cf55ffd572b9a5d",
"revisionTime": "2018-01-09T23:13:33Z",
"revision": "41e50bd32a8825a84535e353c3674af8ce799161",
"revisionTime": "2018-04-10T16:50:42Z",
"version": "v0.11.2",
"versionExact": "v0.11.2"
},
{
"checksumSHA1": "FH5eOEHfHgdxPC/JnfmCeSBk66U=",
"path": "github.com/hashicorp/terraform/helper/encryption",
"revision": "a6008b8a48a749c7c167453b9cf55ffd572b9a5d",
"revisionTime": "2018-01-09T23:13:33Z",
"revision": "41e50bd32a8825a84535e353c3674af8ce799161",
"revisionTime": "2018-04-10T16:50:42Z",
"version": "v0.11.2",
"versionExact": "v0.11.2"
},
{
"checksumSHA1": "KNvbU1r5jv0CBeQLnEtDoL3dRtc=",
"path": "github.com/hashicorp/terraform/helper/hashcode",
"revision": "a6008b8a48a749c7c167453b9cf55ffd572b9a5d",
"revisionTime": "2018-01-09T23:13:33Z",
"revision": "41e50bd32a8825a84535e353c3674af8ce799161",
"revisionTime": "2018-04-10T16:50:42Z",
"version": "v0.11.2",
"versionExact": "v0.11.2"
},
{
"checksumSHA1": "B267stWNQd0/pBTXHfI/tJsxzfc=",
"path": "github.com/hashicorp/terraform/helper/hilmapstructure",
"revision": "a6008b8a48a749c7c167453b9cf55ffd572b9a5d",
"revisionTime": "2018-01-09T23:13:33Z",
"revision": "41e50bd32a8825a84535e353c3674af8ce799161",
"revisionTime": "2018-04-10T16:50:42Z",
"version": "v0.11.2",
"versionExact": "v0.11.2"
},
{
"checksumSHA1": "BAXV9ruAyno3aFgwYI2/wWzB2Gc=",
"path": "github.com/hashicorp/terraform/helper/logging",
"revision": "a6008b8a48a749c7c167453b9cf55ffd572b9a5d",
"revisionTime": "2018-01-09T23:13:33Z",
"revision": "41e50bd32a8825a84535e353c3674af8ce799161",
"revisionTime": "2018-04-10T16:50:42Z",
"version": "v0.11.2",
"versionExact": "v0.11.2"
},
{
"checksumSHA1": "twkFd4x71kBnDfrdqO5nhs8dMOY=",
"path": "github.com/hashicorp/terraform/helper/mutexkv",
"revision": "a6008b8a48a749c7c167453b9cf55ffd572b9a5d",
"revisionTime": "2018-01-09T23:13:33Z",
"revision": "41e50bd32a8825a84535e353c3674af8ce799161",
"revisionTime": "2018-04-10T16:50:42Z",
"version": "v0.11.2",
"versionExact": "v0.11.2"
},
{
"checksumSHA1": "ImyqbHM/xe3eAT2moIjLI8ksuks=",
"path": "github.com/hashicorp/terraform/helper/pathorcontents",
"revision": "a6008b8a48a749c7c167453b9cf55ffd572b9a5d",
"revisionTime": "2018-01-09T23:13:33Z",
"revision": "41e50bd32a8825a84535e353c3674af8ce799161",
"revisionTime": "2018-04-10T16:50:42Z",
"version": "v0.11.2",
"versionExact": "v0.11.2"
},
{
"checksumSHA1": "9d4zouxtH24HFa6RuUdq7lG3tgQ=",
"checksumSHA1": "ryCWu7RtMlYrAfSevaI7RtaXe98=",
"path": "github.com/hashicorp/terraform/helper/resource",
"revision": "a6008b8a48a749c7c167453b9cf55ffd572b9a5d",
"revisionTime": "2018-01-09T23:13:33Z",
"revision": "41e50bd32a8825a84535e353c3674af8ce799161",
"revisionTime": "2018-04-10T16:50:42Z",
"version": "v0.11.2",
"versionExact": "v0.11.2"
},
{
"checksumSHA1": "qDtYSEdKmZQZ1tS/rsW9aH6+lHg=",
"checksumSHA1": "JHxGzmxcIS8NyLX9pGhK5beIra4=",
"path": "github.com/hashicorp/terraform/helper/schema",
"revision": "a6008b8a48a749c7c167453b9cf55ffd572b9a5d",
"revisionTime": "2018-01-09T23:13:33Z",
"revision": "41e50bd32a8825a84535e353c3674af8ce799161",
"revisionTime": "2018-04-10T16:50:42Z",
"version": "v0.11.2",
"versionExact": "v0.11.2"
},
{
"checksumSHA1": "Fzbv+N7hFXOtrR6E7ZcHT3jEE9s=",
"path": "github.com/hashicorp/terraform/helper/structure",
"revision": "a6008b8a48a749c7c167453b9cf55ffd572b9a5d",
"revisionTime": "2018-01-09T23:13:33Z",
"revision": "41e50bd32a8825a84535e353c3674af8ce799161",
"revisionTime": "2018-04-10T16:50:42Z",
"version": "v0.11.2",
"versionExact": "v0.11.2"
},
{
"checksumSHA1": "6O4zxgqAD+QZm6plsIfl4MH310Q=",
"checksumSHA1": "nEC56vB6M60BJtGPe+N9rziHqLg=",
"path": "github.com/hashicorp/terraform/helper/validation",
"revision": "a6008b8a48a749c7c167453b9cf55ffd572b9a5d",
"revisionTime": "2018-01-09T23:13:33Z",
"revision": "41e50bd32a8825a84535e353c3674af8ce799161",
"revisionTime": "2018-04-10T16:50:42Z",
"version": "v0.11.2",
"versionExact": "v0.11.2"
},
{
"checksumSHA1": "kD1ayilNruf2cES1LDfNZjYRscQ=",
"path": "github.com/hashicorp/terraform/httpclient",
"revision": "41e50bd32a8825a84535e353c3674af8ce799161",
"revisionTime": "2018-04-10T16:50:42Z"
},
{
"checksumSHA1": "yFWmdS6yEJZpRJzUqd/mULqCYGk=",
"path": "github.com/hashicorp/terraform/moduledeps",
"revision": "a6008b8a48a749c7c167453b9cf55ffd572b9a5d",
"revisionTime": "2018-01-09T23:13:33Z",
"revision": "41e50bd32a8825a84535e353c3674af8ce799161",
"revisionTime": "2018-04-10T16:50:42Z",
"version": "v0.11.2",
"versionExact": "v0.11.2"
},
{
"checksumSHA1": "DqaoG++NXRCfvH/OloneLWrM+3k=",
"path": "github.com/hashicorp/terraform/plugin",
"revision": "a6008b8a48a749c7c167453b9cf55ffd572b9a5d",
"revisionTime": "2018-01-09T23:13:33Z",
"revision": "41e50bd32a8825a84535e353c3674af8ce799161",
"revisionTime": "2018-04-10T16:50:42Z",
"version": "v0.11.2",
"versionExact": "v0.11.2"
},
{
"checksumSHA1": "tD8r8iNg//TN8c2GFuTnyHKBCPY=",
"checksumSHA1": "tx5xrdiUWdAHqoRV5aEfALgT1aU=",
"path": "github.com/hashicorp/terraform/plugin/discovery",
"revision": "a6008b8a48a749c7c167453b9cf55ffd572b9a5d",
"revisionTime": "2018-01-09T23:13:33Z",
"revision": "41e50bd32a8825a84535e353c3674af8ce799161",
"revisionTime": "2018-04-10T16:50:42Z",
"version": "v0.11.2",
"versionExact": "v0.11.2"
},
{
"checksumSHA1": "OzxWbZ+8/ogP/wSfXgcSc+o+ulQ=",
"checksumSHA1": "f6wDpr0uHKZqQw4ztvxMrtiuvQo=",
"path": "github.com/hashicorp/terraform/registry",
"revision": "096847c9f774bfb946de7413260b30f9f6041241",
"revisionTime": "2018-01-09T23:28:56Z"
"revision": "41e50bd32a8825a84535e353c3674af8ce799161",
"revisionTime": "2018-04-10T16:50:42Z"
},
{
"checksumSHA1": "cR87P4V5aiEfvF+1qoBi2JQyQS4=",
"path": "github.com/hashicorp/terraform/registry/regsrc",
"revision": "096847c9f774bfb946de7413260b30f9f6041241",
"revisionTime": "2018-01-09T23:28:56Z"
"revision": "41e50bd32a8825a84535e353c3674af8ce799161",
"revisionTime": "2018-04-10T16:50:42Z"
},
{
"checksumSHA1": "y9IXgIJQq9XNy1zIYUV2Kc0KsnA=",
"path": "github.com/hashicorp/terraform/registry/response",
"revision": "096847c9f774bfb946de7413260b30f9f6041241",
"revisionTime": "2018-01-09T23:28:56Z"
"revision": "41e50bd32a8825a84535e353c3674af8ce799161",
"revisionTime": "2018-04-10T16:50:42Z"
},
{
"checksumSHA1": "VXlzRRDVOqeMvnnrbUcR9H64OA4=",
"path": "github.com/hashicorp/terraform/svchost",
"revision": "096847c9f774bfb946de7413260b30f9f6041241",
"revisionTime": "2018-01-09T23:28:56Z"
"revision": "41e50bd32a8825a84535e353c3674af8ce799161",
"revisionTime": "2018-04-10T16:50:42Z"
},
{
"checksumSHA1": "GzcKNlFL0N77JVjU8qbltXE4R3k=",
"path": "github.com/hashicorp/terraform/svchost/auth",
"revision": "096847c9f774bfb946de7413260b30f9f6041241",
"revisionTime": "2018-01-09T23:28:56Z"
"revision": "41e50bd32a8825a84535e353c3674af8ce799161",
"revisionTime": "2018-04-10T16:50:42Z"
},
{
"checksumSHA1": "iGPn4dJF6fT/b+PFSWuimW3GiX8=",
"checksumSHA1": "jiDWmQieUE6OoUBMs53hj9P/JDQ=",
"path": "github.com/hashicorp/terraform/svchost/disco",
"revision": "096847c9f774bfb946de7413260b30f9f6041241",
"revisionTime": "2018-01-09T23:28:56Z"
"revision": "41e50bd32a8825a84535e353c3674af8ce799161",
"revisionTime": "2018-04-10T16:50:42Z"
},
{
"checksumSHA1": "XlNgGENevewsiFAszkaPV9Eg2cU=",
"checksumSHA1": "lHCKONqlaHsn5cEaYltad7dvRq8=",
"path": "github.com/hashicorp/terraform/terraform",
"revision": "a6008b8a48a749c7c167453b9cf55ffd572b9a5d",
"revisionTime": "2018-01-09T23:13:33Z",
"revision": "41e50bd32a8825a84535e353c3674af8ce799161",
"revisionTime": "2018-04-10T16:50:42Z",
"version": "v0.11.2",
"versionExact": "v0.11.2"
},
{
"checksumSHA1": "+K+oz9mMTmQMxIA3KVkGRfjvm9I=",
"path": "github.com/hashicorp/terraform/tfdiags",
"revision": "096847c9f774bfb946de7413260b30f9f6041241",
"revisionTime": "2018-01-09T23:28:56Z"
"revision": "41e50bd32a8825a84535e353c3674af8ce799161",
"revisionTime": "2018-04-10T16:50:42Z"
},
{
"checksumSHA1": "NvEryf++JgADxzFLG8T8Wd8Hie0=",
"checksumSHA1": "+attjxAt9nwFpCjxWEL08YwpGD8=",
"path": "github.com/hashicorp/terraform/version",
"revision": "096847c9f774bfb946de7413260b30f9f6041241",
"revisionTime": "2018-01-09T23:28:56Z"
"revision": "41e50bd32a8825a84535e353c3674af8ce799161",
"revisionTime": "2018-04-10T16:50:42Z"
},
{
"checksumSHA1": "au+CDkddC4sVFV15UaPiI7FvSw0=",
@@ -892,10 +898,10 @@
"revisionTime": "2017-09-08T18:10:43Z"
},
{
"checksumSHA1": "guxbLo8KHHBeM0rzou4OTzzpDNs=",
"checksumSHA1": "+p4JY4wmFQAppCdlrJ8Kxybmht8=",
"path": "github.com/mitchellh/copystructure",
"revision": "5af94aef99f597e6a9e1f6ac6be6ce0f3c96b49d",
"revisionTime": "2016-10-13T19:53:42Z"
"revision": "d23ffcb85de31694d6ccaa23ccb4a03e55c1303f",
"revisionTime": "2017-05-25T01:39:02Z"
},
{
"checksumSHA1": "V/quM7+em2ByJbWBLOsEwnY3j/Q=",
@@ -921,16 +927,16 @@
"revision": "6b17d669fac5e2f71c16658d781ec3fdd3802b69"
},
{
"checksumSHA1": "MlX15lJuV8DYARX5RJY8rqrSEWQ=",
"checksumSHA1": "ewGq4nGalpCQOHcmBTdAEQx1wW0=",
"path": "github.com/mitchellh/mapstructure",
"revision": "53818660ed4955e899c0bcafa97299a388bd7c8e",
"revisionTime": "2017-03-07T20:11:23Z"
"revision": "bb74f1db0675b241733089d5a1faa5dd8b0ef57b",
"revisionTime": "2018-05-11T14:21:26Z"
},
{
"checksumSHA1": "vBpuqNfSTZcAR/0tP8tNYacySGs=",
"checksumSHA1": "AMU63CNOg4XmIhVR/S/Xttt1/f0=",
"path": "github.com/mitchellh/reflectwalk",
"revision": "92573fe8d000a145bfebc03a16bc22b34945867f",
"revisionTime": "2016-10-03T17:45:16Z"
"revision": "63d60e9d0dbc60cf9164e6510889b0db6683d98c",
"revisionTime": "2017-07-26T20:21:17Z"
},
{
"checksumSHA1": "Nt4Ol6ZM2n0XD5zatxjwEYBpQnw=",