diff --git a/api/constraint.go b/api/constraint.go
index 3233a3bfdcd0..7213270e53f0 100644
--- a/api/constraint.go
+++ b/api/constraint.go
@@ -15,9 +15,9 @@ const (
 
 // Constraint is used to serialize a job placement constraint.
 type Constraint struct {
-	LTarget string
-	RTarget string
-	Operand string
+	LTarget string `hcl:"attribute,optional"`
+	RTarget string `hcl:"value,optional"`
+	Operand string `hcl:"operator,optional"`
 }
 
 // NewConstraint generates a new job placement constraint.
diff --git a/api/csi.go b/api/csi.go
index 47d7017a2553..3c657f557e90 100644
--- a/api/csi.go
+++ b/api/csi.go
@@ -89,9 +89,9 @@ const (
 )
 
 type CSIMountOptions struct {
-	FSType       string   `hcl:"fs_type"`
-	MountFlags   []string `hcl:"mount_flags"`
-	ExtraKeysHCL []string `hcl:",unusedKeys" json:"-"` // report unexpected keys
+	FSType       string   `hcl:"fs_type,optional"`
+	MountFlags   []string `hcl:"mount_flags,optional"`
+	ExtraKeysHCL []string `hcl1:",unusedKeys" json:"-"` // report unexpected keys
 }
 
 type CSISecrets map[string]string
@@ -133,7 +133,7 @@ type CSIVolume struct {
 	ModifyIndex uint64
 
 	// ExtraKeysHCL is used by the hcl parser to report unexpected keys
-	ExtraKeysHCL []string `hcl:",unusedKeys" json:"-"`
+	ExtraKeysHCL []string `hcl1:",unusedKeys" json:"-"`
 }
 
 // allocs is called after we query the volume (creating this CSIVolume struct) to collapse
diff --git a/api/jobs.go b/api/jobs.go
index fc195238e366..7b4cf18a588c 100644
--- a/api/jobs.go
+++ b/api/jobs.go
@@ -50,6 +50,9 @@ type JobsParseRequest struct {
 	// JobHCL is an hcl jobspec
 	JobHCL string
 
+	// HCLv1 indicates whether the JobHCL should be parsed with the hcl v1 parser
+	HCLv1 bool `json:"hclv1,omitempty"`
+
 	// Canonicalize is a flag as to if the server should return default values
 	// for unset fields
 	Canonicalize bool
@@ -439,15 +442,15 @@ type periodicForceResponse struct {
 
 // UpdateStrategy defines a task groups update strategy.
 type UpdateStrategy struct {
-	Stagger          *time.Duration `mapstructure:"stagger"`
-	MaxParallel      *int           `mapstructure:"max_parallel"`
-	HealthCheck      *string        `mapstructure:"health_check"`
-	MinHealthyTime   *time.Duration `mapstructure:"min_healthy_time"`
-	HealthyDeadline  *time.Duration `mapstructure:"healthy_deadline"`
-	ProgressDeadline *time.Duration `mapstructure:"progress_deadline"`
-	Canary           *int           `mapstructure:"canary"`
-	AutoRevert       *bool          `mapstructure:"auto_revert"`
-	AutoPromote      *bool          `mapstructure:"auto_promote"`
+	Stagger          *time.Duration `mapstructure:"stagger" hcl:"stagger,optional"`
+	MaxParallel      *int           `mapstructure:"max_parallel" hcl:"max_parallel,optional"`
+	HealthCheck      *string        `mapstructure:"health_check" hcl:"health_check,optional"`
+	MinHealthyTime   *time.Duration `mapstructure:"min_healthy_time" hcl:"min_healthy_time,optional"`
+	HealthyDeadline  *time.Duration `mapstructure:"healthy_deadline" hcl:"healthy_deadline,optional"`
+	ProgressDeadline *time.Duration `mapstructure:"progress_deadline" hcl:"progress_deadline,optional"`
+	Canary           *int           `mapstructure:"canary" hcl:"canary,optional"`
+	AutoRevert       *bool          `mapstructure:"auto_revert" hcl:"auto_revert,optional"`
+	AutoPromote      *bool          `mapstructure:"auto_promote" hcl:"auto_promote,optional"`
 }
 
 // DefaultUpdateStrategy provides a baseline that can be used to upgrade
@@ -640,8 +643,8 @@ func (u *UpdateStrategy) Empty() bool {
 }
 
 type Multiregion struct {
-	Strategy *MultiregionStrategy
-	Regions  []*MultiregionRegion
+	Strategy *MultiregionStrategy `hcl:"strategy,block"`
+	Regions  []*MultiregionRegion `hcl:"region,block"`
 }
 
 func (m *Multiregion) Canonicalize() {
@@ -700,24 +703,24 @@ func (m *Multiregion) Copy() *Multiregion {
 }
 
 type MultiregionStrategy struct {
-	MaxParallel *int    `mapstructure:"max_parallel"`
-	OnFailure   *string `mapstructure:"on_failure"`
+	MaxParallel *int    `mapstructure:"max_parallel" hcl:"max_parallel,optional"`
+	OnFailure   *string `mapstructure:"on_failure" hcl:"on_failure,optional"`
 }
 
 type MultiregionRegion struct {
-	Name        string
-	Count       *int
-	Datacenters []string
-	Meta        map[string]string
+	Name        string            `hcl:",label"`
+	Count       *int              `hcl:"count,optional"`
+	Datacenters []string          `hcl:"datacenters,optional"`
+	Meta        map[string]string `hcl:"meta,block"`
 }
 
 // PeriodicConfig is for serializing periodic config for a job.
 type PeriodicConfig struct {
-	Enabled  *bool
-	Spec     *string
+	Enabled  *bool   `hcl:"enabled,optional"`
+	Spec     *string `hcl:"cron,optional"`
 	SpecType *string
-	ProhibitOverlap *bool   `mapstructure:"prohibit_overlap"`
-	TimeZone        *string `mapstructure:"time_zone"`
+	ProhibitOverlap *bool   `mapstructure:"prohibit_overlap" hcl:"prohibit_overlap,optional"`
+	TimeZone        *string `mapstructure:"time_zone" hcl:"time_zone,optional"`
 }
 
 func (p *PeriodicConfig) Canonicalize() {
@@ -779,38 +782,43 @@ func (p *PeriodicConfig) GetLocation() (*time.Location, error) {
 
 // ParameterizedJobConfig is used to configure the parameterized job.
 type ParameterizedJobConfig struct {
-	Payload      string
-	MetaRequired []string `mapstructure:"meta_required"`
-	MetaOptional []string `mapstructure:"meta_optional"`
+	Payload      string   `hcl:"payload,optional"`
+	MetaRequired []string `mapstructure:"meta_required" hcl:"meta_required,optional"`
+	MetaOptional []string `mapstructure:"meta_optional" hcl:"meta_optional,optional"`
 }
 
 // Job is used to serialize a job.
 type Job struct {
+	/* Fields parsed from HCL config */
+
+	Region           *string                 `hcl:"region,optional"`
+	Namespace        *string                 `hcl:"namespace,optional"`
+	ID               *string                 `hcl:"id,optional"`
+	Name             *string                 `hcl:"name,optional"`
+	Type             *string                 `hcl:"type,optional"`
+	Priority         *int                    `hcl:"priority,optional"`
+	AllAtOnce        *bool                   `mapstructure:"all_at_once" hcl:"all_at_once,optional"`
+	Datacenters      []string                `hcl:"datacenters,optional"`
+	Constraints      []*Constraint           `hcl:"constraint,block"`
+	Affinities       []*Affinity             `hcl:"affinity,block"`
+	TaskGroups       []*TaskGroup            `hcl:"group,block"`
+	Update           *UpdateStrategy         `hcl:"update,block"`
+	Multiregion      *Multiregion            `hcl:"multiregion,block"`
+	Spreads          []*Spread               `hcl:"spread,block"`
+	Periodic         *PeriodicConfig         `hcl:"periodic,block"`
+	ParameterizedJob *ParameterizedJobConfig `hcl:"parameterized,block"`
+	Reschedule       *ReschedulePolicy       `hcl:"reschedule,block"`
+	Migrate          *MigrateStrategy        `hcl:"migrate,block"`
+	Meta             map[string]string       `hcl:"meta,block"`
+	ConsulToken      *string                 `mapstructure:"consul_token" hcl:"consul_token,optional"`
+	VaultToken       *string                 `mapstructure:"vault_token" hcl:"vault_token,optional"`
+
+	/* Fields set by server, not sourced from job config file */
+
 	Stop             *bool
-	Region           *string
-	Namespace        *string
-	ID               *string
 	ParentID         *string
-	Name             *string
-	Type             *string
-	Priority         *int
-	AllAtOnce        *bool `mapstructure:"all_at_once"`
-	Datacenters      []string
-	Constraints      []*Constraint
-	Affinities       []*Affinity
-	TaskGroups       []*TaskGroup
-	Update           *UpdateStrategy
-	Multiregion      *Multiregion
-	Spreads          []*Spread
-	Periodic         *PeriodicConfig
-	ParameterizedJob *ParameterizedJobConfig
 	Dispatched       bool
 	Payload          []byte
-	Reschedule       *ReschedulePolicy
-	Migrate          *MigrateStrategy
-	Meta             map[string]string
-	ConsulToken      *string `mapstructure:"consul_token"`
-	VaultToken       *string `mapstructure:"vault_token"`
 	VaultNamespace   *string `mapstructure:"vault_namespace"`
 	NomadTokenID     *string `mapstructure:"nomad_token_id"`
 	Status           *string
diff --git a/api/resources.go b/api/resources.go
index 74ef55e4e1fb..f6328af92e58 100644
--- a/api/resources.go
+++ b/api/resources.go
@@ -7,17 +7,17 @@ import (
 
 // Resources encapsulates the required resources of
 // a given task or task group.
 type Resources struct {
-	CPU      *int
-	MemoryMB *int `mapstructure:"memory"`
-	DiskMB   *int `mapstructure:"disk"`
-	Networks []*NetworkResource
-	Devices  []*RequestedDevice
+	CPU      *int               `hcl:"cpu,optional"`
+	MemoryMB *int               `mapstructure:"memory" hcl:"memory,optional"`
+	DiskMB   *int               `mapstructure:"disk" hcl:"disk,optional"`
+	Networks []*NetworkResource `hcl:"network,block"`
+	Devices  []*RequestedDevice `hcl:"device,block"`
 
 	// COMPAT(0.10)
 	// XXX Deprecated. Please do not use. The field will be removed in Nomad
 	// 0.10 and is only being kept to allow any references to be removed before
 	// then.
-	IOPS *int
+	IOPS *int `hcl:"iops,optional"`
 }
 
 // Canonicalize will supply missing values in the cases
@@ -84,29 +84,29 @@ func (r *Resources) Merge(other *Resources) {
 }
 
 type Port struct {
-	Label       string
-	Value       int    `mapstructure:"static"`
-	To          int    `mapstructure:"to"`
-	HostNetwork string `mapstructure:"host_network"`
+	Label       string `hcl:",label"`
+	Value       int    `mapstructure:"static" hcl:"static,optional"`
+	To          int    `mapstructure:"to" hcl:"to,optional"`
+	HostNetwork string `mapstructure:"host_network" hcl:"host_network,optional"`
 }
 
 type DNSConfig struct {
-	Servers  []string `mapstructure:"servers"`
-	Searches []string `mapstructure:"searches"`
-	Options  []string `mapstructure:"options"`
+	Servers  []string `mapstructure:"servers" hcl:"servers,optional"`
+	Searches []string `mapstructure:"searches" hcl:"searches,optional"`
+	Options  []string `mapstructure:"options" hcl:"options,optional"`
 }
 
 // NetworkResource is used to describe required network
 // resources of a given task.
 type NetworkResource struct {
-	Mode          string
-	Device        string
-	CIDR          string
-	IP            string
-	MBits         *int
-	DNS           *DNSConfig
-	ReservedPorts []Port
-	DynamicPorts  []Port
+	Mode          string     `hcl:"mode,optional"`
+	Device        string     `hcl:"device,optional"`
+	CIDR          string     `hcl:"cidr,optional"`
+	IP            string     `hcl:"ip,optional"`
+	MBits         *int       `hcl:"mbits,optional"`
+	DNS           *DNSConfig `hcl:"dns,block"`
+	ReservedPorts []Port     `hcl:"reserved_ports,block"`
+	DynamicPorts  []Port     `hcl:"port,block"`
 }
 
 func (n *NetworkResource) Canonicalize() {
@@ -226,18 +226,18 @@ type RequestedDevice struct {
 	// * "gpu"
 	// * "nvidia/gpu"
 	// * "nvidia/gpu/GTX2080Ti"
-	Name string
+	Name string `hcl:",label"`
 
 	// Count is the number of requested devices
-	Count *uint64
+	Count *uint64 `hcl:"count,optional"`
 
 	// Constraints are a set of constraints to apply when selecting the device
 	// to use.
-	Constraints []*Constraint
+	Constraints []*Constraint `hcl:"constraint,block"`
 
 	// Affinities are a set of affinites to apply when selecting the device
 	// to use.
-	Affinities []*Affinity
+	Affinities []*Affinity `hcl:"affinity,block"`
 }
 
 func (d *RequestedDevice) Canonicalize() {
diff --git a/api/scaling.go b/api/scaling.go
index 98c48f06c9dd..56a0a964ac79 100644
--- a/api/scaling.go
+++ b/api/scaling.go
@@ -60,14 +60,19 @@ type ScalingRequest struct {
 
 // ScalingPolicy is the user-specified API object for an autoscaling policy
 type ScalingPolicy struct {
+	/* fields set by user in HCL config */
+
+	Min     *int64                 `hcl:"min,optional"`
+	Max     *int64                 `hcl:"max,optional"`
+	Policy  map[string]interface{} `hcl:"policy,block"`
+	Enabled *bool                  `hcl:"enabled,optional"`
+
+	/* fields set by server */
+
 	ID        string
 	Namespace string
 	Type      string
 	Target    map[string]string
-	Min       *int64
-	Max       *int64
-	Policy    map[string]interface{}
-	Enabled   *bool
 	CreateIndex uint64
 	ModifyIndex uint64
 }
diff --git a/api/services.go b/api/services.go
index 9640119d64bb..fb9220de490c 100644
--- a/api/services.go
+++ b/api/services.go
@@ -8,9 +8,9 @@ import (
 // CheckRestart describes if and when a task should be restarted based on
 // failing health checks.
 type CheckRestart struct {
-	Limit          int            `mapstructure:"limit"`
-	Grace          *time.Duration `mapstructure:"grace"`
-	IgnoreWarnings bool           `mapstructure:"ignore_warnings"`
+	Limit          int            `mapstructure:"limit" hcl:"limit,optional"`
+	Grace          *time.Duration `mapstructure:"grace" hcl:"grace,optional"`
+	IgnoreWarnings bool           `mapstructure:"ignore_warnings" hcl:"ignore_warnings,optional"`
 }
 
 // Canonicalize CheckRestart fields if not nil.
@@ -73,46 +73,46 @@ func (c *CheckRestart) Merge(o *CheckRestart) *CheckRestart {
 
 // ServiceCheck represents the consul health check that Nomad registers.
 type ServiceCheck struct {
 	//FIXME Id is unused. Remove?
-	Id                     string
-	Name                   string
-	Type                   string
-	Command                string
-	Args                   []string
-	Path                   string
-	Protocol               string
-	PortLabel              string `mapstructure:"port"`
-	Expose                 bool
-	AddressMode            string `mapstructure:"address_mode"`
-	Interval               time.Duration
-	Timeout                time.Duration
-	InitialStatus          string `mapstructure:"initial_status"`
-	TLSSkipVerify          bool   `mapstructure:"tls_skip_verify"`
-	Header                 map[string][]string
-	Method                 string
-	CheckRestart           *CheckRestart `mapstructure:"check_restart"`
-	GRPCService            string        `mapstructure:"grpc_service"`
-	GRPCUseTLS             bool          `mapstructure:"grpc_use_tls"`
-	TaskName               string        `mapstructure:"task"`
-	SuccessBeforePassing   int           `mapstructure:"success_before_passing"`
-	FailuresBeforeCritical int           `mapstructure:"failures_before_critical"`
+	Id                     string              `hcl:"id,optional"`
+	Name                   string              `hcl:"name,optional"`
+	Type                   string              `hcl:"type,optional"`
+	Command                string              `hcl:"command,optional"`
+	Args                   []string            `hcl:"args,optional"`
+	Path                   string              `hcl:"path,optional"`
+	Protocol               string              `hcl:"protocol,optional"`
+	PortLabel              string              `mapstructure:"port" hcl:"port,optional"`
+	Expose                 bool                `hcl:"expose,optional"`
+	AddressMode            string              `mapstructure:"address_mode" hcl:"address_mode,optional"`
+	Interval               time.Duration       `hcl:"interval,optional"`
+	Timeout                time.Duration       `hcl:"timeout,optional"`
+	InitialStatus          string              `mapstructure:"initial_status" hcl:"initial_status,optional"`
+	TLSSkipVerify          bool                `mapstructure:"tls_skip_verify" hcl:"tls_skip_verify,optional"`
+	Header                 map[string][]string `hcl:"header,block"`
+	Method                 string              `hcl:"method,optional"`
+	CheckRestart           *CheckRestart       `mapstructure:"check_restart" hcl:"check_restart,block"`
+	GRPCService            string              `mapstructure:"grpc_service" hcl:"grpc_service,optional"`
+	GRPCUseTLS             bool                `mapstructure:"grpc_use_tls" hcl:"grpc_use_tls,optional"`
+	TaskName               string              `mapstructure:"task" hcl:"task,optional"`
+	SuccessBeforePassing   int                 `mapstructure:"success_before_passing" hcl:"success_before_passing,optional"`
+	FailuresBeforeCritical int                 `mapstructure:"failures_before_critical" hcl:"failures_before_critical,optional"`
 }
 
 // Service represents a Consul service definition.
 type Service struct {
 	//FIXME Id is unused. Remove?
-	Id                string
-	Name              string
-	Tags              []string
-	CanaryTags        []string `mapstructure:"canary_tags"`
-	EnableTagOverride bool     `mapstructure:"enable_tag_override"`
-	PortLabel         string   `mapstructure:"port"`
-	AddressMode       string   `mapstructure:"address_mode"`
-	Checks            []ServiceCheck
-	CheckRestart      *CheckRestart `mapstructure:"check_restart"`
-	Connect           *ConsulConnect
-	Meta              map[string]string
-	CanaryMeta        map[string]string
-	TaskName          string `mapstructure:"task"`
+	Id                string            `hcl:"id,optional"`
+	Name              string            `hcl:"name,optional"`
+	Tags              []string          `hcl:"tags,optional"`
+	CanaryTags        []string          `mapstructure:"canary_tags" hcl:"canary_tags,optional"`
+	EnableTagOverride bool              `mapstructure:"enable_tag_override" hcl:"enable_tag_override,optional"`
+	PortLabel         string            `mapstructure:"port" hcl:"port,optional"`
+	AddressMode       string            `mapstructure:"address_mode" hcl:"address_mode,optional"`
+	Checks            []ServiceCheck    `hcl:"check,block"`
+	CheckRestart      *CheckRestart     `mapstructure:"check_restart" hcl:"check_restart,block"`
+	Connect           *ConsulConnect    `hcl:"connect,block"`
+	Meta              map[string]string `hcl:"meta,block"`
+	CanaryMeta        map[string]string `hcl:"canary_meta,block"`
+	TaskName          string            `mapstructure:"task" hcl:"task,optional"`
 }
 
 // Canonicalize the Service by ensuring its name and address mode are set. Task
@@ -151,10 +151,10 @@ func (s *Service) Canonicalize(t *Task, tg *TaskGroup, job *Job) {
 
 // ConsulConnect represents a Consul Connect jobspec stanza.
 type ConsulConnect struct {
-	Native         bool
-	Gateway        *ConsulGateway
-	SidecarService *ConsulSidecarService `mapstructure:"sidecar_service"`
-	SidecarTask    *SidecarTask          `mapstructure:"sidecar_task"`
+	Native         bool                  `hcl:"native,optional"`
+	Gateway        *ConsulGateway        `hcl:"gateway,block"`
+	SidecarService *ConsulSidecarService `mapstructure:"sidecar_service" hcl:"sidecar_service,block"`
+	SidecarTask    *SidecarTask          `mapstructure:"sidecar_task" hcl:"sidecar_task,block"`
 }
 
 func (cc *ConsulConnect) Canonicalize() {
@@ -170,9 +170,9 @@ func (cc *ConsulConnect) Canonicalize() {
 
 // ConsulSidecarService represents a Consul Connect SidecarService jobspec
 // stanza.
 type ConsulSidecarService struct {
-	Tags  []string
-	Port  string
-	Proxy *ConsulProxy
+	Tags  []string     `hcl:"tags,optional"`
+	Port  string       `hcl:"port,optional"`
+	Proxy *ConsulProxy `hcl:"proxy,block"`
 }
 
 func (css *ConsulSidecarService) Canonicalize() {
@@ -190,17 +190,17 @@ func (css *ConsulSidecarService) Canonicalize() {
 // SidecarTask represents a subset of Task fields that can be set to override
 // the fields of the Task generated for the sidecar
 type SidecarTask struct {
-	Name          string
-	Driver        string
-	User          string
-	Config        map[string]interface{}
-	Env           map[string]string
-	Resources     *Resources
-	Meta          map[string]string
-	KillTimeout   *time.Duration `mapstructure:"kill_timeout"`
-	LogConfig     *LogConfig     `mapstructure:"logs"`
-	ShutdownDelay *time.Duration `mapstructure:"shutdown_delay"`
-	KillSignal    string         `mapstructure:"kill_signal"`
+	Name          string                 `hcl:"name,optional"`
+	Driver        string                 `hcl:"driver,optional"`
+	User          string                 `hcl:"user,optional"`
+	Config        map[string]interface{} `hcl:"config,block"`
+	Env           map[string]string      `hcl:"env,block"`
+	Resources     *Resources             `hcl:"resources,block"`
+	Meta          map[string]string      `hcl:"meta,block"`
+	KillTimeout   *time.Duration         `mapstructure:"kill_timeout" hcl:"kill_timeout,optional"`
+	LogConfig     *LogConfig             `mapstructure:"logs" hcl:"logs,block"`
+	ShutdownDelay *time.Duration         `mapstructure:"shutdown_delay" hcl:"shutdown_delay,optional"`
+	KillSignal    string                 `mapstructure:"kill_signal" hcl:"kill_signal,optional"`
 }
 
 func (st *SidecarTask) Canonicalize() {
@@ -243,11 +243,11 @@ func (st *SidecarTask) Canonicalize() {
 
 // ConsulProxy represents a Consul Connect sidecar proxy jobspec stanza.
 type ConsulProxy struct {
-	LocalServiceAddress string              `mapstructure:"local_service_address"`
-	LocalServicePort    int                 `mapstructure:"local_service_port"`
-	ExposeConfig        *ConsulExposeConfig `mapstructure:"expose"`
-	Upstreams           []*ConsulUpstream
-	Config              map[string]interface{}
+	LocalServiceAddress string                 `mapstructure:"local_service_address" hcl:"local_service_address,optional"`
+	LocalServicePort    int                    `mapstructure:"local_service_port" hcl:"local_service_port,optional"`
+	ExposeConfig        *ConsulExposeConfig    `mapstructure:"expose" hcl:"expose,block"`
+	Upstreams           []*ConsulUpstream      `hcl:"upstreams,block"`
+	Config              map[string]interface{} `hcl:"config,block"`
 }
 
 func (cp *ConsulProxy) Canonicalize() {
@@ -268,12 +268,12 @@ func (cp *ConsulProxy) Canonicalize() {
 
 // ConsulUpstream represents a Consul Connect upstream jobspec stanza.
 type ConsulUpstream struct {
-	DestinationName string `mapstructure:"destination_name"`
-	LocalBindPort   int    `mapstructure:"local_bind_port"`
+	DestinationName string `mapstructure:"destination_name" hcl:"destination_name,optional"`
+	LocalBindPort   int    `mapstructure:"local_bind_port" hcl:"local_bind_port,optional"`
 }
 
 type ConsulExposeConfig struct {
-	Path []*ConsulExposePath `mapstructure:"path"`
+	Path []*ConsulExposePath `mapstructure:"path" hcl:"path,block"`
 }
 
 func (cec *ConsulExposeConfig) Canonicalize() {
@@ -287,19 +287,19 @@ func (cec *ConsulExposeConfig) Canonicalize() {
 }
 
 type ConsulExposePath struct {
-	Path          string
-	Protocol      string
-	LocalPathPort int    `mapstructure:"local_path_port"`
-	ListenerPort  string `mapstructure:"listener_port"`
+	Path          string `hcl:"path,optional"`
+	Protocol      string `hcl:"protocol,optional"`
+	LocalPathPort int    `mapstructure:"local_path_port" hcl:"local_path_port,optional"`
+	ListenerPort  string `mapstructure:"listener_port" hcl:"listener_port,optional"`
 }
 
 // ConsulGateway is used to configure one of the Consul Connect Gateway types.
 type ConsulGateway struct {
 	// Proxy is used to configure the Envoy instance acting as the gateway.
-	Proxy *ConsulGatewayProxy
+	Proxy *ConsulGatewayProxy `hcl:"proxy,block"`
 
 	// Ingress represents the Consul Configuration Entry for an Ingress Gateway.
-	Ingress *ConsulIngressConfigEntry
+	Ingress *ConsulIngressConfigEntry `hcl:"ingress,block"`
 
 	// Terminating is not yet supported.
 	// Terminating *ConsulTerminatingConfigEntry
@@ -328,8 +328,9 @@ func (g *ConsulGateway) Copy() *ConsulGateway {
 }
 
 type ConsulGatewayBindAddress struct {
-	Address string `mapstructure:"address"`
-	Port    int    `mapstructure:"port"`
+	Name    string `hcl:",label"`
+	Address string `mapstructure:"address" hcl:"address,optional"`
+	Port    int    `mapstructure:"port" hcl:"port,optional"`
 }
 
 var (
@@ -341,11 +342,11 @@ var (
 //
 // https://www.consul.io/docs/connect/proxies/envoy#gateway-options
 type ConsulGatewayProxy struct {
-	ConnectTimeout                  *time.Duration                       `mapstructure:"connect_timeout"`
-	EnvoyGatewayBindTaggedAddresses bool                                 `mapstructure:"envoy_gateway_bind_tagged_addresses"`
-	EnvoyGatewayBindAddresses       map[string]*ConsulGatewayBindAddress `mapstructure:"envoy_gateway_bind_addresses"`
-	EnvoyGatewayNoDefaultBind       bool                                 `mapstructure:"envoy_gateway_no_default_bind"`
-	Config                          map[string]interface{}               // escape hatch envoy config
+	ConnectTimeout                  *time.Duration                       `mapstructure:"connect_timeout" hcl:"connect_timeout,optional"`
+	EnvoyGatewayBindTaggedAddresses bool                                 `mapstructure:"envoy_gateway_bind_tagged_addresses" hcl:"envoy_gateway_bind_tagged_addresses,optional"`
+	EnvoyGatewayBindAddresses       map[string]*ConsulGatewayBindAddress `mapstructure:"envoy_gateway_bind_addresses" hcl:"envoy_gateway_bind_addresses,block"`
+	EnvoyGatewayNoDefaultBind       bool                                 `mapstructure:"envoy_gateway_no_default_bind" hcl:"envoy_gateway_no_default_bind,optional"`
+	Config                          map[string]interface{}               `hcl:"config,block"` // escape hatch envoy config
 }
 
 func (p *ConsulGatewayProxy) Canonicalize() {
@@ -399,7 +400,7 @@ func (p *ConsulGatewayProxy) Copy() *ConsulGatewayProxy {
 
 // ConsulGatewayTLSConfig is used to configure TLS for a gateway.
 type ConsulGatewayTLSConfig struct {
-	Enabled bool
+	Enabled bool `hcl:"enabled,optional"`
 }
 
 func (tc *ConsulGatewayTLSConfig) Canonicalize() {
@@ -419,9 +420,9 @@ func (tc *ConsulGatewayTLSConfig) Copy() *ConsulGatewayTLSConfig {
 type ConsulIngressService struct {
 	// Namespace is not yet supported.
 	// Namespace string
-	Name string
+	Name string `hcl:"name,optional"`
 
-	Hosts []string
+	Hosts []string `hcl:"hosts,optional"`
 }
 
 func (s *ConsulIngressService) Canonicalize() {
@@ -458,9 +459,9 @@ const (
 // ConsulIngressListener is used to configure a listener on a Consul Ingress
 // Gateway.
 type ConsulIngressListener struct {
-	Port     int
-	Protocol string
-	Services []*ConsulIngressService
+	Port     int                     `hcl:"port,optional"`
+	Protocol string                  `hcl:"protocol,optional"`
+	Services []*ConsulIngressService `hcl:"service,block"`
 }
 
 func (l *ConsulIngressListener) Canonicalize() {
@@ -506,8 +507,8 @@ type ConsulIngressConfigEntry struct {
 	// Namespace is not yet supported.
 	// Namespace string
 
-	TLS       *ConsulGatewayTLSConfig
-	Listeners []*ConsulIngressListener
+	TLS       *ConsulGatewayTLSConfig  `hcl:"tls,block"`
+	Listeners []*ConsulIngressListener `hcl:"listener,block"`
 }
 
 func (e *ConsulIngressConfigEntry) Canonicalize() {
diff --git a/api/tasks.go b/api/tasks.go
index 2de8ea5915b2..b331ff6b044a 100644
--- a/api/tasks.go
+++ b/api/tasks.go
@@ -67,10 +67,10 @@ type AllocResourceUsage struct {
 // RestartPolicy defines how the Nomad client restarts
 // tasks in a taskgroup when they fail
 type RestartPolicy struct {
-	Interval *time.Duration
-	Attempts *int
-	Delay    *time.Duration
-	Mode     *string
+	Interval *time.Duration `hcl:"interval,optional"`
+	Attempts *int           `hcl:"attempts,optional"`
+	Delay    *time.Duration `hcl:"delay,optional"`
+	Mode     *string        `hcl:"mode,optional"`
 }
 
 func (r *RestartPolicy) Merge(rp *RestartPolicy) {
@@ -91,24 +91,24 @@ func (r *RestartPolicy) Merge(rp *RestartPolicy) {
 // Reschedule configures how Tasks are rescheduled when they crash or fail.
 type ReschedulePolicy struct {
 	// Attempts limits the number of rescheduling attempts that can occur in an interval.
-	Attempts *int `mapstructure:"attempts"`
+	Attempts *int `mapstructure:"attempts" hcl:"attempts,optional"`
 
 	// Interval is a duration in which we can limit the number of reschedule attempts.
-	Interval *time.Duration `mapstructure:"interval"`
+	Interval *time.Duration `mapstructure:"interval" hcl:"interval,optional"`
 
 	// Delay is a minimum duration to wait between reschedule attempts.
 	// The delay function determines how much subsequent reschedule attempts are delayed by.
-	Delay *time.Duration `mapstructure:"delay"`
+	Delay *time.Duration `mapstructure:"delay" hcl:"delay,optional"`
 
 	// DelayFunction determines how the delay progressively changes on subsequent reschedule
 	// attempts. Valid values are "exponential", "constant", and "fibonacci".
-	DelayFunction *string `mapstructure:"delay_function"`
+	DelayFunction *string `mapstructure:"delay_function" hcl:"delay_function,optional"`
 
 	// MaxDelay is an upper bound on the delay.
-	MaxDelay *time.Duration `mapstructure:"max_delay"`
+	MaxDelay *time.Duration `mapstructure:"max_delay" hcl:"max_delay,optional"`
 
 	// Unlimited allows rescheduling attempts until they succeed
-	Unlimited *bool `mapstructure:"unlimited"`
+	Unlimited *bool `mapstructure:"unlimited" hcl:"unlimited,optional"`
 }
 
 func (r *ReschedulePolicy) Merge(rp *ReschedulePolicy) {
@@ -159,10 +159,10 @@ func (r *ReschedulePolicy) Canonicalize(jobType string) {
 
 // Affinity is used to serialize task group affinities
 type Affinity struct {
-	LTarget string // Left-hand target
-	RTarget string // Right-hand target
-	Operand string // Constraint operand (<=, <, =, !=, >, >=), set_contains_all, set_contains_any
-	Weight  *int8  // Weight applied to nodes that match the affinity. Can be negative
+	LTarget string `hcl:"attribute,optional"` // Left-hand target
+	RTarget string `hcl:"value,optional"`     // Right-hand target
+	Operand string `hcl:"operator,optional"`  // Constraint operand (<=, <, =, !=, >, >=), set_contains_all, set_contains_any
+	Weight  *int8  `hcl:"weight,optional"`    // Weight applied to nodes that match the affinity. Can be negative
 }
 
 func NewAffinity(LTarget string, Operand string, RTarget string, Weight int8) *Affinity {
@@ -255,15 +255,15 @@ func (p *ReschedulePolicy) String() string {
 
 // Spread is used to serialize task group allocation spread preferences
 type Spread struct {
-	Attribute    string
-	Weight       *int8
-	SpreadTarget []*SpreadTarget
+	Attribute    string          `hcl:"attribute,optional"`
+	Weight       *int8           `hcl:"weight,optional"`
+	SpreadTarget []*SpreadTarget `hcl:"target,block"`
 }
 
 // SpreadTarget is used to serialize target allocation spread percentages
 type SpreadTarget struct {
-	Value   string
-	Percent uint8
+	Value   string `hcl:",label"`
+	Percent uint8  `hcl:"percent,optional"`
 }
 
 func NewSpreadTarget(value string, percent uint8) *SpreadTarget {
@@ -289,9 +289,9 @@ func (s *Spread) Canonicalize() {
 
 // EphemeralDisk is an ephemeral disk object
 type EphemeralDisk struct {
-	Sticky  *bool
-	Migrate *bool
-	SizeMB  *int `mapstructure:"size"`
+	Sticky  *bool `hcl:"sticky,optional"`
+	Migrate *bool `hcl:"migrate,optional"`
+	SizeMB  *int  `mapstructure:"size" hcl:"size,optional"`
 }
 
 func DefaultEphemeralDisk() *EphemeralDisk {
@@ -317,10 +317,10 @@ func (e *EphemeralDisk) Canonicalize() {
 // MigrateStrategy describes how allocations for a task group should be
 // migrated between nodes (eg when draining).
 type MigrateStrategy struct {
-	MaxParallel     *int           `mapstructure:"max_parallel"`
-	HealthCheck     *string        `mapstructure:"health_check"`
-	MinHealthyTime  *time.Duration `mapstructure:"min_healthy_time"`
-	HealthyDeadline *time.Duration `mapstructure:"healthy_deadline"`
+	MaxParallel     *int           `mapstructure:"max_parallel" hcl:"max_parallel,optional"`
+	HealthCheck     *string        `mapstructure:"health_check" hcl:"health_check,optional"`
+	MinHealthyTime  *time.Duration `mapstructure:"min_healthy_time" hcl:"min_healthy_time,optional"`
+	HealthyDeadline *time.Duration `mapstructure:"healthy_deadline" hcl:"healthy_deadline,optional"`
 }
 
 func DefaultMigrateStrategy() *MigrateStrategy {
@@ -377,12 +377,12 @@ func (m *MigrateStrategy) Copy() *MigrateStrategy {
 
 // VolumeRequest is a representation of a storage volume that a TaskGroup wishes to use.
 type VolumeRequest struct {
-	Name         string
-	Type         string
-	Source       string
-	ReadOnly     bool             `hcl:"read_only"`
-	MountOptions *CSIMountOptions `hcl:"mount_options"`
-	ExtraKeysHCL []string         `hcl:",unusedKeys" json:"-"`
+	Name         string           `hcl:"name,label"`
+	Type         string           `hcl:"type,optional"`
+	Source       string           `hcl:"source,optional"`
+	ReadOnly     bool             `hcl:"read_only,optional"`
+	MountOptions *CSIMountOptions `hcl:"mount_options,block"`
+	ExtraKeysHCL []string         `hcl1:",unusedKeys,optional" json:"-"`
 }
 
 const (
@@ -394,10 +394,10 @@ const (
 // VolumeMount represents the relationship between a destination path in a task
 // and the task group volume that should be mounted there.
 type VolumeMount struct {
-	Volume          *string
-	Destination     *string
-	ReadOnly        *bool   `mapstructure:"read_only"`
-	PropagationMode *string `mapstructure:"propagation_mode"`
+	Volume          *string `hcl:"volume,optional"`
+	Destination     *string `hcl:"destination,optional"`
+	ReadOnly        *bool   `mapstructure:"read_only" hcl:"read_only,optional"`
+	PropagationMode *string `mapstructure:"propagation_mode" hcl:"propagation_mode,optional"`
 }
 
 func (vm *VolumeMount) Canonicalize() {
@@ -411,24 +411,24 @@ func (vm *VolumeMount) Canonicalize() {
 
 // TaskGroup is the unit of scheduling.
 type TaskGroup struct {
-	Name                      *string
-	Count                     *int
-	Constraints               []*Constraint
-	Affinities                []*Affinity
-	Tasks                     []*Task
-	Spreads                   []*Spread
-	Volumes                   map[string]*VolumeRequest
-	RestartPolicy             *RestartPolicy
-	ReschedulePolicy          *ReschedulePolicy
-	EphemeralDisk             *EphemeralDisk
-	Update                    *UpdateStrategy
-	Migrate                   *MigrateStrategy
-	Networks                  []*NetworkResource
-	Meta                      map[string]string
-	Services                  []*Service
-	ShutdownDelay             *time.Duration `mapstructure:"shutdown_delay"`
-	StopAfterClientDisconnect *time.Duration `mapstructure:"stop_after_client_disconnect"`
-	Scaling                   *ScalingPolicy
+	Name                      *string                   `hcl:"name,label"`
+	Count                     *int                      `hcl:"count,optional"`
+	Constraints               []*Constraint             `hcl:"constraint,block"`
+	Affinities                []*Affinity               `hcl:"affinity,block"`
+	Tasks                     []*Task                   `hcl:"task,block"`
+	Spreads                   []*Spread                 `hcl:"spread,block"`
+	Volumes                   map[string]*VolumeRequest `hcl:"volume,block"`
+	RestartPolicy             *RestartPolicy            `hcl:"restart,block"`
+	ReschedulePolicy          *ReschedulePolicy         `hcl:"reschedule,block"`
+	EphemeralDisk             *EphemeralDisk            `hcl:"ephemeral_disk,block"`
+	Update                    *UpdateStrategy           `hcl:"update,block"`
+	Migrate                   *MigrateStrategy          `hcl:"migrate,block"`
+	Networks                  []*NetworkResource        `hcl:"network,block"`
+	Meta                      map[string]string         `hcl:"meta,block"`
+	Services                  []*Service                `hcl:"service,block"`
+	ShutdownDelay             *time.Duration            `mapstructure:"shutdown_delay" hcl:"shutdown_delay,optional"`
+	StopAfterClientDisconnect *time.Duration            `mapstructure:"stop_after_client_disconnect" hcl:"stop_after_client_disconnect,optional"`
+	Scaling                   *ScalingPolicy            `hcl:"scaling,block"`
 }
 
 // NewTaskGroup creates a new TaskGroup.
@@ -605,8 +605,8 @@ func (g *TaskGroup) AddSpread(s *Spread) *TaskGroup {
 
 // LogConfig provides configuration for log rotation
 type LogConfig struct {
-	MaxFiles      *int `mapstructure:"max_files"`
-	MaxFileSizeMB *int `mapstructure:"max_file_size"`
+	MaxFiles      *int `mapstructure:"max_files" hcl:"max_files,optional"`
+	MaxFileSizeMB *int `mapstructure:"max_file_size" hcl:"max_file_size,optional"`
 }
 
 func DefaultLogConfig() *LogConfig {
@@ -627,17 +627,17 @@ func (l *LogConfig) Canonicalize() {
 
 // DispatchPayloadConfig configures how a task gets its input from a job dispatch
 type DispatchPayloadConfig struct {
-	File string
+	File string `hcl:"file,optional"`
 }
 
 const (
-	TaskLifecycleHookPrestart = "prestart"
+	TaskLifecycleHookPrestart  = "prestart"
 	TaskLifecycleHookPoststart = "poststart"
 )
 
 type TaskLifecycle struct {
-	Hook    string `mapstructure:"hook"`
-	Sidecar bool   `mapstructure:"sidecar"`
+	Hook    string `mapstructure:"hook" hcl:"hook,optional"`
+	Sidecar bool   `mapstructure:"sidecar" hcl:"sidecar,optional"`
 }
 
 // Determine if lifecycle has user-input values
@@ -647,30 +647,30 @@ func (l *TaskLifecycle) Empty() bool {
 
 // Task is a single process in a task group.
 type Task struct {
-	Name            string
-	Driver          string
-	User            string
-	Lifecycle       *TaskLifecycle
-	Config          map[string]interface{}
-	Constraints     []*Constraint
-	Affinities      []*Affinity
-	Env             map[string]string
-	Services        []*Service
-	Resources       *Resources
-	RestartPolicy   *RestartPolicy
-	Meta            map[string]string
-	KillTimeout     *time.Duration `mapstructure:"kill_timeout"`
-	LogConfig       *LogConfig     `mapstructure:"logs"`
-	Artifacts       []*TaskArtifact
-	Vault           *Vault
-	Templates       []*Template
-	DispatchPayload *DispatchPayloadConfig
-	VolumeMounts    []*VolumeMount
-	CSIPluginConfig *TaskCSIPluginConfig `mapstructure:"csi_plugin" json:",omitempty"`
-	Leader          bool
-	ShutdownDelay   time.Duration `mapstructure:"shutdown_delay"`
-	KillSignal      string        `mapstructure:"kill_signal"`
-	Kind            string
+	Name            string                 `hcl:"name,label"`
+	Driver          string                 `hcl:"driver,optional"`
+	User            string                 `hcl:"user,optional"`
+	Lifecycle       *TaskLifecycle         `hcl:"lifecycle,block"`
+	Config          map[string]interface{} `hcl:"config,block"`
+	Constraints     []*Constraint          `hcl:"constraint,block"`
+	Affinities      []*Affinity            `hcl:"affinity,block"`
+	Env             map[string]string      `hcl:"env,block"`
+	Services        []*Service             `hcl:"service,block"`
+	Resources       *Resources             `hcl:"resources,block"`
+	RestartPolicy   *RestartPolicy         `hcl:"restart,block"`
+	Meta            map[string]string      `hcl:"meta,block"`
+	KillTimeout     *time.Duration         `mapstructure:"kill_timeout" hcl:"kill_timeout,optional"`
+	LogConfig       *LogConfig             `mapstructure:"logs" hcl:"logs,block"`
+	Artifacts       []*TaskArtifact        `hcl:"artifact,block"`
+	Vault           *Vault                 `hcl:"vault,block"`
+	Templates       []*Template            `hcl:"template,block"`
+	DispatchPayload *DispatchPayloadConfig `hcl:"dispatch_payload,block"`
+	VolumeMounts    []*VolumeMount         `hcl:"volume_mount,block"`
+	CSIPluginConfig *TaskCSIPluginConfig   `mapstructure:"csi_plugin" json:",omitempty" hcl:"csi_plugin,block"`
+	Leader          bool                   `hcl:"leader,optional"`
+	ShutdownDelay   time.Duration          `mapstructure:"shutdown_delay" hcl:"shutdown_delay,optional"`
+	KillSignal      string                 `mapstructure:"kill_signal" hcl:"kill_signal,optional"`
+	Kind            string                 `hcl:"kind,optional"`
 }
 
 func (t *Task) Canonicalize(tg *TaskGroup, job *Job) {
@@ -723,10 +723,10 @@ func (t *Task) Canonicalize(tg *TaskGroup, job *Job) {
 
 // TaskArtifact is used to download artifacts before running a task.
 type TaskArtifact struct {
-	GetterSource  *string           `mapstructure:"source"`
-	GetterOptions map[string]string `mapstructure:"options"`
-	GetterMode    *string           `mapstructure:"mode"`
-	RelativeDest  *string           `mapstructure:"destination"`
+	GetterSource  *string           `mapstructure:"source" hcl:"source,optional"`
+	GetterOptions map[string]string `mapstructure:"options" hcl:"options,block"`
+	GetterMode    *string           `mapstructure:"mode" hcl:"mode,optional"`
+	RelativeDest  *string           `mapstructure:"destination" hcl:"destination,optional"`
 }
 
 func (a *TaskArtifact) Canonicalize() {
@@ -753,17 +753,17 @@ func (a *TaskArtifact) Canonicalize() {
 }
 
 type Template struct {
-	SourcePath   *string        `mapstructure:"source"`
-	DestPath     *string        `mapstructure:"destination"`
-	EmbeddedTmpl *string        `mapstructure:"data"`
-	ChangeMode   *string        `mapstructure:"change_mode"`
-	ChangeSignal *string        `mapstructure:"change_signal"`
-	Splay        *time.Duration `mapstructure:"splay"`
-	Perms        *string        `mapstructure:"perms"`
-	LeftDelim    *string        `mapstructure:"left_delimiter"`
-	RightDelim   *string        `mapstructure:"right_delimiter"`
-	Envvars      *bool          `mapstructure:"env"`
-	VaultGrace   *time.Duration `mapstructure:"vault_grace"`
+	SourcePath   *string        `mapstructure:"source" hcl:"source,optional"`
+	DestPath     *string        `mapstructure:"destination" hcl:"destination,optional"`
+	EmbeddedTmpl *string        `mapstructure:"data" hcl:"data,optional"`
+	ChangeMode   *string        `mapstructure:"change_mode" hcl:"change_mode,optional"`
+	ChangeSignal *string        `mapstructure:"change_signal" hcl:"change_signal,optional"`
+	Splay        *time.Duration `mapstructure:"splay" hcl:"splay,optional"`
+	Perms        *string        `mapstructure:"perms" hcl:"perms,optional"`
+	LeftDelim    *string        `mapstructure:"left_delimiter" hcl:"left_delimiter,optional"`
+	RightDelim   *string        `mapstructure:"right_delimiter" hcl:"right_delimiter,optional"`
+	Envvars      *bool          `mapstructure:"env" hcl:"env,optional"`
+	VaultGrace   *time.Duration `mapstructure:"vault_grace" hcl:"vault_grace,optional"`
 }
 
 func (tmpl *Template) Canonicalize() {
@@ -812,11 +812,11 @@ func (tmpl *Template) Canonicalize() {
 }
 
 type Vault struct {
-	Policies     []string
-	Namespace    *string `mapstructure:"namespace"`
-	Env          *bool
-	ChangeMode   *string `mapstructure:"change_mode"`
-	ChangeSignal *string `mapstructure:"change_signal"`
+	Policies     []string `hcl:"policies,optional"`
+	Namespace    *string  `mapstructure:"namespace" hcl:"namespace,optional"`
+	Env          *bool    `hcl:"env,optional"`
+	ChangeMode   *string  `mapstructure:"change_mode" hcl:"change_mode,optional"`
+	ChangeSignal *string  `mapstructure:"change_signal" hcl:"change_signal,optional"`
 }
 
 func (v *Vault) Canonicalize() {
@@ -975,10 +975,10 @@ const (
 type TaskCSIPluginConfig struct {
 	// ID is the identifier of the plugin.
 	// Ideally this should be the FQDN of the plugin.
-	ID string `mapstructure:"id"`
+	ID string `mapstructure:"id" hcl:"id,optional"`
 
 	// CSIPluginType instructs Nomad on how to handle processing a plugin
-	Type CSIPluginType `mapstructure:"type"`
+	Type CSIPluginType `mapstructure:"type" hcl:"type,optional"`
 
 	// MountDir is the destination that nomad should mount in its CSI
 	// directory for the plugin. It will then expect a file called CSISocketName
@@ -986,7 +986,7 @@ type TaskCSIPluginConfig struct {
 	// "MountDir/CSIIntermediaryDirname/VolumeName/AllocID for mounts.
 	//
 	// Default is /csi.
-	MountDir string `mapstructure:"mount_dir"`
+	MountDir string `mapstructure:"mount_dir" hcl:"mount_dir,optional"`
 }
 
 func (t *TaskCSIPluginConfig) Canonicalize() {
diff --git a/client/allocrunner/taskrunner/getter/getter.go b/client/allocrunner/taskrunner/getter/getter.go
index f40d4f306ba7..a1abb456a9cf 100644
--- a/client/allocrunner/taskrunner/getter/getter.go
+++ b/client/allocrunner/taskrunner/getter/getter.go
@@ -4,6 +4,7 @@ import (
 	"errors"
 	"fmt"
 	"net/url"
+	"path/filepath"
 	"strings"
 	"sync"
 
@@ -98,8 +99,12 @@ func GetArtifact(taskEnv EnvReplacer, artifact *structs.TaskArtifact, taskDir st
 	}
 
 	// Verify the destination is still in the task sandbox after interpolation
-	dest, err := helper.GetPathInSandbox(taskDir, artifact.RelativeDest)
-	if err != nil {
+	// Note: we *always* join here even if we get passed an absolute path so
+	// that $NOMAD_SECRETS_DIR and friends can be used and always fall inside
+	// the task working directory
+	dest := filepath.Join(taskDir, artifact.RelativeDest)
+	escapes := helper.PathEscapesSandbox(taskDir, dest)
+	if escapes {
 		return newGetError(artifact.RelativeDest,
 			errors.New("artifact destination path escapes the alloc directory"), false)
 	}
diff --git a/client/allocrunner/taskrunner/template/template.go b/client/allocrunner/taskrunner/template/template.go
index ee73c83d3223..2bf11400ca69 100644
--- a/client/allocrunner/taskrunner/template/template.go
+++ b/client/allocrunner/taskrunner/template/template.go
@@ -551,19 +551,27 @@ func parseTemplateConfigs(config *TaskTemplateManagerConfig) (map[*ctconf.Templa
 	ctmpls := make(map[*ctconf.TemplateConfig]*structs.Template, len(config.Templates))
 	for _, tmpl := range config.Templates {
 		var src, dest string
-		var err error
 		if tmpl.SourcePath != "" {
 			src = taskEnv.ReplaceEnv(tmpl.SourcePath)
-			src, err = helper.GetPathInSandbox(config.TaskDir, src)
-			if err != nil && sandboxEnabled {
+			if !filepath.IsAbs(src) {
+				src = filepath.Join(config.TaskDir, src)
+			} else {
+				src = filepath.Clean(src)
+			}
+			escapes := helper.PathEscapesSandbox(config.TaskDir, src)
+			if escapes && sandboxEnabled {
 				return nil, fmt.Errorf("template source path escapes alloc directory")
 			}
 		}
 
 		if tmpl.DestPath != "" {
 			dest = taskEnv.ReplaceEnv(tmpl.DestPath)
-			dest, err = helper.GetPathInSandbox(config.TaskDir, dest)
-			if err != nil && sandboxEnabled {
+			// Note: we *always* join here even if we get passed an absolute
+			// path so that $NOMAD_SECRETS_DIR and friends can be used and
+			// always fall inside the task working directory
+			dest = filepath.Join(config.TaskDir, dest)
+			escapes := helper.PathEscapesSandbox(config.TaskDir, dest)
+			if escapes && sandboxEnabled {
 				return nil, fmt.Errorf("template destination path escapes alloc directory")
 			}
 		}
diff --git a/command/agent/job_endpoint.go b/command/agent/job_endpoint.go
index 98a7a9bb9414..23e05f637030 100644
--- a/command/agent/job_endpoint.go
+++ b/command/agent/job_endpoint.go
@@ -11,6 +11,7 @@ import (
 	"github.com/hashicorp/nomad/api"
 	"github.com/hashicorp/nomad/helper"
 	"github.com/hashicorp/nomad/jobspec"
+	"github.com/hashicorp/nomad/jobspec2"
 	"github.com/hashicorp/nomad/nomad/structs"
 )
 
@@ -675,7 +676,14 @@ func (s *HTTPServer) JobsParseRequest(resp http.ResponseWriter, req *http.Reques
 	}
 
 	jobfile := strings.NewReader(args.JobHCL)
-	jobStruct, err := jobspec.Parse(jobfile)
+
+	var jobStruct *api.Job
+	var err error
+	if args.HCLv1 {
+		jobStruct, err = jobspec.Parse(jobfile)
+	} else {
+		jobStruct, err = jobspec2.ParseWithArgs("input.hcl", jobfile, nil, false)
+	}
 	if err != nil {
 		return nil, CodedError(400, err.Error())
 	}
diff --git a/command/helpers.go b/command/helpers.go
index 36e0e1d0abdb..2e0bc88dcda6 100644
--- a/command/helpers.go
+++ b/command/helpers.go
@@ -7,6 +7,7 @@ import (
 	"io"
 	"io/ioutil"
 	"os"
+	"path/filepath"
 	"strconv"
 	"strings"
 	"time"
@@ -14,6 +15,7 @@ import (
 	gg "github.com/hashicorp/go-getter"
 	"github.com/hashicorp/nomad/api"
 	"github.com/hashicorp/nomad/jobspec"
+	"github.com/hashicorp/nomad/jobspec2"
 	"github.com/hashicorp/nomad/version"
 	"github.com/kr/text"
 	"github.com/mitchellh/cli"
@@ -379,13 +381,20 @@ READ:
 }
 
 type JobGetter struct {
+	hcl1 bool
+
 	// The fields below can be overwritten for tests
 	testStdin io.Reader
 }
 
 // StructJob returns the Job struct from jobfile.
 func (j *JobGetter) ApiJob(jpath string) (*api.Job, error) {
+	return j.ApiJobWithArgs(jpath, nil)
+}
+
+func (j *JobGetter) ApiJobWithArgs(jpath string, vars map[string]string) (*api.Job, error) {
 	var jobfile io.Reader
+	pathName := filepath.Base(jpath)
 	switch jpath {
 	case "-":
 		if j.testStdin != nil {
@@ -393,6 +402,7 @@ func (j *JobGetter) ApiJob(jpath string) (*api.Job, error) {
 		} else {
 			jobfile = os.Stdin
 		}
+		pathName = "stdin.hcl"
 	default:
 		if len(jpath) == 0 {
 			return nil, fmt.Errorf("Error jobfile path has to be specified.")
@@ -433,9 +443,15 @@ func (j *JobGetter) ApiJob(jpath string) (*api.Job, error) {
 	}
 
 	// Parse the JobFile
-	jobStruct, err := jobspec.Parse(jobfile)
+	var jobStruct *api.Job
+	var err error
+	if j.hcl1 {
+		jobStruct, err = jobspec.Parse(jobfile)
+	} else {
+		jobStruct, err = jobspec2.ParseWithArgs(pathName, jobfile, vars, true)
+	}
 	if err != nil {
-		return nil, fmt.Errorf("Error parsing job file from %s: %v", jpath, err)
+		return nil, fmt.Errorf("Error parsing job file from %s:\n%v", jpath, err)
 	}
 
 	return jobStruct, nil
@@ -509,8 +525,31 @@ func (w *uiErrorWriter) Close() error {
 	return nil
 }
 
 // serverVersionMatchesClient has the primary goal of checking the
 // version of Nomad reported by the server.
 func serverVersionMatchesClient(serverVersion string) bool {
 	return serverVersion == version.GetVersion().FullVersionNumber(true)
+}
+
+// parseVars decodes a slice of `key=value` or `key` strings into a golang map.
+//
+// A `key` without a corresponding value is mapped to the value of the `key`
+// environment variable.
+func parseVars(vars []string) map[string]string {
+	if len(vars) == 0 {
+		return nil
+	}
+
+	result := make(map[string]string, len(vars))
+	for _, v := range vars {
+		parts := strings.SplitN(v, "=", 2)
+		k := parts[0]
+		if len(parts) == 2 {
+			result[k] = parts[1]
+		} else {
+			result[k] = os.Getenv(k)
+		}
+	}
+
+	return result
 }
diff --git a/command/helpers_test.go b/command/helpers_test.go
index 553f822f8224..dd2f5e113ba2 100644
--- a/command/helpers_test.go
+++ b/command/helpers_test.go
@@ -209,20 +209,20 @@ func TestHelpers_LineLimitReader_TimeLimit(t *testing.T) {
 
 const (
 	job = `job "job1" {
-	type = "service"
-	datacenters = [ "dc1" ]
-	group "group1" {
-		count = 1
-		task "task1" {
-			driver = "exec"
-			resources = {}
-		}
-		restart{
-			attempts = 10
-			mode = "delay"
-			interval = "15s"
-		}
-	}
+	type = "service"
+	datacenters = ["dc1"]
+	group "group1" {
+		count = 1
+		task "task1" {
+			driver = "exec"
+			resources {}
+		}
+		restart {
+			attempts = 10
+			mode = "delay"
+			interval = "15s"
+		}
+	}
 }`
 )
@@ -392,3 +392,14 @@ func TestUiErrorWriter(t *testing.T) {
 	expectedErr += "and thensome more\n"
 	require.Equal(t, expectedErr, errBuf.String())
 }
+
+func TestParseVars(t *testing.T) {
+	input := []string{"key1=val1", "HOME", "key2=321"}
+	expected := map[string]string{
+		"key1": "val1",
+		"HOME": os.Getenv("HOME"),
+		"key2": "321",
+	}
+
+	require.Equal(t, expected, parseVars(input))
+}
diff --git a/command/job_plan.go b/command/job_plan.go
index e44171f24c71..ff0fe9aebdc2 100644
--- a/command/job_plan.go
+++ b/command/job_plan.go
@@ -6,6 +6,7 @@ import (
 	"strings"
 	"time"
 
+	cflags "github.com/hashicorp/consul/command/flags"
 	"github.com/hashicorp/nomad/api"
 	"github.com/hashicorp/nomad/scheduler"
 	"github.com/posener/complete"
@@ -72,6 +73,9 @@ Plan Options:
     Determines whether the diff between the remote job and planned job is shown.
     Defaults to true.
 
+  -hcl1
+    Parses the job file as HCLv1.
+
   -policy-override
     Sets the flag to force override any soft mandatory Sentinel policies.
@@ -91,6 +95,8 @@ func (c *JobPlanCommand) AutocompleteFlags() complete.Flags {
 			"-diff":            complete.PredictNothing,
 			"-policy-override": complete.PredictNothing,
 			"-verbose":         complete.PredictNothing,
+			"-hcl1":            complete.PredictNothing,
+			"-var":             complete.PredictAnything,
 		})
 }
 
@@ -101,12 +107,15 @@ func (c *JobPlanCommand) AutocompleteArgs() complete.Predictor {
 func (c *JobPlanCommand) Name() string { return "job plan" }
 
 func (c *JobPlanCommand) Run(args []string) int {
 	var diff, policyOverride, verbose bool
+	var varArgs cflags.AppendSliceValue
 
 	flags := c.Meta.FlagSet(c.Name(), FlagSetClient)
 	flags.Usage = func() { c.Ui.Output(c.Help()) }
 	flags.BoolVar(&diff, "diff", true, "")
 	flags.BoolVar(&policyOverride, "policy-override", false, "")
 	flags.BoolVar(&verbose, "verbose", false, "")
+	flags.BoolVar(&c.JobGetter.hcl1, "hcl1", false, "")
+	flags.Var(&varArgs, "var", "")
 
 	if err := flags.Parse(args); err != nil {
 		return 255
@@ -122,7 +131,7 @@ func (c *JobPlanCommand) Run(args []string) int {
 	path := args[0]
 
 	// Get Job struct from Jobfile
-	job, err := c.JobGetter.ApiJob(args[0])
+	job, err := c.JobGetter.ApiJobWithArgs(args[0], parseVars(varArgs))
 	if err != nil {
 		c.Ui.Error(fmt.Sprintf("Error getting job struct: %s", err))
 		return 255
diff --git a/command/job_plan_test.go b/command/job_plan_test.go
index be0b5ac2b8db..3173705198db 100644
--- a/command/job_plan_test.go
+++ b/command/job_plan_test.go
@@ -94,7 +94,7 @@ job "job1" {
 		count = 1
 		task "task1" {
 			driver = "exec"
-			resources = {
+			resources {
 				cpu = 1000
 				memory = 512
 			}
@@ -134,7 +134,7 @@ job "job1" {
 		count = 1
 		task "task1" {
 			driver = "exec"
-			resources = {
+			resources {
 				cpu = 1000
 				memory = 512
 			}
diff --git a/command/job_run.go b/command/job_run.go
index e64966163487..24bedb390991 100644
--- a/command/job_run.go
+++ b/command/job_run.go
@@ -9,6 +9,7 @@ import (
 	"strings"
 	"time"
 
+	cflags "github.com/hashicorp/consul/command/flags"
 	"github.com/hashicorp/nomad/api"
 	"github.com/hashicorp/nomad/helper"
 	"github.com/posener/complete"
@@ -79,6 +80,9 @@ Run Options:
     the evaluation ID will be printed to the screen, which can be used to
     examine the evaluation using the eval-status command.
 
+  -hcl1
+    Parses the job file as HCLv1.
+
   -output
     Output the JSON that would be submitted to the HTTP API without submitting
     the job.
@@ -88,7 +92,7 @@ Run Options:
 
   -preserve-counts
     If set, the existing task group counts will be preserved when updating a
    job.
-  
+
   -consul-token
     If set, the passed Consul token is stored in the job before sending to the
    Nomad servers. This allows passing the Consul token without storing it in
@@ -127,6 +131,8 @@ func (c *JobRunCommand) AutocompleteFlags() complete.Flags {
 			"-output":          complete.PredictNothing,
 			"-policy-override": complete.PredictNothing,
 			"-preserve-counts": complete.PredictNothing,
+			"-hcl1":            complete.PredictNothing,
+			"-var":             complete.PredictAnything,
 		})
 }
 
@@ -139,6 +145,7 @@ func (c *JobRunCommand) Name() string { return "job run" }
 
 func (c *JobRunCommand) Run(args []string) int {
 	var detach, verbose, output, override, preserveCounts bool
 	var checkIndexStr, consulToken, vaultToken, vaultNamespace string
+	var varArgs cflags.AppendSliceValue
 
 	flags := c.Meta.FlagSet(c.Name(), FlagSetClient)
 	flags.Usage = func() { c.Ui.Output(c.Help()) }
@@ -147,10 +154,12 @@ func (c *JobRunCommand) Run(args []string) int {
 	flags.BoolVar(&output, "output", false, "")
 	flags.BoolVar(&override, "policy-override", false, "")
 	flags.BoolVar(&preserveCounts, "preserve-counts", false, "")
+	flags.BoolVar(&c.JobGetter.hcl1, "hcl1", false, "")
 	flags.StringVar(&checkIndexStr, "check-index", "", "")
 	flags.StringVar(&consulToken, "consul-token", "", "")
 	flags.StringVar(&vaultToken, "vault-token", "", "")
 	flags.StringVar(&vaultNamespace, "vault-namespace", "", "")
+	flags.Var(&varArgs, "var", "")
 
 	if err := flags.Parse(args); err != nil {
 		return 1
@@ -171,7 +180,7 @@ func (c *JobRunCommand) Run(args []string) int {
 	}
 
 	// Get Job struct from Jobfile
-	job, err := c.JobGetter.ApiJob(args[0])
+	job, err := c.JobGetter.ApiJobWithArgs(args[0], parseVars(varArgs))
 	if err != nil {
 		c.Ui.Error(fmt.Sprintf("Error getting job struct: %s", err))
 		return 1
diff --git a/command/job_run_test.go b/command/job_run_test.go
index cb860cc81da4..80609aed9f7d 100644
--- a/command/job_run_test.go
+++ b/command/job_run_test.go
@@ -33,7 +33,7 @@ job "job1" {
 		count = 1
 		task "task1" {
 			driver = "exec"
-			resources = {
+			resources {
 				cpu = 1000
 				memory = 512
 			}
@@ -127,7 +127,7 @@ job "job1" {
 		count = 1
 		task "task1" {
 			driver = "exec"
-			resources = {
+			resources {
 				cpu = 1000
 				memory = 512
 			}
@@ -177,7 +177,7 @@ job "job1" {
 		count = 1
 		task "task1" {
 			driver = "exec"
-			resources = {
+			resources {
 				cpu = 1000
 				memory = 512
 			}
diff --git a/command/job_validate.go b/command/job_validate.go
index 34a037955ee0..a0ddc022571b 100644
--- a/command/job_validate.go
+++ b/command/job_validate.go
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"strings"
 
+	cflags "github.com/hashicorp/consul/command/flags"
 	multierror "github.com/hashicorp/go-multierror"
 	"github.com/hashicorp/nomad/api"
 	"github.com/hashicorp/nomad/command/agent"
@@ -27,6 +28,11 @@ Alias: nomad validate
   If the supplied path is "-", the jobfile is read from stdin. Otherwise
   it is read from the file at the supplied path or downloaded and
   read from URL specified.
+
+Validate Options:
+
+  -hcl1
+    Parses the job file as HCLv1.
 `
 	return strings.TrimSpace(helpText)
 }
 
@@ -36,7 +42,10 @@ func (c *JobValidateCommand) Synopsis() string {
 }
 
 func (c *JobValidateCommand) AutocompleteFlags() complete.Flags {
-	return nil
+	return complete.Flags{
+		"-hcl1": complete.PredictNothing,
+		"-var":  complete.PredictAnything,
+	}
 }
 
 func (c *JobValidateCommand) AutocompleteArgs() complete.Predictor {
@@ -46,8 +55,13 @@ func (c *JobValidateCommand) AutocompleteArgs() complete.Predictor {
 func (c *JobValidateCommand) Name() string { return "job validate" }
 
 func (c *JobValidateCommand) Run(args []string) int {
+	var varArgs cflags.AppendSliceValue
+
 	flags := c.Meta.FlagSet(c.Name(), FlagSetNone)
 	flags.Usage = func() { c.Ui.Output(c.Help()) }
+	flags.BoolVar(&c.JobGetter.hcl1, "hcl1", false, "")
+	flags.Var(&varArgs, "var", "")
+
 	if err := flags.Parse(args); err != nil {
 		return 1
 	}
@@ -61,7 +75,7 @@ func (c *JobValidateCommand) Run(args []string) int {
 	}
 
 	// Get Job struct from Jobfile
-	job, err := c.JobGetter.ApiJob(args[0])
+	job, err := c.JobGetter.ApiJobWithArgs(args[0], parseVars(varArgs))
 	if err != nil {
 		c.Ui.Error(fmt.Sprintf("Error getting job struct: %s", err))
 		return 1
diff --git a/command/job_validate_test.go b/command/job_validate_test.go
index c6effae07edc..6e51f33d2167 100644
--- a/command/job_validate_test.go
+++ b/command/job_validate_test.go
@@ -40,7 +40,7 @@ job "job1" {
 			config {
 				command = "/bin/sleep"
 			}
-			resources = {
+			resources {
 				cpu = 1000
 				memory = 512
 			}
@@ -142,7 +142,7 @@ job "job1" {
 			config {
 				command = "/bin/echo"
 			}
-			resources = {
+			resources {
 				cpu = 1000
 				memory = 512
 			}
diff --git a/command/operator_snapshot_restore_test.go b/command/operator_snapshot_restore_test.go
index f9b87e729bef..be88fee6f523 100644
--- a/command/operator_snapshot_restore_test.go
+++ b/command/operator_snapshot_restore_test.go
@@ -30,7 +30,7 @@ job "snapshot-test-job" {
 		count = 1
 		task "task1" {
 			driver = "exec"
-			resources = {
+			resources {
 				cpu = 1000
 				memory = 512
 			}
diff --git a/e2e/consultemplate/input/templating.nomad b/e2e/consultemplate/input/templating.nomad
index 7ae21af0a3ac..39ff3d8c8b16 100644
--- a/e2e/consultemplate/input/templating.nomad
+++ b/e2e/consultemplate/input/templating.nomad
@@ -24,7 +24,7 @@ job "templating" {
 server {{ .Name }} {{ .Address }}:{{ .Port }}{{ end }}
 EOT
 
-        destination = "local/services.conf"
+        destination = "${NOMAD_TASK_DIR}/services.conf"
         change_mode = "noop"
       }
 
@@ -35,7 +35,7 @@ key: {{ key "consultemplatetest" }}
 job: {{ env "NOMAD_JOB_NAME" }}
 EOT
 
-        destination = "local/kv.yml"
+        destination = "${NOMAD_TASK_DIR}/kv.yml"
         change_mode = "restart"
       }
 
@@ -63,7 +63,7 @@ EOT
 server {{ .Name }} {{ .Address }}:{{ .Port }}{{ end }}
 EOT
 
-        destination = "local/services.conf"
+        destination = "${NOMAD_TASK_DIR}/services.conf"
         change_mode = "noop"
       }
 
@@ -73,7 +73,7 @@ EOT
 key: {{ key "consultemplatetest" }}
 job: {{ env "NOMAD_JOB_NAME" }}
 EOT
 
-        destination = "local/kv.yml"
+        destination = "${NOMAD_TASK_DIR}/kv.yml"
         change_mode = "restart"
       }
diff --git a/go.mod b/go.mod
index 0e8849082a5c..4a586d3bd272 100644
--- a/go.mod
+++ b/go.mod
@@ -9,6 +9,7 @@ replace (
 	github.com/godbus/dbus => github.com/godbus/dbus v5.0.1+incompatible
 	github.com/golang/protobuf => github.com/golang/protobuf v1.3.4
 	github.com/hashicorp/go-discover => github.com/hashicorp/go-discover v0.0.0-20200812215701-c4b85f6ed31f
+	github.com/hashicorp/hcl => github.com/hashicorp/hcl v1.0.1-0.20201016140508-a07e7d50bbee
 	github.com/hashicorp/nomad/api => ./api
 	github.com/kr/pty => github.com/kr/pty v1.1.5
 )
 
 require (
@@ -57,6 +58,7 @@
github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de github.com/hashicorp/go-cleanhttp v0.5.1 github.com/hashicorp/go-connlimit v0.2.0 + github.com/hashicorp/go-cty-funcs v0.0.0-20200930094925-2721b1e36840 github.com/hashicorp/go-discover v0.0.0-20200812215701-c4b85f6ed31f github.com/hashicorp/go-envparse v0.0.0-20180119215841-310ca1881b22 github.com/hashicorp/go-getter v1.5.0 @@ -71,8 +73,8 @@ require ( github.com/hashicorp/go-uuid v1.0.1 github.com/hashicorp/go-version v1.2.1-0.20191009193637-2046c9d0f0b0 github.com/hashicorp/golang-lru v0.5.4 - github.com/hashicorp/hcl v1.0.1-0.20191016231534-914dc3f8dd7c - github.com/hashicorp/hcl/v2 v2.5.1 + github.com/hashicorp/hcl v1.0.1-0.20201016140508-a07e7d50bbee + github.com/hashicorp/hcl/v2 v2.7.1-0.20201020204811-68a97f93bb48 github.com/hashicorp/logutils v1.0.0 github.com/hashicorp/memberlist v0.2.2 github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69 @@ -97,6 +99,7 @@ require ( github.com/mitchellh/go-testing-interface v1.0.3 github.com/mitchellh/hashstructure v1.0.0 github.com/mitchellh/mapstructure v1.3.1 + github.com/mitchellh/reflectwalk v1.0.1 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect github.com/oklog/run v1.0.1-0.20180308005104-6934b124db28 // indirect github.com/onsi/gomega v1.9.0 // indirect @@ -116,6 +119,7 @@ require ( github.com/stretchr/testify v1.6.1 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 github.com/zclconf/go-cty v1.4.1 + github.com/zclconf/go-cty-yaml v1.0.2 go.opencensus.io v0.22.1-0.20190713072201-b4a14686f0a9 // indirect golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136 // indirect diff --git a/go.sum b/go.sum index fcacb46163c0..6022dca18b6c 100644 --- a/go.sum +++ b/go.sum @@ -85,6 +85,8 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/apparentlymart/go-cidr v1.0.1 h1:NmIwLZ/KdsjIUlhf+/Np40atNXm/+lZ5txfTJ/SpF+U= +github.com/apparentlymart/go-cidr v1.0.1/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3 h1:ZSTrOEhiM5J5RFxEaFvMZVEAM1KvT1YzbEOwB2EAGjA= github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2wFoYVvnCs0= @@ -117,6 +119,8 @@ github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/bmatcuk/doublestar v1.1.5 h1:2bNwBOmhyFEFcoB3tGvTD5xanq+4kyOZlB8wFYbMjkk= +github.com/bmatcuk/doublestar v1.1.5/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/boltdb/bolt v1.3.1 
h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= @@ -307,6 +311,7 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= @@ -355,6 +360,8 @@ github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVo github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-connlimit v0.2.0 h1:OZjcfNxH/hPh/bT2Iw5yOJcLzz+zuIWpsp3I1S4Pjw4= github.com/hashicorp/go-connlimit v0.2.0/go.mod h1:OUj9FGL1tPIhl/2RCfzYHrIiWj+VVPGNyVPnUX8AqS0= +github.com/hashicorp/go-cty-funcs v0.0.0-20200930094925-2721b1e36840 h1:kgvybwEeu0SXktbB2y3uLHX9lklLo+nzUwh59A3jzQc= +github.com/hashicorp/go-cty-funcs v0.0.0-20200930094925-2721b1e36840/go.mod h1:Abjk0jbRkDaNCzsRhOv2iDCofYpX1eVsjozoiK63qLA= github.com/hashicorp/go-discover v0.0.0-20200812215701-c4b85f6ed31f h1:7WFMVeuJQp6BkzuTv9O52pzwtEFVUJubKYN+zez8eTI= github.com/hashicorp/go-discover v0.0.0-20200812215701-c4b85f6ed31f/go.mod h1:D4eo8/CN92vm9/9UDG+ldX1/fMFa4kpl8qzyTolus8o= github.com/hashicorp/go-envparse v0.0.0-20180119215841-310ca1881b22 h1:HTmDIaSN95gbdMyrsbNiXSdW4fbGctGQwEqv0H7OhDQ= @@ -415,11 +422,10 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/hcl v1.0.1-0.20191016231534-914dc3f8dd7c h1:PdZEHcpa3117kJ1Wa5EYupzCzn9QlBby8Fx2YpZPYvo= -github.com/hashicorp/hcl v1.0.1-0.20191016231534-914dc3f8dd7c/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/hcl/v2 v2.5.1 h1:5ytFZykUu2/4U59ogd2f+XZdi9+6oC/Tv5WzsH6fIDA= -github.com/hashicorp/hcl/v2 v2.5.1/go.mod h1:bQTN5mpo+jewjJgh8jr0JUguIi7qPHUF6yIfAEN3jqY= +github.com/hashicorp/hcl v1.0.1-0.20201016140508-a07e7d50bbee h1:8B4HqvMUtYSjsGkYjiQGStc9pXffY2J+Z2SPQAj+wMY= +github.com/hashicorp/hcl v1.0.1-0.20201016140508-a07e7d50bbee/go.mod h1:gwlu9+/P9MmKtYrMsHeFRZPXj2CTPm11TDnMeaRHS7g= +github.com/hashicorp/hcl/v2 v2.7.1-0.20201020204811-68a97f93bb48 h1:iaau0VStfX9CgOlpbceawI94uVEM3sliqnjpHSVQqUo= +github.com/hashicorp/hcl/v2 v2.7.1-0.20201020204811-68a97f93bb48/go.mod h1:bQTN5mpo+jewjJgh8jr0JUguIi7qPHUF6yIfAEN3jqY= github.com/hashicorp/hil v0.0.0-20160711231837-1e86c6b523c5 h1:uk280DXEbQiCOZgCOI3elFSeNxf8YIZiNsbr2pQLYD0= github.com/hashicorp/hil v0.0.0-20160711231837-1e86c6b523c5/go.mod h1:KHvg/R2/dPtaePb16oW4qIyzkMxXOL38xjRN64adsts= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= @@ -741,9 +747,13 @@ 
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +github.com/zclconf/go-cty v1.0.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= +github.com/zclconf/go-cty v1.4.0/go.mod h1:nHzOclRkoj++EU9ZjSrZvRG0BXIWt8c7loYc0qXAFGQ= github.com/zclconf/go-cty v1.4.1 h1:Xzr4m4utRDhHDifag1onwwUSq32HLoLBsp+w6tD0880= github.com/zclconf/go-cty v1.4.1/go.mod h1:nHzOclRkoj++EU9ZjSrZvRG0BXIWt8c7loYc0qXAFGQ= +github.com/zclconf/go-cty-yaml v1.0.2 h1:dNyg4QLTrv2IfJpm7Wtxi55ed5gLGOlPrZ6kMd51hY0= +github.com/zclconf/go-cty-yaml v1.0.2/go.mod h1:IP3Ylp0wQpYm50IHK8OZWKMu6sPJIUgKa8XhiVHura0= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -765,6 +775,7 @@ golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191106202628-ed6320f186d4/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200422194213-44a606286825/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= diff --git a/helper/funcs.go b/helper/funcs.go index 153850c22ce6..d0277fa9f796 100644 --- a/helper/funcs.go +++ b/helper/funcs.go @@ -514,21 +514,16 @@ func CheckNamespaceScope(provided string, requested []string) []string { return nil } -// GetPathInSandbox returns a cleaned path inside the sandbox directory -// (typically this will be the allocation directory). Relative paths will be -// joined to the sandbox directory. Returns an error if the path escapes the -// sandbox directory. -func GetPathInSandbox(sandboxDir, path string) (string, error) { - if !filepath.IsAbs(path) { - path = filepath.Join(sandboxDir, path) - } - path = filepath.Clean(path) +// PathEscapesSandbox returns whether previously cleaned path inside the +// sandbox directory (typically this will be the allocation directory) +// escapes. 
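Note the contract change here: unlike the removed GetPathInSandbox, the new helper neither joins nor cleans, so callers are expected to resolve the path against the sandbox before calling it. A minimal sketch of that call pattern (the surrounding setup is illustrative, not part of this diff):

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// pathEscapesSandbox mirrors helper.PathEscapesSandbox above: it assumes
// path has already been joined to sandboxDir and cleaned.
func pathEscapesSandbox(sandboxDir, path string) bool {
	rel, err := filepath.Rel(sandboxDir, path)
	if err != nil {
		return true
	}
	return strings.HasPrefix(rel, "..")
}

func main() {
	allocDir := "/alloc"
	for _, input := range []string{"local/kv.yml", "../../etc/passwd"} {
		// Resolve user input against the sandbox first, as the tests below do.
		p := input
		if !filepath.IsAbs(p) {
			p = filepath.Join(allocDir, p)
		}
		p = filepath.Clean(p)
		fmt.Printf("%-18s escapes=%v\n", input, pathEscapesSandbox(allocDir, p))
	}
}
```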
+func PathEscapesSandbox(sandboxDir, path string) bool { rel, err := filepath.Rel(sandboxDir, path) if err != nil { - return path, err + return true } if strings.HasPrefix(rel, "..") { - return path, fmt.Errorf("%q escapes sandbox directory", path) + return true } - return path, nil + return false } diff --git a/helper/funcs_test.go b/helper/funcs_test.go index e209e9647f14..4e32681866b2 100644 --- a/helper/funcs_test.go +++ b/helper/funcs_test.go @@ -2,6 +2,7 @@ package helper import ( "fmt" + "path/filepath" "reflect" "sort" "testing" @@ -245,70 +246,86 @@ func TestCheckNamespaceScope(t *testing.T) { } } -func TestGetPathInSandbox(t *testing.T) { +func TestPathEscapesSandbox(t *testing.T) { cases := []struct { - name string - path string - dir string - expected string - expectedErr string + name string + path string + dir string + expected bool }{ { - name: "ok absolute path inside sandbox", - path: "/alloc/safe", + // this is the ${NOMAD_SECRETS_DIR} case + name: "ok joined absolute path inside sandbox", + path: filepath.Join("/alloc", "/secrets"), dir: "/alloc", - expected: "/alloc/safe", + expected: false, }, { - name: "ok relative path inside sandbox", + name: "fail unjoined absolute path outside sandbox", + path: "/secrets", + dir: "/alloc", + expected: true, + }, + { + name: "ok joined relative path inside sandbox", + path: filepath.Join("/alloc", "./safe"), + dir: "/alloc", + expected: false, + }, + { + name: "fail unjoined relative path outside sandbox", path: "./safe", dir: "/alloc", - expected: "/alloc/safe", + expected: true, }, { name: "ok relative path traversal constrained to sandbox", - path: "../../alloc/safe", + path: filepath.Join("/alloc", "../../alloc/safe"), + dir: "/alloc", + expected: false, + }, + { + name: "ok joined absolute path traversal constrained to sandbox", + path: filepath.Join("/alloc", "/../alloc/safe"), dir: "/alloc", - expected: "/alloc/safe", + expected: false, }, { - name: "ok absolute path traversal constrained to sandbox", + name: "ok unjoined absolute path traversal constrained to sandbox", path: "/../alloc/safe", dir: "/alloc", - expected: "/alloc/safe", + expected: false, + }, + { + name: "fail joined relative path traverses outside sandbox", + path: filepath.Join("/alloc", "../../../unsafe"), + dir: "/alloc", + expected: true, }, { - name: "fail absolute path outside sandbox", - path: "/unsafe", - dir: "/alloc", - expected: "/unsafe", - expectedErr: "\"/unsafe\" escapes sandbox directory", + name: "fail unjoined relative path traverses outside sandbox", + path: "../../../unsafe", + dir: "/alloc", + expected: true, }, { - name: "fail relative path traverses outside sandbox", - path: "../../../unsafe", - dir: "/alloc", - expected: "/unsafe", - expectedErr: "\"/unsafe\" escapes sandbox directory", + name: "fail joined absolute path tries to traverse outside sandbox", path: filepath.Join("/alloc", "/alloc/../../unsafe"), + dir: "/alloc", + expected: true, }, { - name: "fail absolute path tries to transverse outside sandbox", - path: "/alloc/../unsafe", - dir: "/alloc", - expected: "/unsafe", - expectedErr: "\"/unsafe\" escapes sandbox directory", + name: "fail unjoined absolute path tries to traverse outside sandbox", + path: "/alloc/../../unsafe", + dir: "/alloc", + expected: true, }, } for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { caseMsg := fmt.Sprintf("path: %v\ndir: %v", tc.path, tc.dir) - escapes, err := GetPathInSandbox(tc.dir, tc.path) - if tc.expectedErr != "" { - require.EqualError(t, err, tc.expectedErr,
caseMsg) - } else { - require.NoError(t, err, caseMsg) - } + escapes := PathEscapesSandbox(tc.dir, tc.path) require.Equal(t, tc.expected, escapes, caseMsg) }) } diff --git a/jobspec/parse_service.go b/jobspec/parse_service.go index 73fdac41fb32..0f4d0403bdd3 100644 --- a/jobspec/parse_service.go +++ b/jobspec/parse_service.go @@ -348,6 +348,7 @@ func parseGatewayProxy(o *ast.ObjectItem) (*api.ConsulGatewayProxy, error) { if err := hcl.DecodeObject(&bind, listenerListVal); err != nil { panic(err) } + bind.Name = listenerName proxy.EnvoyGatewayBindAddresses[listenerName] = &bind } } diff --git a/jobspec/parse_test.go b/jobspec/parse_test.go index ad3b2e3bc4f5..d2c064f46f2c 100644 --- a/jobspec/parse_test.go +++ b/jobspec/parse_test.go @@ -135,9 +135,10 @@ func TestParse(t *testing.T) { ExtraKeysHCL: nil, }, "bar": { - Name: "bar", - Type: "csi", - Source: "bar-vol", + Name: "bar", + Type: "csi", + Source: "bar-vol", + ReadOnly: true, MountOptions: &api.CSIMountOptions{ FSType: "ext4", }, @@ -1422,8 +1423,8 @@ func TestParse(t *testing.T) { ConnectTimeout: timeToPtr(3 * time.Second), EnvoyGatewayBindTaggedAddresses: true, EnvoyGatewayBindAddresses: map[string]*api.ConsulGatewayBindAddress{ - "listener1": {Address: "10.0.0.1", Port: 8888}, - "listener2": {Address: "10.0.0.2", Port: 8889}, + "listener1": {Name: "listener1", Address: "10.0.0.1", Port: 8888}, + "listener2": {Name: "listener2", Address: "10.0.0.2", Port: 8889}, }, EnvoyGatewayNoDefaultBind: true, Config: map[string]interface{}{"foo": "bar"}, diff --git a/jobspec/test-fixtures/basic.hcl b/jobspec/test-fixtures/basic.hcl index 1b99ee725ea8..b02356df34cc 100644 --- a/jobspec/test-fixtures/basic.hcl +++ b/jobspec/test-fixtures/basic.hcl @@ -76,8 +76,9 @@ job "binstore-storagelocker" { } volume "bar" { - type = "csi" - source = "bar-vol" + type = "csi" + source = "bar-vol" + read_only = true mount_options { fs_type = "ext4" diff --git a/jobspec/test-fixtures/tg-scaling-policy.hcl b/jobspec/test-fixtures/tg-scaling-policy.hcl index b446f7f6af61..412406b620a8 100644 --- a/jobspec/test-fixtures/tg-scaling-policy.hcl +++ b/jobspec/test-fixtures/tg-scaling-policy.hcl @@ -9,7 +9,7 @@ job "elastic" { foo = "bar" b = true val = 5 - f =.1 + f = 0.1 } } } diff --git a/jobspec/test-fixtures/tg-service-connect-sidecar_task-name.hcl b/jobspec/test-fixtures/tg-service-connect-sidecar_task-name.hcl index 99332cef8197..6fd400b6520d 100644 --- a/jobspec/test-fixtures/tg-service-connect-sidecar_task-name.hcl +++ b/jobspec/test-fixtures/tg-service-connect-sidecar_task-name.hcl @@ -6,7 +6,7 @@ job "sidecar_task_name" { name = "example" connect { - sidecar_service = {} + sidecar_service {} sidecar_task { name = "my-sidecar" diff --git a/jobspec/test-fixtures/tg-service-proxy-expose.hcl b/jobspec/test-fixtures/tg-service-proxy-expose.hcl index a9effdb44cbe..cd4494f4009d 100644 --- a/jobspec/test-fixtures/tg-service-proxy-expose.hcl +++ b/jobspec/test-fixtures/tg-service-proxy-expose.hcl @@ -7,14 +7,14 @@ job "group_service_proxy_expose" { sidecar_service { proxy { expose { - path = { + path { path = "/health" protocol = "http" local_path_port = 2222 listener_port = "healthcheck" } - path = { + path { path = "/metrics" protocol = "grpc" local_path_port = 3000 diff --git a/jobspec2/functions.go b/jobspec2/functions.go new file mode 100644 index 000000000000..2dd0f262098d --- /dev/null +++ b/jobspec2/functions.go @@ -0,0 +1,133 @@ +package jobspec2 + +import ( + "fmt" + + "github.com/hashicorp/go-cty-funcs/cidr" + "github.com/hashicorp/go-cty-funcs/crypto" + 
"github.com/hashicorp/go-cty-funcs/encoding" + "github.com/hashicorp/go-cty-funcs/filesystem" + "github.com/hashicorp/go-cty-funcs/uuid" + "github.com/hashicorp/hcl/v2/ext/tryfunc" + "github.com/hashicorp/hcl/v2/ext/typeexpr" + ctyyaml "github.com/zclconf/go-cty-yaml" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/function/stdlib" +) + +// Functions returns the set of functions that should be used to when +// evaluating expressions in the receiving scope. +// +// basedir is used with file functions and allows a user to reference a file +// using local path. Usually basedir is the directory in which the config file +// is located +// +func Functions(basedir string, allowFS bool) map[string]function.Function { + funcs := map[string]function.Function{ + "abs": stdlib.AbsoluteFunc, + "base64decode": encoding.Base64DecodeFunc, + "base64encode": encoding.Base64EncodeFunc, + "bcrypt": crypto.BcryptFunc, + "can": tryfunc.CanFunc, + "ceil": stdlib.CeilFunc, + "chomp": stdlib.ChompFunc, + "chunklist": stdlib.ChunklistFunc, + "cidrhost": cidr.HostFunc, + "cidrnetmask": cidr.NetmaskFunc, + "cidrsubnet": cidr.SubnetFunc, + "cidrsubnets": cidr.SubnetsFunc, + "coalesce": stdlib.CoalesceFunc, + "coalescelist": stdlib.CoalesceListFunc, + "compact": stdlib.CompactFunc, + "concat": stdlib.ConcatFunc, + "contains": stdlib.ContainsFunc, + "convert": typeexpr.ConvertFunc, + "csvdecode": stdlib.CSVDecodeFunc, + "distinct": stdlib.DistinctFunc, + "element": stdlib.ElementFunc, + "flatten": stdlib.FlattenFunc, + "floor": stdlib.FloorFunc, + "format": stdlib.FormatFunc, + "formatdate": stdlib.FormatDateFunc, + "formatlist": stdlib.FormatListFunc, + "indent": stdlib.IndentFunc, + "index": stdlib.IndexFunc, + "join": stdlib.JoinFunc, + "jsondecode": stdlib.JSONDecodeFunc, + "jsonencode": stdlib.JSONEncodeFunc, + "keys": stdlib.KeysFunc, + "length": stdlib.LengthFunc, + "log": stdlib.LogFunc, + "lookup": stdlib.LookupFunc, + "lower": stdlib.LowerFunc, + "max": stdlib.MaxFunc, + "md5": crypto.Md5Func, + "merge": stdlib.MergeFunc, + "min": stdlib.MinFunc, + "parseint": stdlib.ParseIntFunc, + "pow": stdlib.PowFunc, + "range": stdlib.RangeFunc, + "reverse": stdlib.ReverseFunc, + "replace": stdlib.ReplaceFunc, + "regex_replace": stdlib.RegexReplaceFunc, + "rsadecrypt": crypto.RsaDecryptFunc, + "setintersection": stdlib.SetIntersectionFunc, + "setproduct": stdlib.SetProductFunc, + "setunion": stdlib.SetUnionFunc, + "sha1": crypto.Sha1Func, + "sha256": crypto.Sha256Func, + "sha512": crypto.Sha512Func, + "signum": stdlib.SignumFunc, + "slice": stdlib.SliceFunc, + "sort": stdlib.SortFunc, + "split": stdlib.SplitFunc, + "strrev": stdlib.ReverseFunc, + "substr": stdlib.SubstrFunc, + "timeadd": stdlib.TimeAddFunc, + "title": stdlib.TitleFunc, + "trim": stdlib.TrimFunc, + "trimprefix": stdlib.TrimPrefixFunc, + "trimspace": stdlib.TrimSpaceFunc, + "trimsuffix": stdlib.TrimSuffixFunc, + "try": tryfunc.TryFunc, + "upper": stdlib.UpperFunc, + "urlencode": encoding.URLEncodeFunc, + "uuidv4": uuid.V4Func, + "uuidv5": uuid.V5Func, + "values": stdlib.ValuesFunc, + "yamldecode": ctyyaml.YAMLDecodeFunc, + "yamlencode": ctyyaml.YAMLEncodeFunc, + "zipmap": stdlib.ZipmapFunc, + + // filesystem calls + "abspath": guardFS(allowFS, filesystem.AbsPathFunc), + "basename": guardFS(allowFS, filesystem.BasenameFunc), + "dirname": guardFS(allowFS, filesystem.DirnameFunc), + "file": guardFS(allowFS, filesystem.MakeFileFunc(basedir, false)), + "fileexists": guardFS(allowFS, 
filesystem.MakeFileExistsFunc(basedir)), + "fileset": guardFS(allowFS, filesystem.MakeFileSetFunc(basedir)), + "pathexpand": guardFS(allowFS, filesystem.PathExpandFunc), + } + + return funcs +} + +func guardFS(allowFS bool, fn function.Function) function.Function { + if allowFS { + return fn + } + + spec := &function.Spec{ + Params: fn.Params(), + VarParam: fn.VarParam(), + Type: func([]cty.Value) (cty.Type, error) { + return cty.DynamicPseudoType, fmt.Errorf("filesystem function disabled") + }, + Impl: func([]cty.Value, cty.Type) (cty.Value, error) { + return cty.DynamicVal, fmt.Errorf("filesystem functions disabled") + }, + } + + return function.New(spec) +} diff --git a/jobspec2/hcl_conversions.go b/jobspec2/hcl_conversions.go new file mode 100644 index 000000000000..413a9e148f0e --- /dev/null +++ b/jobspec2/hcl_conversions.go @@ -0,0 +1,281 @@ +package jobspec2 + +import ( + "fmt" + "reflect" + "time" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/hashicorp/nomad/api" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/gocty" +) + +var hclDecoder *gohcl.Decoder + +func init() { + hclDecoder = newHCLDecoder() + hclDecoder.RegisterBlockDecoder(reflect.TypeOf(api.TaskGroup{}), decodeTaskGroup) +} + +func newHCLDecoder() *gohcl.Decoder { + decoder := &gohcl.Decoder{} + + // time conversion + d := time.Duration(0) + decoder.RegisterExpressionDecoder(reflect.TypeOf(d), decodeDuration) + decoder.RegisterExpressionDecoder(reflect.TypeOf(&d), decodeDuration) + + // custom nomad types + decoder.RegisterBlockDecoder(reflect.TypeOf(api.Affinity{}), decodeAffinity) + decoder.RegisterBlockDecoder(reflect.TypeOf(api.Constraint{}), decodeConstraint) + decoder.RegisterBlockDecoder(reflect.TypeOf(jobWrapper{}), decodeJob) + + return decoder +} + +func decodeDuration(expr hcl.Expression, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics { + srcVal, diags := expr.Value(ctx) + + if srcVal.Type() == cty.String { + dur, err := time.ParseDuration(srcVal.AsString()) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsuitable value type", + Detail: fmt.Sprintf("Unsuitable duration value: %s", err.Error()), + Subject: expr.StartRange().Ptr(), + Context: expr.Range().Ptr(), + }) + return diags + } + + srcVal = cty.NumberIntVal(int64(dur)) + } + + if srcVal.Type() != cty.Number { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsuitable value type", + Detail: fmt.Sprintf("Unsuitable value: expected a string but found %s", srcVal.Type()), + Subject: expr.StartRange().Ptr(), + Context: expr.Range().Ptr(), + }) + return diags + + } + + err := gocty.FromCtyValue(srcVal, val) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsuitable value type", + Detail: fmt.Sprintf("Unsuitable value: %s", err.Error()), + Subject: expr.StartRange().Ptr(), + Context: expr.Range().Ptr(), + }) + } + + return diags +} + +var affinitySpec = hcldec.ObjectSpec{ + "attribute": &hcldec.AttrSpec{Name: "attribute", Type: cty.String, Required: false}, + "value": &hcldec.AttrSpec{Name: "value", Type: cty.String, Required: false}, + "operator": &hcldec.AttrSpec{Name: "operator", Type: cty.String, Required: false}, + "weight": &hcldec.AttrSpec{Name: "weight", Type: cty.Number, Required: false}, + + api.ConstraintVersion: &hcldec.AttrSpec{Name: api.ConstraintVersion, Type: cty.String, Required: false}, + 
api.ConstraintSemver: &hcldec.AttrSpec{Name: api.ConstraintSemver, Type: cty.String, Required: false}, + api.ConstraintRegex: &hcldec.AttrSpec{Name: api.ConstraintRegex, Type: cty.String, Required: false}, + api.ConstraintSetContains: &hcldec.AttrSpec{Name: api.ConstraintSetContains, Type: cty.String, Required: false}, + api.ConstraintSetContainsAll: &hcldec.AttrSpec{Name: api.ConstraintSetContainsAll, Type: cty.String, Required: false}, + api.ConstraintSetContainsAny: &hcldec.AttrSpec{Name: api.ConstraintSetContainsAny, Type: cty.String, Required: false}, +} + +func decodeAffinity(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics { + a := val.(*api.Affinity) + v, diags := hcldec.Decode(body, affinitySpec, ctx) + if len(diags) != 0 { + return diags + } + + attr := func(attr string) string { + a := v.GetAttr(attr) + if a.IsNull() { + return "" + } + return a.AsString() + } + a.LTarget = attr("attribute") + a.RTarget = attr("value") + a.Operand = attr("operator") + weight := v.GetAttr("weight") + if !weight.IsNull() { + w, _ := weight.AsBigFloat().Int64() + a.Weight = int8ToPtr(int8(w)) + } + + // If "version" is provided, set the operand + // to "version" and the value to the "RTarget" + if affinity := attr(api.ConstraintVersion); affinity != "" { + a.Operand = api.ConstraintVersion + a.RTarget = affinity + } + + // If "semver" is provided, set the operand + // to "semver" and the value to the "RTarget" + if affinity := attr(api.ConstraintSemver); affinity != "" { + a.Operand = api.ConstraintSemver + a.RTarget = affinity + } + + // If "regexp" is provided, set the operand + // to "regexp" and the value to the "RTarget" + if affinity := attr(api.ConstraintRegex); affinity != "" { + a.Operand = api.ConstraintRegex + a.RTarget = affinity + } + + // If "set_contains_any" is provided, set the operand + // to "set_contains_any" and the value to the "RTarget" + if affinity := attr(api.ConstraintSetContainsAny); affinity != "" { + a.Operand = api.ConstraintSetContainsAny + a.RTarget = affinity + } + + // If "set_contains_all" is provided, set the operand + // to "set_contains_all" and the value to the "RTarget" + if affinity := attr(api.ConstraintSetContainsAll); affinity != "" { + a.Operand = api.ConstraintSetContainsAll + a.RTarget = affinity + } + + // set_contains is a synonym of set_contains_all + if affinity := attr(api.ConstraintSetContains); affinity != "" { + a.Operand = api.ConstraintSetContains + a.RTarget = affinity + } + + if a.Operand == "" { + a.Operand = "=" + } + return diags +} + +var constraintSpec = hcldec.ObjectSpec{ + "attribute": &hcldec.AttrSpec{Name: "attribute", Type: cty.String, Required: false}, + "value": &hcldec.AttrSpec{Name: "value", Type: cty.String, Required: false}, + "operator": &hcldec.AttrSpec{Name: "operator", Type: cty.String, Required: false}, + + api.ConstraintDistinctProperty: &hcldec.AttrSpec{Name: api.ConstraintDistinctProperty, Type: cty.String, Required: false}, + api.ConstraintDistinctHosts: &hcldec.AttrSpec{Name: api.ConstraintDistinctHosts, Type: cty.Bool, Required: false}, + api.ConstraintRegex: &hcldec.AttrSpec{Name: api.ConstraintRegex, Type: cty.String, Required: false}, + api.ConstraintVersion: &hcldec.AttrSpec{Name: api.ConstraintVersion, Type: cty.String, Required: false}, + api.ConstraintSemver: &hcldec.AttrSpec{Name: api.ConstraintSemver, Type: cty.String, Required: false}, + api.ConstraintSetContains: &hcldec.AttrSpec{Name: api.ConstraintSetContains, Type: cty.String, Required: false}, + api.ConstraintSetContainsAll: 
&hcldec.AttrSpec{Name: api.ConstraintSetContainsAll, Type: cty.String, Required: false}, + api.ConstraintSetContainsAny: &hcldec.AttrSpec{Name: api.ConstraintSetContainsAny, Type: cty.String, Required: false}, + api.ConstraintAttributeIsSet: &hcldec.AttrSpec{Name: api.ConstraintAttributeIsSet, Type: cty.String, Required: false}, + api.ConstraintAttributeIsNotSet: &hcldec.AttrSpec{Name: api.ConstraintAttributeIsNotSet, Type: cty.String, Required: false}, +} + +func decodeConstraint(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics { + c := val.(*api.Constraint) + + v, diags := hcldec.Decode(body, constraintSpec, ctx) + if len(diags) != 0 { + return diags + } + + attr := func(attr string) string { + a := v.GetAttr(attr) + if a.IsNull() { + return "" + } + return a.AsString() + } + + c.LTarget = attr("attribute") + c.RTarget = attr("value") + c.Operand = attr("operator") + + // If "version" is provided, set the operand + // to "version" and the value to the "RTarget" + if constraint := attr(api.ConstraintVersion); constraint != "" { + c.Operand = api.ConstraintVersion + c.RTarget = constraint + } + + // If "semver" is provided, set the operand + // to "semver" and the value to the "RTarget" + if constraint := attr(api.ConstraintSemver); constraint != "" { + c.Operand = api.ConstraintSemver + c.RTarget = constraint + } + + // If "regexp" is provided, set the operand + // to "regexp" and the value to the "RTarget" + if constraint := attr(api.ConstraintRegex); constraint != "" { + c.Operand = api.ConstraintRegex + c.RTarget = constraint + } + + // If "set_contains" is provided, set the operand + // to "set_contains" and the value to the "RTarget" + if constraint := attr(api.ConstraintSetContains); constraint != "" { + c.Operand = api.ConstraintSetContains + c.RTarget = constraint + } + + if d := v.GetAttr(api.ConstraintDistinctHosts); !d.IsNull() && d.True() { + c.Operand = api.ConstraintDistinctHosts + } + + if property := attr(api.ConstraintDistinctProperty); property != "" { + c.Operand = api.ConstraintDistinctProperty + c.LTarget = property + } + + if c.Operand == "" { + c.Operand = "=" + } + return diags +} + +func decodeTaskGroup(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics { + tg := val.(*api.TaskGroup) + tgExtra := struct { + Vault *api.Vault `hcl:"vault,block"` + }{} + + extra, _ := gohcl.ImpliedBodySchema(tgExtra) + content, tgBody, diags := body.PartialContent(extra) + if len(diags) != 0 { + return diags + } + + for _, b := range content.Blocks { + if b.Type == "vault" { + v := &api.Vault{} + diags = append(diags, hclDecoder.DecodeBody(b.Body, ctx, v)...) + tgExtra.Vault = v + } + } + + d := newHCLDecoder() + diags = d.DecodeBody(tgBody, ctx, tg) + + if tgExtra.Vault != nil { + for _, t := range tg.Tasks { + if t.Vault == nil { + t.Vault = tgExtra.Vault + } + } + } + + return diags + +} diff --git a/jobspec2/hclutil/blockattrs.go b/jobspec2/hclutil/blockattrs.go new file mode 100644 index 000000000000..a149a4c25a08 --- /dev/null +++ b/jobspec2/hclutil/blockattrs.go @@ -0,0 +1,176 @@ +package hclutil + +import ( + "github.com/hashicorp/hcl/v2" + hcls "github.com/hashicorp/hcl/v2/hclsyntax" +) + +// BlocksAsAttrs rewrites the hcl.Body so that hcl blocks are treated as +// attributes when schema is unknown. +// +// This conversion is necessary for parsing task driver configs, as they can be +// arbitrary nested without pre-defined schema. +// +// More concretely, it changes the following: +// +// ``` +// config { +// meta { ... 
} +// } +// ``` + +// to +// +// ``` +// config { +// meta = { ... } # <- attribute now +// } +// ``` +func BlocksAsAttrs(body hcl.Body) hcl.Body { + if hclb, ok := body.(*hcls.Body); ok { + return &blockAttrs{body: hclb} + } + return body +} + +type blockAttrs struct { + body hcl.Body + + hiddenAttrs map[string]struct{} + hiddenBlocks map[string]struct{} +} + +func (b *blockAttrs) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { + bc, diags := b.body.Content(schema) + bc.Blocks = expandBlocks(bc.Blocks) + return bc, diags +} +func (b *blockAttrs) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { + bc, remainBody, diags := b.body.PartialContent(schema) + bc.Blocks = expandBlocks(bc.Blocks) + + remain := &blockAttrs{ + body: remainBody, + hiddenAttrs: map[string]struct{}{}, + hiddenBlocks: map[string]struct{}{}, + } + for name := range b.hiddenAttrs { + remain.hiddenAttrs[name] = struct{}{} + } + for typeName := range b.hiddenBlocks { + remain.hiddenBlocks[typeName] = struct{}{} + } + for _, attrS := range schema.Attributes { + remain.hiddenAttrs[attrS.Name] = struct{}{} + } + for _, blockS := range schema.Blocks { + remain.hiddenBlocks[blockS.Type] = struct{}{} + } + + return bc, remain, diags +} + +func (b *blockAttrs) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { + body, ok := b.body.(*hcls.Body) + if !ok { + return b.body.JustAttributes() + } + + attrs := make(hcl.Attributes) + var diags hcl.Diagnostics + + if body.Attributes == nil && len(body.Blocks) == 0 { + return attrs, diags + } + + for name, attr := range body.Attributes { + if _, hidden := b.hiddenAttrs[name]; hidden { + continue + } + attrs[name] = attr.AsHCLAttribute() + } + + for _, blockS := range body.Blocks { + if _, hidden := b.hiddenBlocks[blockS.Type]; hidden { + continue + } + + attrs[blockS.Type] = convertToAttribute(blockS).AsHCLAttribute() + } + + return attrs, diags +} + +func (b *blockAttrs) MissingItemRange() hcl.Range { + return b.body.MissingItemRange() +} + +func expandBlocks(blocks hcl.Blocks) hcl.Blocks { + if len(blocks) == 0 { + return blocks + } + + r := make([]*hcl.Block, len(blocks)) + for i, b := range blocks { + nb := *b + nb.Body = BlocksAsAttrs(b.Body) + r[i] = &nb + } + return r +} + +func convertToAttribute(b *hcls.Block) *hcls.Attribute { + items := []hcls.ObjectConsItem{} + + for _, attr := range b.Body.Attributes { + keyExpr := &hcls.ScopeTraversalExpr{ + Traversal: hcl.Traversal{ + hcl.TraverseRoot{ + Name: attr.Name, + SrcRange: attr.NameRange, + }, + }, + SrcRange: attr.NameRange, + } + key := &hcls.ObjectConsKeyExpr{ + Wrapped: keyExpr, + } + + items = append(items, hcls.ObjectConsItem{ + KeyExpr: key, + ValueExpr: attr.Expr, + }) + } + + for _, block := range b.Body.Blocks { + keyExpr := &hcls.ScopeTraversalExpr{ + Traversal: hcl.Traversal{ + hcl.TraverseRoot{ + Name: block.Type, + SrcRange: block.TypeRange, + }, + }, + SrcRange: block.TypeRange, + } + key := &hcls.ObjectConsKeyExpr{ + Wrapped: keyExpr, + } + valExpr := convertToAttribute(block).Expr + items = append(items, hcls.ObjectConsItem{ + KeyExpr: key, + ValueExpr: valExpr, + }) + } + + attr := &hcls.Attribute{ + Name: b.Type, + NameRange: b.TypeRange, + EqualsRange: b.OpenBraceRange, + SrcRange: b.Body.SrcRange, + Expr: &hcls.ObjectConsExpr{ + Items: items, + }, + } + + return attr +} diff --git a/jobspec2/parse.go b/jobspec2/parse.go new file mode 100644 index 000000000000..cc04d0b55fd9 --- /dev/null +++ b/jobspec2/parse.go @@ -0,0 +1,109 @@ +package jobspec2 + 
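For orientation before the file continues: Parse and ParseWithArgs below are the package's public entry points. A hedged usage sketch, assuming this branch's jobspec2 package is importable (the file name and variable values are illustrative; the expression forms are taken from the tests further down):

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/hashicorp/nomad/jobspec2"
)

func main() {
	src := `
job "example" {
  datacenters = [for dc in ["dc1", "dc2"] : upper(dc)]
  region      = vars.region
}
`
	// allowFS=false keeps file(), fileset(), and the other filesystem
	// functions disabled, the safe default for untrusted jobspecs.
	job, err := jobspec2.ParseWithArgs("example.hcl", strings.NewReader(src),
		map[string]string{"region": "global"}, false)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*job.Region, job.Datacenters) // global [DC1 DC2]
}
```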
+import ( + "bytes" + "errors" + "io" + "os" + "path/filepath" + "strings" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/ext/dynblock" + "github.com/hashicorp/hcl/v2/hclsyntax" + hcljson "github.com/hashicorp/hcl/v2/json" + "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/jobspec2/hclutil" + "github.com/zclconf/go-cty/cty" +) + +func Parse(path string, r io.Reader) (*api.Job, error) { + return ParseWithArgs(path, r, nil, false) +} + +func toVars(vars map[string]string) cty.Value { + attrs := make(map[string]cty.Value, len(vars)) + for k, v := range vars { + attrs[k] = cty.StringVal(v) + } + + return cty.ObjectVal(attrs) +} + +func ParseWithArgs(path string, r io.Reader, vars map[string]string, allowFS bool) (*api.Job, error) { + if path == "" { + if f, ok := r.(*os.File); ok { + path = f.Name() + } + } + basedir := filepath.Dir(path) + + // Copy the reader into an in-memory buffer first since HCL requires it. + var buf bytes.Buffer + if _, err := io.Copy(&buf, r); err != nil { + return nil, err + } + + evalContext := &hcl.EvalContext{ + Functions: Functions(basedir, allowFS), + Variables: map[string]cty.Value{ + "vars": toVars(vars), + }, + UnknownVariable: func(expr string) (cty.Value, error) { + v := "${" + expr + "}" + return cty.StringVal(v), nil + }, + } + var result struct { + Job jobWrapper `hcl:"job,block"` + } + err := decode(path, buf.Bytes(), evalContext, &result) + if err != nil { + return nil, err + } + + normalizeJob(&result.Job) + return result.Job.Job, nil +} + +func decode(filename string, src []byte, ctx *hcl.EvalContext, target interface{}) error { + var file *hcl.File + var diags hcl.Diagnostics + + if !isJSON(src) { + file, diags = hclsyntax.ParseConfig(src, filename, hcl.Pos{Line: 1, Column: 1}) + } else { + file, diags = hcljson.Parse(src, filename) + + } + if diags.HasErrors() { + return diags + } + + body := hclutil.BlocksAsAttrs(file.Body) + body = dynblock.Expand(body, ctx) + diags = hclDecoder.DecodeBody(body, ctx, target) + if diags.HasErrors() { + var str strings.Builder + for i, diag := range diags { + if i != 0 { + str.WriteByte('\n') + } + str.WriteString(diag.Error()) + } + return errors.New(str.String()) + } + diags = append(diags, decodeMapInterfaceType(target, ctx)...) + return nil +} + +func isJSON(src []byte) bool { + for _, c := range src { + if c == ' ' { + continue + } + + return c == '{' + } + return false +} diff --git a/jobspec2/parse_job.go b/jobspec2/parse_job.go new file mode 100644 index 000000000000..ec50fe40f38a --- /dev/null +++ b/jobspec2/parse_job.go @@ -0,0 +1,169 @@ +package jobspec2 + +import ( + "time" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/hashicorp/nomad/api" +) + +type jobWrapper struct { + JobID string `hcl:",label"` + Job *api.Job + + Extra struct { + Vault *api.Vault `hcl:"vault,block"` + Tasks []*api.Task `hcl:"task,block"` + } +} + +func decodeJob(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics { + m := val.(*jobWrapper) + extra, _ := gohcl.ImpliedBodySchema(m.Extra) + content, job, diags := body.PartialContent(extra) + if len(diags) != 0 { + return diags + } + + for _, b := range content.Blocks { + if b.Type == "vault" { + v := &api.Vault{} + diags = append(diags, hclDecoder.DecodeBody(b.Body, ctx, v)...) + m.Extra.Vault = v + } else if b.Type == "task" { + t := &api.Task{} + diags = append(diags, hclDecoder.DecodeBody(b.Body, ctx, t)...) 
+ if len(b.Labels) == 1 { + t.Name = b.Labels[0] + m.Extra.Tasks = append(m.Extra.Tasks, t) + } + } + } + + m.Job = &api.Job{} + return hclDecoder.DecodeBody(job, ctx, m.Job) +} + +func normalizeJob(jw *jobWrapper) { + j := jw.Job + if j.Name == nil { + j.Name = &jw.JobID + } + if j.ID == nil { + j.ID = &jw.JobID + } + + if j.Periodic != nil && j.Periodic.Spec != nil { + v := "cron" + j.Periodic.SpecType = &v + } + + normalizeVault(jw.Extra.Vault) + + if len(jw.Extra.Tasks) != 0 { + alone := make([]*api.TaskGroup, 0, len(jw.Extra.Tasks)) + for _, t := range jw.Extra.Tasks { + alone = append(alone, &api.TaskGroup{ + Name: &t.Name, + Tasks: []*api.Task{t}, + }) + } + alone = append(alone, j.TaskGroups...) + j.TaskGroups = alone + } + + for _, tg := range j.TaskGroups { + normalizeNetworkPorts(tg.Networks) + for _, t := range tg.Tasks { + if t.Resources != nil { + normalizeNetworkPorts(t.Resources.Networks) + } + + normalizeTemplates(t.Templates) + + // normalize Vault + normalizeVault(t.Vault) + + if t.Vault == nil { + t.Vault = jw.Extra.Vault + } + } + } +} + +func normalizeVault(v *api.Vault) { + if v == nil { + return + } + + if v.Env == nil { + v.Env = boolToPtr(true) + } + if v.ChangeMode == nil { + v.ChangeMode = stringToPtr("restart") + } +} + +func normalizeNetworkPorts(networks []*api.NetworkResource) { + if networks == nil { + return + } + for _, n := range networks { + if len(n.DynamicPorts) == 0 { + continue + } + + dynamic := make([]api.Port, 0, len(n.DynamicPorts)) + var reserved []api.Port + + for _, p := range n.DynamicPorts { + if p.Value > 0 { + reserved = append(reserved, p) + } else { + dynamic = append(dynamic, p) + } + } + if len(dynamic) == 0 { + dynamic = nil + } + + n.DynamicPorts = dynamic + n.ReservedPorts = reserved + } + +} + +func normalizeTemplates(templates []*api.Template) { + if len(templates) == 0 { + return + } + + for _, t := range templates { + if t.ChangeMode == nil { + t.ChangeMode = stringToPtr("restart") + } + if t.Perms == nil { + t.Perms = stringToPtr("0644") + } + if t.Splay == nil { + t.Splay = durationToPtr(5 * time.Second) + } + } +} + +func int8ToPtr(v int8) *int8 { + return &v +} + +func boolToPtr(v bool) *bool { + return &v +} + +func stringToPtr(v string) *string { + return &v +} + +func durationToPtr(v time.Duration) *time.Duration { + return &v +} diff --git a/jobspec2/parse_map.go b/jobspec2/parse_map.go new file mode 100644 index 000000000000..95c04138de0a --- /dev/null +++ b/jobspec2/parse_map.go @@ -0,0 +1,172 @@ +package jobspec2 + +import ( + "fmt" + "math" + "math/big" + "reflect" + + "github.com/hashicorp/hcl/v2" + "github.com/mitchellh/reflectwalk" + "github.com/zclconf/go-cty/cty" +) + +// decodeMapInterfaceType decodes hcl instances of `map[string]interface{}` fields +// of v. +// +// The HCL parser stores the hcl AST as the map values, and decodeMapInterfaceType +// evaluates the AST and converts them to the native golang types. 
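Concretely: after gohcl decoding, a `map[string]interface{}` field (a task's `config`, for instance) still holds unevaluated `*hcl.Attribute` values, and this pass walks the struct and swaps each attribute for its evaluated Go value. As a shape-only illustration of the reflectwalk pattern used below (string rewriting stands in for expression evaluation):

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/mitchellh/reflectwalk"
)

// exclaimWalker appends "!" to every string value it finds in
// map[string]interface{} fields, using the same Map/MapElem traversal
// shape the real walker uses to replace *hcl.Attribute values.
type exclaimWalker struct{}

func (exclaimWalker) Map(m reflect.Value) error {
	for _, k := range m.MapKeys() {
		if s, ok := m.MapIndex(k).Interface().(string); ok {
			m.SetMapIndex(k, reflect.ValueOf(s+"!"))
		}
	}
	return nil
}

func (exclaimWalker) MapElem(m, k, v reflect.Value) error { return nil }

func main() {
	task := struct{ Config map[string]interface{} }{
		Config: map[string]interface{}{"command": "/bin/sleep"},
	}
	if err := reflectwalk.Walk(&task, exclaimWalker{}); err != nil {
		panic(err)
	}
	fmt.Println(task.Config) // map[command:/bin/sleep!]
}
```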
+func decodeMapInterfaceType(v interface{}, ctx *hcl.EvalContext) hcl.Diagnostics { + w := &walker{ctx: ctx} + err := reflectwalk.Walk(v, w) + if err != nil { + w.diags = append(w.diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "unexpected internal error", + Detail: err.Error(), + }) + } + return w.diags +} + +type walker struct { + ctx *hcl.EvalContext + diags hcl.Diagnostics +} + +var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{}) + +func (w *walker) Map(m reflect.Value) error { + if !m.Type().AssignableTo(mapStringInterfaceType) { + return nil + } + + for _, k := range m.MapKeys() { + v := m.MapIndex(k) + if attr, ok := v.Interface().(*hcl.Attribute); ok { + c, diags := decodeInterface(attr.Expr, w.ctx) + w.diags = append(w.diags, diags...) + + m.SetMapIndex(k, reflect.ValueOf(c)) + } + } + return nil +} + +func (w *walker) MapElem(m, k, v reflect.Value) error { + return nil +} +func decodeInterface(expr hcl.Expression, ctx *hcl.EvalContext) (interface{}, hcl.Diagnostics) { + srvVal, diags := expr.Value(ctx) + + dst, err := interfaceFromCtyValue(srvVal) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "unsuitable value type", + Detail: fmt.Sprintf("Unsuitable value: %s", err.Error()), + Subject: expr.StartRange().Ptr(), + Context: expr.Range().Ptr(), + }) + } + return dst, diags +} + +func interfaceFromCtyValue(val cty.Value) (interface{}, error) { + t := val.Type() + + if val.IsNull() { + return nil, nil + } + + if !val.IsKnown() { + return nil, fmt.Errorf("value is not known") + } + + // The caller should've guaranteed that the given val is conformant with + // the given type t, so we'll proceed under that assumption here. + + switch { + case t.IsPrimitiveType(): + switch t { + case cty.String: + return val.AsString(), nil + case cty.Number: + if val.RawEquals(cty.PositiveInfinity) { + return math.Inf(1), nil + } else if val.RawEquals(cty.NegativeInfinity) { + return math.Inf(-1), nil + } else { + return smallestNumber(val.AsBigFloat()), nil + } + case cty.Bool: + return val.True(), nil + default: + panic("unsupported primitive type") + } + case t.IsListType(), t.IsSetType(), t.IsTupleType(): + result := []interface{}{} + + it := val.ElementIterator() + for it.Next() { + _, ev := it.Element() + evi, err := interfaceFromCtyValue(ev) + if err != nil { + return nil, err + } + result = append(result, evi) + } + return result, nil + case t.IsMapType(): + result := map[string]interface{}{} + it := val.ElementIterator() + for it.Next() { + ek, ev := it.Element() + + ekv := ek.AsString() + evv, err := interfaceFromCtyValue(ev) + if err != nil { + return nil, err + } + + result[ekv] = evv + } + return []map[string]interface{}{result}, nil + case t.IsObjectType(): + result := map[string]interface{}{} + + for k := range t.AttributeTypes() { + av := val.GetAttr(k) + avv, err := interfaceFromCtyValue(av) + if err != nil { + return nil, err + } + + result[k] = avv + } + return []map[string]interface{}{result}, nil + case t.IsCapsuleType(): + rawVal := val.EncapsulatedValue() + return rawVal, nil + default: + // should never happen + return nil, fmt.Errorf("cannot serialize %s", t.FriendlyName()) + } +} + +func smallestNumber(b *big.Float) interface{} { + + if v, acc := b.Int64(); acc == big.Exact { + // check if it fits in int + if int64(int(v)) == v { + return int(v) + } + return v + } + + if v, acc := b.Float64(); acc == big.Exact || acc == big.Above { + return v + } + + return b +} diff --git 
a/jobspec2/parse_test.go b/jobspec2/parse_test.go new file mode 100644 index 000000000000..9d6a20b472a2 --- /dev/null +++ b/jobspec2/parse_test.go @@ -0,0 +1,130 @@ +package jobspec2 + +import ( + "io/ioutil" + "os" + "strings" + "testing" + + "github.com/hashicorp/nomad/jobspec" + "github.com/stretchr/testify/require" +) + +func TestEquivalentToHCL1(t *testing.T) { + hclSpecDir := "../jobspec/test-fixtures/" + fis, err := ioutil.ReadDir(hclSpecDir) + require.NoError(t, err) + + for _, fi := range fis { + name := fi.Name() + + t.Run(name, func(t *testing.T) { + f, err := os.Open(hclSpecDir + name) + require.NoError(t, err) + defer f.Close() + + job1, err := jobspec.Parse(f) + if err != nil { + t.Skip("file is not parsable in v1") + } + + f.Seek(0, 0) + + job2, err := Parse(name, f) + require.NoError(t, err) + + require.Equal(t, job1, job2) + }) + } +} + +func TestParse_VarsAndFunctions(t *testing.T) { + hcl := ` +job "example" { + datacenters = [for s in ["dc1", "dc2"] : upper(s)] + region = vars.region_var +} +` + + out, err := ParseWithArgs("input.hcl", strings.NewReader(hcl), map[string]string{"region_var": "aug"}, true) + require.NoError(t, err) + + require.Equal(t, []string{"DC1", "DC2"}, out.Datacenters) + require.NotNil(t, out.Region) + require.Equal(t, "aug", *out.Region) +} + +// TestParse_UnknownVariables asserts that unknown variables are left intact for further processing +func TestParse_UnknownVariables(t *testing.T) { + hcl := ` +job "example" { + datacenters = [for s in ["dc1", "dc2"] : upper(s)] + region = vars.region_var + meta { + known_var = "${vars.region_var}" + unknown_var = "${UNKNOWN}" + } +} +` + + out, err := ParseWithArgs("input.hcl", strings.NewReader(hcl), map[string]string{"region_var": "aug"}, true) + require.NoError(t, err) + + meta := map[string]string{ + "known_var": "aug", + "unknown_var": "${UNKNOWN}", + } + + require.Equal(t, meta, out.Meta) +} + +func TestParse_FileOperators(t *testing.T) { + hcl := ` +job "example" { + region = file("parse_test.go") +} +` + + t.Run("enabled", func(t *testing.T) { + out, err := ParseWithArgs("input.hcl", strings.NewReader(hcl), nil, true) + require.NoError(t, err) + + expected, err := ioutil.ReadFile("parse_test.go") + require.NoError(t, err) + + require.NotNil(t, out.Region) + require.Equal(t, string(expected), *out.Region) + }) + + t.Run("disabled", func(t *testing.T) { + _, err := ParseWithArgs("input.hcl", strings.NewReader(hcl), nil, false) + require.Error(t, err) + require.Contains(t, err.Error(), "filesystem function disabled") + }) +} + +func TestParseDynamic(t *testing.T) { + hcl := ` +job "example" { + +dynamic "group" { + for_each = ["groupA", "groupB", "groupC"] + labels = [group.value] + + content { + task "simple" { + driver = "raw_exec" + + } + } +} +} +` + out, err := ParseWithArgs("input.hcl", strings.NewReader(hcl), nil, true) + require.NoError(t, err) + + require.Len(t, out.TaskGroups, 3) + require.Equal(t, "groupA", *out.TaskGroups[0].Name) + require.Equal(t, "groupB", *out.TaskGroups[1].Name) + require.Equal(t, "groupC", *out.TaskGroups[2].Name) +} diff --git a/ui/mirage/faker.js b/ui/mirage/faker.js index 2baad57fb4c5..817781701b4e 100644 --- a/ui/mirage/faker.js +++ b/ui/mirage/faker.js @@ -11,6 +11,10 @@ if (config.environment !== 'test' || searchIncludesSeed) { } else { faker.seed(1); } +} else if (config.environment === 'test') { + const randomSeed = faker.random.number(); + console.log(`No seed specified with faker-seed query parameter, seeding Faker with ${randomSeed}`); + 
faker.seed(randomSeed); } export default faker; diff --git a/vendor/github.com/apparentlymart/go-cidr/LICENSE b/vendor/github.com/apparentlymart/go-cidr/LICENSE new file mode 100644 index 000000000000..21253788607b --- /dev/null +++ b/vendor/github.com/apparentlymart/go-cidr/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2015 Martin Atkins + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go new file mode 100644 index 000000000000..ed749d4c51cf --- /dev/null +++ b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go @@ -0,0 +1,218 @@ +// Package cidr is a collection of assorted utilities for computing +// network and host addresses within network ranges. +// +// It expects a CIDR-type address structure where addresses are divided into +// some number of prefix bits representing the network and then the remaining +// suffix bits represent the host. +// +// For example, it can help to calculate addresses for sub-networks of a +// parent network, or to calculate host addresses within a particular prefix. +// +// At present this package is prioritizing simplicity of implementation and +// de-prioritizing speed and memory usage. Thus caution is advised before +// using this package in performance-critical applications or hot codepaths. +// Patches to improve the speed and memory usage may be accepted as long as +// they do not result in a significant increase in code complexity. +package cidr + +import ( + "fmt" + "math/big" + "net" +) + +// Subnet takes a parent CIDR range and creates a subnet within it +// with the given number of additional prefix bits and the given +// network number. +// +// For example, 10.3.0.0/16, extended by 8 bits, with a network number +// of 5, becomes 10.3.5.0/24 . +func Subnet(base *net.IPNet, newBits int, num int) (*net.IPNet, error) { + ip := base.IP + mask := base.Mask + + parentLen, addrLen := mask.Size() + newPrefixLen := parentLen + newBits + + if newPrefixLen > addrLen { + return nil, fmt.Errorf("insufficient address space to extend prefix of %d by %d", parentLen, newBits) + } + + maxNetNum := uint64(1<<uint64(newBits)) - 1 + if uint64(num) > maxNetNum { + return nil, fmt.Errorf("prefix extension of %d does not accommodate a subnet numbered %d", newBits, num) + } + + return &net.IPNet{ + IP: insertNumIntoIP(ip, big.NewInt(int64(num)), newPrefixLen), + Mask: net.CIDRMask(newPrefixLen, addrLen), + }, nil +} + +// Host takes a parent CIDR range and turns it into a host IP address with +// the given host number.
+// +// For example, 10.3.0.0/16 with a host number of 2 gives 10.3.0.2. +func Host(base *net.IPNet, num int) (net.IP, error) { + ip := base.IP + mask := base.Mask + bigNum := big.NewInt(int64(num)) + + parentLen, addrLen := mask.Size() + hostLen := addrLen - parentLen + + maxHostNum := big.NewInt(int64(1)) + maxHostNum.Lsh(maxHostNum, uint(hostLen)) + maxHostNum.Sub(maxHostNum, big.NewInt(1)) + + numUint64 := big.NewInt(int64(bigNum.Uint64())) + if bigNum.Cmp(big.NewInt(0)) == -1 { + numUint64.Neg(bigNum) + numUint64.Sub(numUint64, big.NewInt(int64(1))) + bigNum.Sub(maxHostNum, numUint64) + } + + if numUint64.Cmp(maxHostNum) == 1 { + return nil, fmt.Errorf("prefix of %d does not accommodate a host numbered %d", parentLen, num) + } + var bitlength int + if ip.To4() != nil { + bitlength = 32 + } else { + bitlength = 128 + } + return insertNumIntoIP(ip, bigNum, bitlength), nil +} + +// AddressRange returns the first and last addresses in the given CIDR range. +func AddressRange(network *net.IPNet) (net.IP, net.IP) { + // the first IP is easy + firstIP := network.IP + + // the last IP is the network address OR NOT the mask address + prefixLen, bits := network.Mask.Size() + if prefixLen == bits { + // Easy! + // But make sure that our two slices are distinct, since they + // would be in all other cases. + lastIP := make([]byte, len(firstIP)) + copy(lastIP, firstIP) + return firstIP, lastIP + } + + firstIPInt, bits := ipToInt(firstIP) + hostLen := uint(bits) - uint(prefixLen) + lastIPInt := big.NewInt(1) + lastIPInt.Lsh(lastIPInt, hostLen) + lastIPInt.Sub(lastIPInt, big.NewInt(1)) + lastIPInt.Or(lastIPInt, firstIPInt) + + return firstIP, intToIP(lastIPInt, bits) +} + +// AddressCount returns the number of distinct host addresses within the given +// CIDR range. +// +// Since the result is a uint64, this function returns meaningful information +// only for IPv4 ranges and IPv6 ranges with a prefix size of at least 65. +func AddressCount(network *net.IPNet) uint64 { + prefixLen, bits := network.Mask.Size() + return 1 << (uint64(bits) - uint64(prefixLen)) +} + +//VerifyNoOverlap takes a list subnets and supernet (CIDRBlock) and verifies +//none of the subnets overlap and all subnets are in the supernet +//it returns an error if any of those conditions are not satisfied +func VerifyNoOverlap(subnets []*net.IPNet, CIDRBlock *net.IPNet) error { + firstLastIP := make([][]net.IP, len(subnets)) + for i, s := range subnets { + first, last := AddressRange(s) + firstLastIP[i] = []net.IP{first, last} + } + for i, s := range subnets { + if !CIDRBlock.Contains(firstLastIP[i][0]) || !CIDRBlock.Contains(firstLastIP[i][1]) { + return fmt.Errorf("%s does not fully contain %s", CIDRBlock.String(), s.String()) + } + for j := 0; j < len(subnets); j++ { + if i == j { + continue + } + + first := firstLastIP[j][0] + last := firstLastIP[j][1] + if s.Contains(first) || s.Contains(last) { + return fmt.Errorf("%s overlaps with %s", subnets[j].String(), s.String()) + } + } + } + return nil +} + +// PreviousSubnet returns the subnet of the desired mask in the IP space +// just lower than the start of IPNet provided. 
If the IP space rolls over +// then the second return value is true +func PreviousSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) { + startIP := checkIPv4(network.IP) + previousIP := make(net.IP, len(startIP)) + copy(previousIP, startIP) + cMask := net.CIDRMask(prefixLen, 8*len(previousIP)) + previousIP = Dec(previousIP) + previous := &net.IPNet{IP: previousIP.Mask(cMask), Mask: cMask} + if startIP.Equal(net.IPv4zero) || startIP.Equal(net.IPv6zero) { + return previous, true + } + return previous, false +} + +// NextSubnet returns the next available subnet of the desired mask size +// starting for the maximum IP of the offset subnet +// If the IP exceeds the maxium IP then the second return value is true +func NextSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) { + _, currentLast := AddressRange(network) + mask := net.CIDRMask(prefixLen, 8*len(currentLast)) + currentSubnet := &net.IPNet{IP: currentLast.Mask(mask), Mask: mask} + _, last := AddressRange(currentSubnet) + last = Inc(last) + next := &net.IPNet{IP: last.Mask(mask), Mask: mask} + if last.Equal(net.IPv4zero) || last.Equal(net.IPv6zero) { + return next, true + } + return next, false +} + +//Inc increases the IP by one this returns a new []byte for the IP +func Inc(IP net.IP) net.IP { + IP = checkIPv4(IP) + incIP := make([]byte, len(IP)) + copy(incIP, IP) + for j := len(incIP) - 1; j >= 0; j-- { + incIP[j]++ + if incIP[j] > 0 { + break + } + } + return incIP +} + +//Dec decreases the IP by one this returns a new []byte for the IP +func Dec(IP net.IP) net.IP { + IP = checkIPv4(IP) + decIP := make([]byte, len(IP)) + copy(decIP, IP) + decIP = checkIPv4(decIP) + for j := len(decIP) - 1; j >= 0; j-- { + decIP[j]-- + if decIP[j] < 255 { + break + } + } + return decIP +} + +func checkIPv4(ip net.IP) net.IP { + // Go for some reason allocs IPv6len for IPv4 so we have to correct it + if v4 := ip.To4(); v4 != nil { + return v4 + } + return ip +} diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go b/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go new file mode 100644 index 000000000000..e5e6a2cf91a1 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go @@ -0,0 +1,37 @@ +package cidr + +import ( + "fmt" + "math/big" + "net" +) + +func ipToInt(ip net.IP) (*big.Int, int) { + val := &big.Int{} + val.SetBytes([]byte(ip)) + if len(ip) == net.IPv4len { + return val, 32 + } else if len(ip) == net.IPv6len { + return val, 128 + } else { + panic(fmt.Errorf("Unsupported address length %d", len(ip))) + } +} + +func intToIP(ipInt *big.Int, bits int) net.IP { + ipBytes := ipInt.Bytes() + ret := make([]byte, bits/8) + // Pack our IP bytes into the end of the return array, + // since big.Int.Bytes() removes front zero padding. 
+ for i := 1; i <= len(ipBytes); i++ { + ret[len(ret)-i] = ipBytes[len(ipBytes)-i] + } + return net.IP(ret) +} + +func insertNumIntoIP(ip net.IP, bigNum *big.Int, prefixLen int) net.IP { + ipInt, totalBits := ipToInt(ip) + bigNum.Lsh(bigNum, uint(totalBits-prefixLen)) + ipInt.Or(ipInt, bigNum) + return intToIP(ipInt, totalBits) +} diff --git a/vendor/github.com/bmatcuk/doublestar/.gitignore b/vendor/github.com/bmatcuk/doublestar/.gitignore new file mode 100644 index 000000000000..af212ecc2819 --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/.gitignore @@ -0,0 +1,32 @@ +# vi +*~ +*.swp +*.swo + +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# test directory +test/ diff --git a/vendor/github.com/bmatcuk/doublestar/.travis.yml b/vendor/github.com/bmatcuk/doublestar/.travis.yml new file mode 100644 index 000000000000..ec4fee889646 --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/.travis.yml @@ -0,0 +1,15 @@ +language: go + +go: + - 1.11 + - 1.12 + +before_install: + - go get -t -v ./... + +script: + - go test -race -coverprofile=coverage.txt -covermode=atomic + +after_success: + - bash <(curl -s https://codecov.io/bash) + diff --git a/vendor/github.com/bmatcuk/doublestar/LICENSE b/vendor/github.com/bmatcuk/doublestar/LICENSE new file mode 100644 index 000000000000..309c9d1d113f --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2014 Bob Matcuk + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/bmatcuk/doublestar/README.md b/vendor/github.com/bmatcuk/doublestar/README.md new file mode 100644 index 000000000000..8e365c5e3f93 --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/README.md @@ -0,0 +1,109 @@ +![Release](https://img.shields.io/github/release/bmatcuk/doublestar.svg?branch=master) +[![Build Status](https://travis-ci.org/bmatcuk/doublestar.svg?branch=master)](https://travis-ci.org/bmatcuk/doublestar) +[![codecov.io](https://img.shields.io/codecov/c/github/bmatcuk/doublestar.svg?branch=master)](https://codecov.io/github/bmatcuk/doublestar?branch=master) + +# doublestar + +**doublestar** is a [golang](http://golang.org/) implementation of path pattern +matching and globbing with support for "doublestar" (aka globstar: `**`) +patterns. 
+ +doublestar patterns match files and directories recursively. For example, if +you had the following directory structure: + +``` +grandparent +`-- parent + |-- child1 + `-- child2 +``` + +You could find the children with patterns such as: `**/child*`, +`grandparent/**/child?`, `**/parent/*`, or even just `**` by itself (which will +return all files and directories recursively). + +Bash's globstar is doublestar's inspiration and, as such, works similarly. +Note that the doublestar must appear as a path component by itself. A pattern +such as `/path**` is invalid and will be treated the same as `/path*`, but +`/path*/**` should achieve the desired result. Additionally, `/path/**` will +match all directories and files under the path directory, but `/path/**/` will +only match directories. + +## Installation + +**doublestar** can be installed via `go get`: + +```bash +go get github.com/bmatcuk/doublestar +``` + +To use it in your code, you must import it: + +```go +import "github.com/bmatcuk/doublestar" +``` + +## Functions + +### Match +```go +func Match(pattern, name string) (bool, error) +``` + +Match returns true if `name` matches the file name `pattern` +([see below](#patterns)). `name` and `pattern` are split on forward slash (`/`) +characters and may be relative or absolute. + +Note: `Match()` is meant to be a drop-in replacement for `path.Match()`. As +such, it always uses `/` as the path separator. If you are writing code that +will run on systems where `/` is not the path separator (such as Windows), you +want to use `PathMatch()` (below) instead. + + +### PathMatch +```go +func PathMatch(pattern, name string) (bool, error) +``` + +PathMatch returns true if `name` matches the file name `pattern` +([see below](#patterns)). The difference between Match and PathMatch is that +PathMatch will automatically use your system's path separator to split `name` +and `pattern`. + +`PathMatch()` is meant to be a drop-in replacement for `filepath.Match()`. + +### Glob +```go +func Glob(pattern string) ([]string, error) +``` + +Glob finds all files and directories in the filesystem that match `pattern` +([see below](#patterns)). `pattern` may be relative (to the current working +directory), or absolute. + +`Glob()` is meant to be a drop-in replacement for `filepath.Glob()`. + +## Patterns + +**doublestar** supports the following special terms in the patterns: + +Special Terms | Meaning +------------- | ------- +`*` | matches any sequence of non-path-separators +`**` | matches any sequence of characters, including path separators +`?` | matches any single non-path-separator character +`[class]` | matches any single non-path-separator character against a class of characters ([see below](#character-classes)) +`{alt1,...}` | matches a sequence of characters if one of the comma-separated alternatives matches + +Any character with a special meaning can be escaped with a backslash (`\`). 
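+
+As a quick sketch of how the pieces fit together (not from the original docs;
+the directory names and the `**/*.go` pattern are just example inputs):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/bmatcuk/doublestar"
+)
+
+func main() {
+	// Match is pure string matching; it never touches the filesystem.
+	ok, err := doublestar.Match("grandparent/**/child?", "grandparent/parent/child1")
+	fmt.Println(ok, err) // true <nil>
+
+	// Glob walks the filesystem relative to the working directory.
+	matches, err := doublestar.Glob("**/*.go")
+	fmt.Println(matches, err)
+}
+```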
+
+### Character Classes
+
+Character classes support the following:
+
+Class      | Meaning
+---------- | -------
+`[abc]`    | matches any single character within the set
+`[a-z]`    | matches any single character in the range
+`[^class]` | matches any single character which does *not* match the class
+
diff --git a/vendor/github.com/bmatcuk/doublestar/doublestar.go b/vendor/github.com/bmatcuk/doublestar/doublestar.go
new file mode 100644
index 000000000000..0044dfa8314e
--- /dev/null
+++ b/vendor/github.com/bmatcuk/doublestar/doublestar.go
@@ -0,0 +1,476 @@
+package doublestar
+
+import (
+	"fmt"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+	"unicode/utf8"
+)
+
+// ErrBadPattern indicates a pattern was malformed.
+var ErrBadPattern = path.ErrBadPattern
+
+// Split a path on the given separator, respecting escaping.
+func splitPathOnSeparator(path string, separator rune) (ret []string) {
+	idx := 0
+	if separator == '\\' {
+		// if the separator is '\\', then we can just split...
+		ret = strings.Split(path, string(separator))
+		idx = len(ret)
+	} else {
+		// otherwise, we need to be careful of situations where the separator was escaped
+		cnt := strings.Count(path, string(separator))
+		if cnt == 0 {
+			return []string{path}
+		}
+
+		ret = make([]string, cnt+1)
+		pathlen := len(path)
+		separatorLen := utf8.RuneLen(separator)
+		emptyEnd := false
+		for start := 0; start < pathlen; {
+			end := indexRuneWithEscaping(path[start:], separator)
+			if end == -1 {
+				emptyEnd = false
+				end = pathlen
+			} else {
+				emptyEnd = true
+				end += start
+			}
+			ret[idx] = path[start:end]
+			start = end + separatorLen
+			idx++
+		}
+
+		// If the last rune is a path separator, we need to append an empty string to
+		// represent the last, empty path component. By default, the strings from
+		// make([]string, ...) will be empty, so we just need to increment the count.
+		if emptyEnd {
+			idx++
+		}
+	}
+
+	return ret[:idx]
+}
+
+// Find the first index of a rune in a string,
+// ignoring any times the rune is escaped using "\".
+func indexRuneWithEscaping(s string, r rune) int {
+	end := strings.IndexRune(s, r)
+	if end == -1 {
+		return -1
+	}
+	if end > 0 && s[end-1] == '\\' {
+		start := end + utf8.RuneLen(r)
+		end = indexRuneWithEscaping(s[start:], r)
+		if end != -1 {
+			end += start
+		}
+	}
+	return end
+}
+
+// Match returns true if name matches the shell file name pattern.
+// The pattern syntax is:
+//
+//  pattern:
+//    { term }
+//  term:
+//    '*'         matches any sequence of non-path-separators
+//    '**'        matches any sequence of characters, including
+//                path separators.
+//    '?'         matches any single non-path-separator character
+//    '[' [ '^' ] { character-range } ']'
+//                character class (must be non-empty)
+//    '{' { term } [ ',' { term } ... ] '}'
+//    c           matches character c (c != '*', '?', '\\', '[')
+//    '\\' c      matches character c
+//
+//  character-range:
+//    c           matches character c (c != '\\', '-', ']')
+//    '\\' c      matches character c
+//    lo '-' hi   matches character c for lo <= c <= hi
+//
+// Match requires pattern to match all of name, not just a substring.
+// The path-separator defaults to the '/' character. The only possible
+// returned error is ErrBadPattern, when pattern is malformed.
+//
+// Note: this is meant as a drop-in replacement for path.Match() which
+// always uses '/' as the path separator. If you want to support systems
+// which use a different path separator (such as Windows), what you want
+// is the PathMatch() function below.
+//
+func Match(pattern, name string) (bool, error) {
+	return matchWithSeparator(pattern, name, '/')
+}
+
+// PathMatch is like Match except that it uses your system's path separator.
+// For most systems, this will be '/'. However, for Windows, it would be '\\'.
+// Note that for systems where the path separator is '\\', escaping is
+// disabled.
+//
+// Note: this is meant as a drop-in replacement for filepath.Match().
+//
+func PathMatch(pattern, name string) (bool, error) {
+	return matchWithSeparator(pattern, name, os.PathSeparator)
+}
+
+// matchWithSeparator returns true if name matches the shell file name pattern.
+// The pattern syntax is:
+//
+//  pattern:
+//    { term }
+//  term:
+//    '*'         matches any sequence of non-path-separators
+//    '**'        matches any sequence of characters, including
+//                path separators.
+//    '?'         matches any single non-path-separator character
+//    '[' [ '^' ] { character-range } ']'
+//                character class (must be non-empty)
+//    '{' { term } [ ',' { term } ... ] '}'
+//    c           matches character c (c != '*', '?', '\\', '[')
+//    '\\' c      matches character c
+//
+//  character-range:
+//    c           matches character c (c != '\\', '-', ']')
+//    '\\' c      matches character c, unless separator is '\\'
+//    lo '-' hi   matches character c for lo <= c <= hi
+//
+// Match requires pattern to match all of name, not just a substring.
+// The only possible returned error is ErrBadPattern, when pattern
+// is malformed.
+//
+func matchWithSeparator(pattern, name string, separator rune) (bool, error) {
+	patternComponents := splitPathOnSeparator(pattern, separator)
+	nameComponents := splitPathOnSeparator(name, separator)
+	return doMatching(patternComponents, nameComponents)
+}
+
+func doMatching(patternComponents, nameComponents []string) (matched bool, err error) {
+	// check for some base-cases
+	patternLen, nameLen := len(patternComponents), len(nameComponents)
+	if patternLen == 0 && nameLen == 0 {
+		return true, nil
+	}
+	if patternLen == 0 || nameLen == 0 {
+		return false, nil
+	}
+
+	patIdx, nameIdx := 0, 0
+	for patIdx < patternLen && nameIdx < nameLen {
+		if patternComponents[patIdx] == "**" {
+			// if our last pattern component is a doublestar, we're done -
+			// doublestar will match any remaining name components, if any.
+			if patIdx++; patIdx >= patternLen {
+				return true, nil
+			}
+
+			// otherwise, try matching remaining components
+			for ; nameIdx < nameLen; nameIdx++ {
+				if m, _ := doMatching(patternComponents[patIdx:], nameComponents[nameIdx:]); m {
+					return true, nil
+				}
+			}
+			return false, nil
+		}
+
+		// try matching components
+		matched, err = matchComponent(patternComponents[patIdx], nameComponents[nameIdx])
+		if !matched || err != nil {
+			return
+		}
+
+		patIdx++
+		nameIdx++
+	}
+	return patIdx >= patternLen && nameIdx >= nameLen, nil
+}
+
+// Glob returns the names of all files matching pattern or nil
+// if there is no matching file. The syntax of pattern is the same
+// as in Match. The pattern may describe hierarchical names such as
+// /usr/*/bin/ed (assuming the Separator is '/').
+//
+// Glob ignores file system errors such as I/O errors reading directories.
+// The only possible returned error is ErrBadPattern, when pattern
+// is malformed.
+//
+// Your system path separator is automatically used. This means on
+// systems where the separator is '\\' (Windows), escaping will be
+// disabled.
+//
+// Note: this is meant as a drop-in replacement for filepath.Glob().
+//
+func Glob(pattern string) (matches []string, err error) {
+	patternComponents := splitPathOnSeparator(filepath.ToSlash(pattern), '/')
+	if len(patternComponents) == 0 {
+		return nil, nil
+	}
+
+	// On Windows systems, this will return the drive name ('C:') for filesystem
+	// paths, or \\<server>\<share> for UNC paths. On other systems, it will
+	// return an empty string. Since absolute paths on non-Windows systems start
+	// with a slash, patternComponent[0] == volumeName will return true for both
+	// absolute Windows paths and absolute non-Windows paths, but we need a
+	// separate check for UNC paths.
+	volumeName := filepath.VolumeName(pattern)
+	isWindowsUNC := strings.HasPrefix(pattern, `\\`)
+	if isWindowsUNC || patternComponents[0] == volumeName {
+		startComponentIndex := 1
+		if isWindowsUNC {
+			startComponentIndex = 4
+		}
+		return doGlob(fmt.Sprintf("%s%s", volumeName, string(os.PathSeparator)), patternComponents[startComponentIndex:], matches)
+	}
+
+	// otherwise, it's a relative pattern
+	return doGlob(".", patternComponents, matches)
+}
+
+// Perform a glob
+func doGlob(basedir string, components, matches []string) (m []string, e error) {
+	m = matches
+	e = nil
+
+	// figure out how many components we don't need to glob because they're
+	// just names without patterns - we'll use os.Lstat below to check if that
+	// path actually exists
+	patLen := len(components)
+	patIdx := 0
+	for ; patIdx < patLen; patIdx++ {
+		if strings.IndexAny(components[patIdx], "*?[{\\") >= 0 {
+			break
+		}
+	}
+	if patIdx > 0 {
+		basedir = filepath.Join(basedir, filepath.Join(components[0:patIdx]...))
+	}
+
+	// Lstat will return an error if the file/directory doesn't exist
+	fi, err := os.Lstat(basedir)
+	if err != nil {
+		return
+	}
+
+	// if there are no more components, we've found a match
+	if patIdx >= patLen {
+		m = append(m, basedir)
+		return
+	}
+
+	// otherwise, we need to check each item in the directory...
+	// first, if basedir is a symlink, follow it...
+	if (fi.Mode() & os.ModeSymlink) != 0 {
+		fi, err = os.Stat(basedir)
+		if err != nil {
+			return
+		}
+	}
+
+	// confirm it's a directory...
+ if !fi.IsDir() { + return + } + + // read directory + dir, err := os.Open(basedir) + if err != nil { + return + } + defer dir.Close() + + files, _ := dir.Readdir(-1) + lastComponent := (patIdx + 1) >= patLen + if components[patIdx] == "**" { + // if the current component is a doublestar, we'll try depth-first + for _, file := range files { + // if symlink, we may want to follow + if (file.Mode() & os.ModeSymlink) != 0 { + file, err = os.Stat(filepath.Join(basedir, file.Name())) + if err != nil { + continue + } + } + + if file.IsDir() { + // recurse into directories + if lastComponent { + m = append(m, filepath.Join(basedir, file.Name())) + } + m, e = doGlob(filepath.Join(basedir, file.Name()), components[patIdx:], m) + } else if lastComponent { + // if the pattern's last component is a doublestar, we match filenames, too + m = append(m, filepath.Join(basedir, file.Name())) + } + } + if lastComponent { + return // we're done + } + patIdx++ + lastComponent = (patIdx + 1) >= patLen + } + + // check items in current directory and recurse + var match bool + for _, file := range files { + match, e = matchComponent(components[patIdx], file.Name()) + if e != nil { + return + } + if match { + if lastComponent { + m = append(m, filepath.Join(basedir, file.Name())) + } else { + m, e = doGlob(filepath.Join(basedir, file.Name()), components[patIdx+1:], m) + } + } + } + return +} + +// Attempt to match a single pattern component with a path component +func matchComponent(pattern, name string) (bool, error) { + // check some base cases + patternLen, nameLen := len(pattern), len(name) + if patternLen == 0 && nameLen == 0 { + return true, nil + } + if patternLen == 0 { + return false, nil + } + if nameLen == 0 && pattern != "*" { + return false, nil + } + + // check for matches one rune at a time + patIdx, nameIdx := 0, 0 + for patIdx < patternLen && nameIdx < nameLen { + patRune, patAdj := utf8.DecodeRuneInString(pattern[patIdx:]) + nameRune, nameAdj := utf8.DecodeRuneInString(name[nameIdx:]) + if patRune == '\\' { + // handle escaped runes + patIdx += patAdj + patRune, patAdj = utf8.DecodeRuneInString(pattern[patIdx:]) + if patRune == utf8.RuneError { + return false, ErrBadPattern + } else if patRune == nameRune { + patIdx += patAdj + nameIdx += nameAdj + } else { + return false, nil + } + } else if patRune == '*' { + // handle stars + if patIdx += patAdj; patIdx >= patternLen { + // a star at the end of a pattern will always + // match the rest of the path + return true, nil + } + + // check if we can make any matches + for ; nameIdx < nameLen; nameIdx += nameAdj { + if m, _ := matchComponent(pattern[patIdx:], name[nameIdx:]); m { + return true, nil + } + } + return false, nil + } else if patRune == '[' { + // handle character sets + patIdx += patAdj + endClass := indexRuneWithEscaping(pattern[patIdx:], ']') + if endClass == -1 { + return false, ErrBadPattern + } + endClass += patIdx + classRunes := []rune(pattern[patIdx:endClass]) + classRunesLen := len(classRunes) + if classRunesLen > 0 { + classIdx := 0 + matchClass := false + if classRunes[0] == '^' { + classIdx++ + } + for classIdx < classRunesLen { + low := classRunes[classIdx] + if low == '-' { + return false, ErrBadPattern + } + classIdx++ + if low == '\\' { + if classIdx < classRunesLen { + low = classRunes[classIdx] + classIdx++ + } else { + return false, ErrBadPattern + } + } + high := low + if classIdx < classRunesLen && classRunes[classIdx] == '-' { + // we have a range of runes + if classIdx++; classIdx >= classRunesLen { + return false, 
ErrBadPattern + } + high = classRunes[classIdx] + if high == '-' { + return false, ErrBadPattern + } + classIdx++ + if high == '\\' { + if classIdx < classRunesLen { + high = classRunes[classIdx] + classIdx++ + } else { + return false, ErrBadPattern + } + } + } + if low <= nameRune && nameRune <= high { + matchClass = true + } + } + if matchClass == (classRunes[0] == '^') { + return false, nil + } + } else { + return false, ErrBadPattern + } + patIdx = endClass + 1 + nameIdx += nameAdj + } else if patRune == '{' { + // handle alternatives such as {alt1,alt2,...} + patIdx += patAdj + endOptions := indexRuneWithEscaping(pattern[patIdx:], '}') + if endOptions == -1 { + return false, ErrBadPattern + } + endOptions += patIdx + options := splitPathOnSeparator(pattern[patIdx:endOptions], ',') + patIdx = endOptions + 1 + for _, o := range options { + m, e := matchComponent(o+pattern[patIdx:], name[nameIdx:]) + if e != nil { + return false, e + } + if m { + return true, nil + } + } + return false, nil + } else if patRune == '?' || patRune == nameRune { + // handle single-rune wildcard + patIdx += patAdj + nameIdx += nameAdj + } else { + return false, nil + } + } + if patIdx >= patternLen && nameIdx >= nameLen { + return true, nil + } + if nameIdx >= nameLen && pattern[patIdx:] == "*" || pattern[patIdx:] == "**" { + return true, nil + } + return false, nil +} diff --git a/vendor/github.com/bmatcuk/doublestar/go.mod b/vendor/github.com/bmatcuk/doublestar/go.mod new file mode 100644 index 000000000000..ce1688f7301a --- /dev/null +++ b/vendor/github.com/bmatcuk/doublestar/go.mod @@ -0,0 +1,3 @@ +module github.com/bmatcuk/doublestar + +go 1.12 diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml new file mode 100644 index 000000000000..d8156a60ba9b --- /dev/null +++ b/vendor/github.com/google/uuid/.travis.yml @@ -0,0 +1,9 @@ +language: go + +go: + - 1.4.3 + - 1.5.3 + - tip + +script: + - go test -v ./... diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md new file mode 100644 index 000000000000..04fdf09f136b --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -0,0 +1,10 @@ +# How to contribute + +We definitely welcome patches and contribution to this project! + +### Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). + +You may have already signed it for other Google projects. diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS new file mode 100644 index 000000000000..b4bb97f6bcd0 --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTORS @@ -0,0 +1,9 @@ +Paul Borman +bmatsuo +shawnps +theory +jboverfelt +dsymonds +cd1 +wallclockbuilder +dansouza diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE new file mode 100644 index 000000000000..5dc68268d900 --- /dev/null +++ b/vendor/github.com/google/uuid/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
* Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md
new file mode 100644
index 000000000000..9d92c11f16f5
--- /dev/null
+++ b/vendor/github.com/google/uuid/README.md
@@ -0,0 +1,19 @@
+# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master)
+The uuid package generates and inspects UUIDs based on
+[RFC 4122](http://tools.ietf.org/html/rfc4122)
+and DCE 1.1: Authentication and Security Services.
+
+This package is based on the github.com/pborman/uuid package (previously named
+code.google.com/p/go-uuid). It differs from these earlier packages in that
+a UUID is a 16 byte array rather than a byte slice. One loss due to this
+change is the ability to represent an invalid UUID (vs a NIL UUID).
+
+###### Install
+`go get github.com/google/uuid`
+
+###### Documentation
+[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid)
+
+Full `go doc` style documentation for the package can be viewed online without
+installing this package by using the GoDoc site here:
+http://godoc.org/github.com/google/uuid
diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go
new file mode 100644
index 000000000000..fa820b9d3092
--- /dev/null
+++ b/vendor/github.com/google/uuid/dce.go
@@ -0,0 +1,80 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+	"fmt"
+	"os"
+)
+
+// A Domain represents a Version 2 domain
+type Domain byte
+
+// Domain constants for DCE Security (Version 2) UUIDs.
+const (
+	Person = Domain(0)
+	Group  = Domain(1)
+	Org    = Domain(2)
+)
+
+// NewDCESecurity returns a DCE Security (Version 2) UUID.
+//
+// The domain should be one of Person, Group or Org.
+// On a POSIX system the id should be the user's UID for the Person
+// domain and the user's GID for the Group. The meaning of id for
+// the domain Org or on non-POSIX systems is site defined.
+//
+// For a given domain/id pair the same token may be returned for up to
+// 7 minutes and 10 seconds.
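+//
+// A usage sketch, not part of the original file (this is exactly what
+// NewDCEPerson below does):
+//
+//	id, err := NewDCESecurity(Person, uint32(os.Getuid()))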
+func NewDCESecurity(domain Domain, id uint32) (UUID, error) {
+	uuid, err := NewUUID()
+	if err == nil {
+		uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
+		uuid[9] = byte(domain)
+		binary.BigEndian.PutUint32(uuid[0:], id)
+	}
+	return uuid, err
+}
+
+// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
+// domain with the id returned by os.Getuid.
+//
+//  NewDCESecurity(Person, uint32(os.Getuid()))
+func NewDCEPerson() (UUID, error) {
+	return NewDCESecurity(Person, uint32(os.Getuid()))
+}
+
+// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
+// domain with the id returned by os.Getgid.
+//
+//  NewDCESecurity(Group, uint32(os.Getgid()))
+func NewDCEGroup() (UUID, error) {
+	return NewDCESecurity(Group, uint32(os.Getgid()))
+}
+
+// Domain returns the domain for a Version 2 UUID. Domains are only defined
+// for Version 2 UUIDs.
+func (uuid UUID) Domain() Domain {
+	return Domain(uuid[9])
+}
+
+// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2
+// UUIDs.
+func (uuid UUID) ID() uint32 {
+	return binary.BigEndian.Uint32(uuid[0:4])
+}
+
+func (d Domain) String() string {
+	switch d {
+	case Person:
+		return "Person"
+	case Group:
+		return "Group"
+	case Org:
+		return "Org"
+	}
+	return fmt.Sprintf("Domain%d", int(d))
+}
diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go
new file mode 100644
index 000000000000..5b8a4b9af8ce
--- /dev/null
+++ b/vendor/github.com/google/uuid/doc.go
@@ -0,0 +1,12 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package uuid generates and inspects UUIDs.
+//
+// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security
+// Services.
+//
+// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to
+// maps or compared directly.
+package uuid
diff --git a/vendor/github.com/google/uuid/go.mod b/vendor/github.com/google/uuid/go.mod
new file mode 100644
index 000000000000..fc84cd79d4c7
--- /dev/null
+++ b/vendor/github.com/google/uuid/go.mod
@@ -0,0 +1 @@
+module github.com/google/uuid
diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go
new file mode 100644
index 000000000000..b17461631511
--- /dev/null
+++ b/vendor/github.com/google/uuid/hash.go
@@ -0,0 +1,53 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"crypto/md5"
+	"crypto/sha1"
+	"hash"
+)
+
+// Well known namespace IDs and UUIDs
+var (
+	NameSpaceDNS  = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
+	NameSpaceURL  = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
+	NameSpaceOID  = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
+	NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
+	Nil           UUID // empty UUID, all zeros
+)
+
+// NewHash returns a new UUID derived from the hash of space concatenated with
+// data generated by h. The hash should be at least 16 bytes in length. The
+// first 16 bytes of the hash are used to form the UUID. The version of the
+// UUID will be the lower 4 bits of version. NewHash is used to implement
+// NewMD5 and NewSHA1.
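+//
+// A sketch of a direct call, with an assumed namespace and name (NewSHA1
+// below is exactly this with version 5):
+//
+//	id := NewHash(sha1.New(), NameSpaceDNS, []byte("example.com"), 5)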
+func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space[:]) + h.Write(data) + s := h.Sum(nil) + var uuid UUID + copy(uuid[:], s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go new file mode 100644 index 000000000000..7f9e0c6c0e38 --- /dev/null +++ b/vendor/github.com/google/uuid/marshal.go @@ -0,0 +1,37 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "fmt" + +// MarshalText implements encoding.TextMarshaler. +func (uuid UUID) MarshalText() ([]byte, error) { + var js [36]byte + encodeHex(js[:], uuid) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (uuid *UUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err == nil { + *uuid = id + } + return err +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (uuid UUID) MarshalBinary() ([]byte, error) { + return uuid[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (uuid *UUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(uuid[:], data) + return nil +} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go new file mode 100644 index 000000000000..d651a2b0619f --- /dev/null +++ b/vendor/github.com/google/uuid/node.go @@ -0,0 +1,90 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "sync" +) + +var ( + nodeMu sync.Mutex + ifname string // name of interface being used + nodeID [6]byte // hardware for version 1 UUIDs + zeroID [6]byte // nodeID with only 0's +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + defer nodeMu.Unlock() + nodeMu.Lock() + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". +func SetNodeInterface(name string) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + return setNodeInterface(name) +} + +func setNodeInterface(name string) bool { + iname, addr := getHardwareInterface(name) // null implementation for js + if iname != "" && addr != nil { + ifname = iname + copy(nodeID[:], addr) + return true + } + + // We found no interfaces with a valid hardware address. 
If name
+	// does not specify a specific interface, generate a random Node ID
+	// (section 4.1.6)
+	if name == "" {
+		ifname = "random"
+		randomBits(nodeID[:])
+		return true
+	}
+	return false
+}
+
+// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
+// if not already set.
+func NodeID() []byte {
+	defer nodeMu.Unlock()
+	nodeMu.Lock()
+	if nodeID == zeroID {
+		setNodeInterface("")
+	}
+	nid := nodeID
+	return nid[:]
+}
+
+// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
+// of id are used. If id is less than 6 bytes then false is returned and the
+// Node ID is not set.
+func SetNodeID(id []byte) bool {
+	if len(id) < 6 {
+		return false
+	}
+	defer nodeMu.Unlock()
+	nodeMu.Lock()
+	copy(nodeID[:], id)
+	ifname = "user"
+	return true
+}
+
+// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
+// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
+func (uuid UUID) NodeID() []byte {
+	var node [6]byte
+	copy(node[:], uuid[10:])
+	return node[:]
+}
diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go
new file mode 100644
index 000000000000..24b78edc9071
--- /dev/null
+++ b/vendor/github.com/google/uuid/node_js.go
@@ -0,0 +1,12 @@
+// Copyright 2017 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build js
+
+package uuid
+
+// getHardwareInterface returns nil values for the JS version of the code.
+// This removes the "net" dependency, because it is not used in the browser.
+// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
+func getHardwareInterface(name string) (string, []byte) { return "", nil }
diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go
new file mode 100644
index 000000000000..0cbbcddbd6e8
--- /dev/null
+++ b/vendor/github.com/google/uuid/node_net.go
@@ -0,0 +1,33 @@
+// Copyright 2017 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !js
+
+package uuid
+
+import "net"
+
+var interfaces []net.Interface // cached list of interfaces
+
+// getHardwareInterface returns the name and hardware address of interface name.
+// If name is "" then the name and hardware address of one of the system's
+// interfaces is returned. If no interfaces are found (name does not exist or
+// there are no interfaces) then "", nil is returned.
+//
+// Only addresses of at least 6 bytes are returned.
+func getHardwareInterface(name string) (string, []byte) {
+	if interfaces == nil {
+		var err error
+		interfaces, err = net.Interfaces()
+		if err != nil {
+			return "", nil
+		}
+	}
+	for _, ifs := range interfaces {
+		if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
+			return ifs.Name, ifs.HardwareAddr
+		}
+	}
+	return "", nil
+}
diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go
new file mode 100644
index 000000000000..f326b54db37a
--- /dev/null
+++ b/vendor/github.com/google/uuid/sql.go
@@ -0,0 +1,59 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"database/sql/driver"
+	"fmt"
+)
+
+// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
+// Currently, database types that map to string and []byte are supported. Please
+// consult database-specific driver documentation for matching types.
+func (uuid *UUID) Scan(src interface{}) error {
+	switch src := src.(type) {
+	case nil:
+		return nil
+
+	case string:
+		// if an empty UUID comes from a table, we return a null UUID
+		if src == "" {
+			return nil
+		}
+
+		// see Parse for required string format
+		u, err := Parse(src)
+		if err != nil {
+			return fmt.Errorf("Scan: %v", err)
+		}
+
+		*uuid = u
+
+	case []byte:
+		// if an empty UUID comes from a table, we return a null UUID
+		if len(src) == 0 {
+			return nil
+		}
+
+		// assumes a simple slice of bytes if 16 bytes
+		// otherwise attempts to parse
+		if len(src) != 16 {
+			return uuid.Scan(string(src))
+		}
+		copy((*uuid)[:], src)
+
+	default:
+		return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
+	}
+
+	return nil
+}
+
+// Value implements sql.Valuer so that UUIDs can be written to databases
+// transparently. Currently, UUIDs map to strings. Please consult
+// database-specific driver documentation for matching types.
+func (uuid UUID) Value() (driver.Value, error) {
+	return uuid.String(), nil
+}
diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go
new file mode 100644
index 000000000000..e6ef06cdc87a
--- /dev/null
+++ b/vendor/github.com/google/uuid/time.go
@@ -0,0 +1,123 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+	"sync"
+	"time"
+)
+
+// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
+// 1582.
+type Time int64
+
+const (
+	lillian    = 2299160          // Julian day of 15 Oct 1582
+	unix       = 2440587          // Julian day of 1 Jan 1970
+	epoch      = unix - lillian   // Days between epochs
+	g1582      = epoch * 86400    // seconds between epochs
+	g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs
+)
+
+var (
+	timeMu   sync.Mutex
+	lasttime uint64 // last time we returned
+	clockSeq uint16 // clock sequence for this run
+
+	timeNow = time.Now // for testing
+)
+
+// UnixTime converts t to the number of seconds and nanoseconds using the Unix
+// epoch of 1 Jan 1970.
+func (t Time) UnixTime() (sec, nsec int64) {
+	sec = int64(t - g1582ns100)
+	nsec = (sec % 10000000) * 100
+	sec /= 10000000
+	return sec, nsec
+}
+
+// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
+// clock sequence as well as adjusting the clock sequence as needed. An error
+// is returned if the current time cannot be determined.
+func GetTime() (Time, uint16, error) {
+	defer timeMu.Unlock()
+	timeMu.Lock()
+	return getTime()
+}
+
+func getTime() (Time, uint16, error) {
+	t := timeNow()
+
+	// If we don't have a clock sequence already, set one.
+	if clockSeq == 0 {
+		setClockSequence(-1)
+	}
+	now := uint64(t.UnixNano()/100) + g1582ns100
+
+	// If time has gone backwards with this clock sequence then we
+	// increment the clock sequence
+	if now <= lasttime {
+		clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000
+	}
+	lasttime = now
+	return Time(now), clockSeq, nil
+}
+
+// ClockSequence returns the current clock sequence, generating one if not
+// already set. The clock sequence is only used for Version 1 UUIDs.
+// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. Unless SetClockSequence is used, a new +// random clock sequence is generated the first time a clock sequence is +// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) +func ClockSequence() int { + defer timeMu.Unlock() + timeMu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clockSeq == 0 { + setClockSequence(-1) + } + return int(clockSeq & 0x3fff) +} + +// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { + defer timeMu.Unlock() + timeMu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + oldSeq := clockSeq + clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if oldSeq != clockSeq { + lasttime = 0 + } +} + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. The time is only defined for version 1 and 2 UUIDs. +func (uuid UUID) Time() Time { + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + return Time(time) +} + +// ClockSequence returns the clock sequence encoded in uuid. +// The clock sequence is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) ClockSequence() int { + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff +} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go new file mode 100644 index 000000000000..5ea6c737806e --- /dev/null +++ b/vendor/github.com/google/uuid/util.go @@ -0,0 +1,43 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// randomBits completely fills slice b with random data. +func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. 
+var xvalues = [256]byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts hex characters x1 and x2 into a byte. +func xtob(x1, x2 byte) (byte, bool) { + b1 := xvalues[x1] + b2 := xvalues[x2] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go new file mode 100644 index 000000000000..524404cc5227 --- /dev/null +++ b/vendor/github.com/google/uuid/uuid.go @@ -0,0 +1,245 @@ +// Copyright 2018 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "strings" +) + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID [16]byte + +// A Version represents a UUID's version. +type Version byte + +// A Variant represents a UUID's variant. +type Variant byte + +// Constants returned by Variant. +const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. + Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. +) + +var rander = rand.Reader // random function + +// Parse decodes s into a UUID or returns an error. Both the standard UUID +// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the +// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex +// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. 
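+//
+// A sketch of the accepted inputs (the UUID value here is an arbitrary
+// example; all four forms decode the same UUID):
+//
+//	id, err := Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479")
+//	id, err = Parse("urn:uuid:f47ac10b-58cc-0372-8567-0e02b2c3d479")
+//	id, err = Parse("{f47ac10b-58cc-0372-8567-0e02b2c3d479}")
+//	id, err = Parse("f47ac10b58cc037285670e02b2c3d479")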
+func Parse(s string) (UUID, error) { + var uuid UUID + switch len(s) { + // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36: + + // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: + if strings.ToLower(s[:9]) != "urn:uuid:" { + return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + case 36 + 2: + s = s[1:] + + // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + case 32: + var ok bool + for i := range uuid { + uuid[i], ok = xtob(s[i*2], s[i*2+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(s[x], s[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// ParseBytes is like Parse, except it parses a byte slice instead of a string. +func ParseBytes(b []byte) (UUID, error) { + var uuid UUID + switch len(b) { + case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { + return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) + } + b = b[9:] + case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + b = b[1:] + case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + var ok bool + for i := 0; i < 32; i += 2 { + uuid[i/2], ok = xtob(b[i], b[i+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, fmt.Errorf("invalid UUID length: %d", len(b)) + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(b[x], b[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// MustParse is like Parse but panics if the string cannot be parsed. +// It simplifies safe initialization of global variables holding compiled UUIDs. +func MustParse(s string) UUID { + uuid, err := Parse(s) + if err != nil { + panic(`uuid: Parse(` + s + `): ` + err.Error()) + } + return uuid +} + +// FromBytes creates a new UUID from a byte slice. Returns an error if the slice +// does not have a length of 16. The bytes are copied from the slice. +func FromBytes(b []byte) (uuid UUID, err error) { + err = uuid.UnmarshalBinary(b) + return uuid, err +} + +// Must returns uuid if err is nil and panics otherwise. +func Must(uuid UUID, err error) UUID { + if err != nil { + panic(err) + } + return uuid +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. +func (uuid UUID) String() string { + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. 
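+//
+// For example (a sketch using the DNS namespace UUID defined in hash.go):
+//
+//	NameSpaceDNS.URN() // "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"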
+func (uuid UUID) URN() string {
+	var buf [36 + 9]byte
+	copy(buf[:], "urn:uuid:")
+	encodeHex(buf[9:], uuid)
+	return string(buf[:])
+}
+
+func encodeHex(dst []byte, uuid UUID) {
+	hex.Encode(dst, uuid[:4])
+	dst[8] = '-'
+	hex.Encode(dst[9:13], uuid[4:6])
+	dst[13] = '-'
+	hex.Encode(dst[14:18], uuid[6:8])
+	dst[18] = '-'
+	hex.Encode(dst[19:23], uuid[8:10])
+	dst[23] = '-'
+	hex.Encode(dst[24:], uuid[10:])
+}
+
+// Variant returns the variant encoded in uuid.
+func (uuid UUID) Variant() Variant {
+	switch {
+	case (uuid[8] & 0xc0) == 0x80:
+		return RFC4122
+	case (uuid[8] & 0xe0) == 0xc0:
+		return Microsoft
+	case (uuid[8] & 0xe0) == 0xe0:
+		return Future
+	default:
+		return Reserved
+	}
+}
+
+// Version returns the version of uuid.
+func (uuid UUID) Version() Version {
+	return Version(uuid[6] >> 4)
+}
+
+func (v Version) String() string {
+	if v > 15 {
+		return fmt.Sprintf("BAD_VERSION_%d", v)
+	}
+	return fmt.Sprintf("VERSION_%d", v)
+}
+
+func (v Variant) String() string {
+	switch v {
+	case RFC4122:
+		return "RFC4122"
+	case Reserved:
+		return "Reserved"
+	case Microsoft:
+		return "Microsoft"
+	case Future:
+		return "Future"
+	case Invalid:
+		return "Invalid"
+	}
+	return fmt.Sprintf("BadVariant%d", int(v))
+}
+
+// SetRand sets the random number generator to r, which implements io.Reader.
+// If r.Read returns an error when the package requests random data then
+// a panic will be issued.
+//
+// Calling SetRand with nil sets the random number generator to the default
+// generator.
+func SetRand(r io.Reader) {
+	if r == nil {
+		rander = rand.Reader
+		return
+	}
+	rander = r
+}
diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go
new file mode 100644
index 000000000000..199a1ac65403
--- /dev/null
+++ b/vendor/github.com/google/uuid/version1.go
@@ -0,0 +1,44 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+)
+
+// NewUUID returns a Version 1 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set NewUUID returns Nil. If clock sequence has not been set by
+// SetClockSequence then it will be set automatically. If GetTime fails to
+// return the current time, NewUUID returns Nil and an error.
+//
+// In most cases, New should be used.
+func NewUUID() (UUID, error) {
+	nodeMu.Lock()
+	if nodeID == zeroID {
+		setNodeInterface("")
+	}
+	nodeMu.Unlock()
+
+	var uuid UUID
+	now, seq, err := GetTime()
+	if err != nil {
+		return uuid, err
+	}
+
+	timeLow := uint32(now & 0xffffffff)
+	timeMid := uint16((now >> 32) & 0xffff)
+	timeHi := uint16((now >> 48) & 0x0fff)
+	timeHi |= 0x1000 // Version 1
+
+	binary.BigEndian.PutUint32(uuid[0:], timeLow)
+	binary.BigEndian.PutUint16(uuid[4:], timeMid)
+	binary.BigEndian.PutUint16(uuid[6:], timeHi)
+	binary.BigEndian.PutUint16(uuid[8:], seq)
+	copy(uuid[10:], nodeID[:])
+
+	return uuid, nil
+}
diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go
new file mode 100644
index 000000000000..84af91c9f54f
--- /dev/null
+++ b/vendor/github.com/google/uuid/version4.go
@@ -0,0 +1,38 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "io"
+
+// New creates a new random UUID or panics. New is equivalent to
+// the expression
+//
+//    uuid.Must(uuid.NewRandom())
+func New() UUID {
+	return Must(NewRandom())
+}
+
+// NewRandom returns a Random (Version 4) UUID.
+//
+// The strength of the UUIDs is based on the strength of the crypto/rand
+// package.
+//
+// A note about uniqueness derived from the UUID Wikipedia entry:
+//
+//  Randomly generated UUIDs have 122 random bits. One's annual risk of being
+//  hit by a meteorite is estimated to be one chance in 17 billion; that
+//  means the probability is about 0.00000000006 (6 × 10⁻¹¹),
+//  equivalent to the odds of creating a few tens of trillions of UUIDs in a
+//  year and having one duplicate.
+func NewRandom() (UUID, error) {
+	var uuid UUID
+	_, err := io.ReadFull(rander, uuid[:])
+	if err != nil {
+		return Nil, err
+	}
+	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+	return uuid, nil
+}
diff --git a/vendor/github.com/hashicorp/go-cty-funcs/LICENSE b/vendor/github.com/hashicorp/go-cty-funcs/LICENSE
new file mode 100644
index 000000000000..c33dcc7c928c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty-funcs/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+     means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of version
+        1.1 or earlier of the License, but not also under the terms of a
+        Secondary License.
+
+1.6. “Executable Form”
+
+     means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+     means a work that combines Covered Software with other material, in a separate
+     file or files, that is not Covered Software.
+
+1.8. “License”
+
+     means this document.
+
+1.9. “Licensable”
+
+     means having the right to grant, to the maximum extent possible, whether at the
+     time of the initial grant or subsequently, any and all of the rights conveyed by
+     this License.
+
+1.10. “Modifications”
+
+     means any of the following:
+
+     a. any file in Source Code Form that results from an addition to, deletion
+        from, or modification of the contents of Covered Software; or
+
+     b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+      means any patent claim(s), including without limitation, method, process,
+      and apparatus claims, in any patent Licensable by such Contributor that
+      would be infringed, but for the grant of the License, by the making,
+      using, selling, offering for sale, having made, import, or transfer of
+      either its Contributions or its Contributor Version.
+
+1.12.
“Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. 
Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. 
Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. 
Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-cty-funcs/cidr/host.go b/vendor/github.com/hashicorp/go-cty-funcs/cidr/host.go new file mode 100644 index 000000000000..fb7a9bce806e --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty-funcs/cidr/host.go @@ -0,0 +1,49 @@ +package cidr + +import ( + "fmt" + "net" + + "github.com/apparentlymart/go-cidr/cidr" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/gocty" +) + +// HostFunc is a function that calculates a full host IP address within a given +// IP network address prefix. 
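For a sense of what this computes, here is a minimal sketch (illustrative only, not part of the vendored file) that calls the `Host` wrapper defined at the bottom of this file, using the vendored import path shown in the diff:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty-funcs/cidr"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	// Host number 16 within the 10.12.0.0/16 prefix.
	v, err := cidr.Host(cty.StringVal("10.12.0.0/16"), cty.NumberIntVal(16))
	if err != nil {
		panic(err)
	}
	fmt.Println(v.AsString()) // 10.12.0.16
}
```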
+var HostFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "prefix", + Type: cty.String, + }, + { + Name: "hostnum", + Type: cty.Number, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var hostNum int + if err := gocty.FromCtyValue(args[1], &hostNum); err != nil { + return cty.UnknownVal(cty.String), err + } + _, network, err := net.ParseCIDR(args[0].AsString()) + if err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err) + } + + ip, err := cidr.Host(network, hostNum) + if err != nil { + return cty.UnknownVal(cty.String), err + } + + return cty.StringVal(ip.String()), nil + }, +}) + +// Host calculates a full host IP address within a given IP network address prefix. +func Host(prefix, hostnum cty.Value) (cty.Value, error) { + return HostFunc.Call([]cty.Value{prefix, hostnum}) +} diff --git a/vendor/github.com/hashicorp/go-cty-funcs/cidr/netmask.go b/vendor/github.com/hashicorp/go-cty-funcs/cidr/netmask.go new file mode 100644 index 000000000000..918814dc7dbd --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty-funcs/cidr/netmask.go @@ -0,0 +1,34 @@ +package cidr + +import ( + "fmt" + "net" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// NetmaskFunc is a function that converts an IPv4 address prefix given in CIDR +// notation into a subnet mask address. +var NetmaskFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "prefix", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + _, network, err := net.ParseCIDR(args[0].AsString()) + if err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err) + } + + return cty.StringVal(net.IP(network.Mask).String()), nil + }, +}) + +// Netmask converts an IPv4 address prefix given in CIDR notation into a subnet mask address. +func Netmask(prefix cty.Value) (cty.Value, error) { + return NetmaskFunc.Call([]cty.Value{prefix}) +} diff --git a/vendor/github.com/hashicorp/go-cty-funcs/cidr/subnet.go b/vendor/github.com/hashicorp/go-cty-funcs/cidr/subnet.go new file mode 100644 index 000000000000..ffcb24dde7a1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty-funcs/cidr/subnet.go @@ -0,0 +1,66 @@ +package cidr + +import ( + "fmt" + "net" + + "github.com/apparentlymart/go-cidr/cidr" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/gocty" +) + +// SubnetFunc is a function that calculates a subnet address within a given +// IP network address prefix. 
+var SubnetFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "prefix", + Type: cty.String, + }, + { + Name: "newbits", + Type: cty.Number, + }, + { + Name: "netnum", + Type: cty.Number, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var newbits int + if err := gocty.FromCtyValue(args[1], &newbits); err != nil { + return cty.UnknownVal(cty.String), err + } + var netnum int + if err := gocty.FromCtyValue(args[2], &netnum); err != nil { + return cty.UnknownVal(cty.String), err + } + + _, network, err := net.ParseCIDR(args[0].AsString()) + if err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err) + } + + // For portability with 32-bit systems where the subnet number will be + // a 32-bit int, we only allow extension of 32 bits in one call even if + // we're running on a 64-bit machine. (Of course, this is significant + // only for IPv6.) + if newbits > 32 { + return cty.UnknownVal(cty.String), fmt.Errorf("may not extend prefix by more than 32 bits") + } + + newNetwork, err := cidr.Subnet(network, newbits, netnum) + if err != nil { + return cty.UnknownVal(cty.String), err + } + + return cty.StringVal(newNetwork.String()), nil + }, +}) + +// Subnet calculates a subnet address within a given IP network address prefix. +func Subnet(prefix, newbits, netnum cty.Value) (cty.Value, error) { + return SubnetFunc.Call([]cty.Value{prefix, newbits, netnum}) +} diff --git a/vendor/github.com/hashicorp/go-cty-funcs/cidr/subnets.go b/vendor/github.com/hashicorp/go-cty-funcs/cidr/subnets.go new file mode 100644 index 000000000000..e70e015eaa74 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty-funcs/cidr/subnets.go @@ -0,0 +1,99 @@ +package cidr + +import ( + "net" + + "github.com/apparentlymart/go-cidr/cidr" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/gocty" +) + +// SubnetsFunc is similar to SubnetFunc but calculates many consecutive subnet +// addresses at once, rather than just a single subnet extension. 
+var SubnetsFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "prefix", + Type: cty.String, + }, + }, + VarParam: &function.Parameter{ + Name: "newbits", + Type: cty.Number, + }, + Type: function.StaticReturnType(cty.List(cty.String)), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + _, network, err := net.ParseCIDR(args[0].AsString()) + if err != nil { + return cty.UnknownVal(cty.String), function.NewArgErrorf(0, "invalid CIDR expression: %s", err) + } + startPrefixLen, _ := network.Mask.Size() + + prefixLengthArgs := args[1:] + if len(prefixLengthArgs) == 0 { + return cty.ListValEmpty(cty.String), nil + } + + var firstLength int + if err := gocty.FromCtyValue(prefixLengthArgs[0], &firstLength); err != nil { + return cty.UnknownVal(cty.String), function.NewArgError(1, err) + } + firstLength += startPrefixLen + + retVals := make([]cty.Value, len(prefixLengthArgs)) + + current, _ := cidr.PreviousSubnet(network, firstLength) + for i, lengthArg := range prefixLengthArgs { + var length int + if err := gocty.FromCtyValue(lengthArg, &length); err != nil { + return cty.UnknownVal(cty.String), function.NewArgError(i+1, err) + } + + if length < 1 { + return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "must extend prefix by at least one bit") + } + // For portability with 32-bit systems where the subnet number + // will be a 32-bit int, we only allow extension of 32 bits in + // one call even if we're running on a 64-bit machine. + // (Of course, this is significant only for IPv6.) + if length > 32 { + return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "may not extend prefix by more than 32 bits") + } + length += startPrefixLen + if length > (len(network.IP) * 8) { + protocol := "IP" + switch len(network.IP) * 8 { + case 32: + protocol = "IPv4" + case 128: + protocol = "IPv6" + } + return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "would extend prefix to %d bits, which is too long for an %s address", length, protocol) + } + + next, rollover := cidr.NextSubnet(current, length) + if rollover || !network.Contains(next.IP) { + // If we run out of suffix bits in the base CIDR prefix then + // NextSubnet will start incrementing the prefix bits, which + // we don't allow because it would then allocate addresses + // outside of the caller's given prefix. + return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "not enough remaining address space for a subnet with a prefix of %d bits after %s", length, current.String()) + } + + current = next + retVals[i] = cty.StringVal(current.String()) + } + + return cty.ListVal(retVals), nil + }, +}) + +// Subnets calculates a sequence of consecutive subnet prefixes that may be of +// different prefix lengths under a common base prefix. 
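A sketch of the consecutive-allocation behavior (illustrative only; the input values are assumed): each `newbits` argument extends the base prefix independently, and the resulting subnets are allocated back to back:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty-funcs/cidr"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	// Carve two consecutive /16 networks out of 10.0.0.0/8; each
	// newbits argument extends the base prefix by 8 bits.
	v, err := cidr.Subnets(cty.StringVal("10.0.0.0/8"),
		cty.NumberIntVal(8), cty.NumberIntVal(8))
	if err != nil {
		panic(err)
	}
	for it := v.ElementIterator(); it.Next(); {
		_, s := it.Element()
		fmt.Println(s.AsString()) // 10.0.0.0/16, then 10.1.0.0/16
	}
}
```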
+func Subnets(prefix cty.Value, newbits ...cty.Value) (cty.Value, error) {
+	args := make([]cty.Value, len(newbits)+1)
+	args[0] = prefix
+	copy(args[1:], newbits)
+	return SubnetsFunc.Call(args)
+}
diff --git a/vendor/github.com/hashicorp/go-cty-funcs/crypto/bcrypt.go b/vendor/github.com/hashicorp/go-cty-funcs/crypto/bcrypt.go
new file mode 100644
index 000000000000..4f70df5b98b5
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty-funcs/crypto/bcrypt.go
@@ -0,0 +1,59 @@
+package crypto
+
+import (
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+	"github.com/zclconf/go-cty/cty/gocty"
+	"golang.org/x/crypto/bcrypt"
+)
+
+// BcryptFunc is a function that computes a hash of the given string using the
+// Blowfish cipher.
+var BcryptFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "str",
+			Type: cty.String,
+		},
+	},
+	VarParam: &function.Parameter{
+		Name: "cost",
+		Type: cty.Number,
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		defaultCost := 10
+
+		if len(args) > 1 {
+			var val int
+			if err := gocty.FromCtyValue(args[1], &val); err != nil {
+				return cty.UnknownVal(cty.String), err
+			}
+			defaultCost = val
+		}
+
+		if len(args) > 2 {
+			return cty.UnknownVal(cty.String), fmt.Errorf("bcrypt() takes no more than two arguments")
+		}
+
+		input := args[0].AsString()
+		out, err := bcrypt.GenerateFromPassword([]byte(input), defaultCost)
+		if err != nil {
+			return cty.UnknownVal(cty.String), fmt.Errorf("error occurred generating password: %s", err.Error())
+		}
+
+		return cty.StringVal(string(out)), nil
+	},
+})
+
+// Bcrypt computes a hash of the given string using the Blowfish cipher,
+// returning a string in the Modular Crypt Format usually expected in the
+// shadow password file on many Unix systems.
+func Bcrypt(str cty.Value, cost ...cty.Value) (cty.Value, error) {
+	args := make([]cty.Value, len(cost)+1)
+	args[0] = str
+	copy(args[1:], cost)
+	return BcryptFunc.Call(args)
+}
diff --git a/vendor/github.com/hashicorp/go-cty-funcs/crypto/hash.go b/vendor/github.com/hashicorp/go-cty-funcs/crypto/hash.go
new file mode 100644
index 000000000000..232d21098d16
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty-funcs/crypto/hash.go
@@ -0,0 +1,27 @@
+package crypto
+
+import (
+	"hash"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+)
+
+func makeStringHashFunction(hf func() hash.Hash, enc func([]byte) string) function.Function {
+	return function.New(&function.Spec{
+		Params: []function.Parameter{
+			{
+				Name: "str",
+				Type: cty.String,
+			},
+		},
+		Type: function.StaticReturnType(cty.String),
+		Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+			s := args[0].AsString()
+			h := hf()
+			h.Write([]byte(s))
+			rv := enc(h.Sum(nil))
+			return cty.StringVal(rv), nil
+		},
+	})
+}
diff --git a/vendor/github.com/hashicorp/go-cty-funcs/crypto/md5.go b/vendor/github.com/hashicorp/go-cty-funcs/crypto/md5.go
new file mode 100644
index 000000000000..79f54c6c8056
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty-funcs/crypto/md5.go
@@ -0,0 +1,18 @@
+package crypto
+
+import (
+	"crypto/md5"
+	"encoding/hex"
+
+	"github.com/zclconf/go-cty/cty"
+)
+
+// Md5Func is a function that computes the MD5 hash of a given string and
+// encodes it with hexadecimal digits.
+var Md5Func = makeStringHashFunction(md5.New, hex.EncodeToString) + +// Md5 computes the MD5 hash of a given string and encodes it with hexadecimal +// digits. +func Md5(str cty.Value) (cty.Value, error) { + return Md5Func.Call([]cty.Value{str}) +} diff --git a/vendor/github.com/hashicorp/go-cty-funcs/crypto/rsa.go b/vendor/github.com/hashicorp/go-cty-funcs/crypto/rsa.go new file mode 100644 index 000000000000..8a89c4400640 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty-funcs/crypto/rsa.go @@ -0,0 +1,64 @@ +package crypto + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "fmt" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// RsaDecryptFunc is a function that decrypts an RSA-encrypted ciphertext. +var RsaDecryptFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "ciphertext", + Type: cty.String, + }, + { + Name: "privatekey", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + s := args[0].AsString() + key := args[1].AsString() + + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("failed to decode input %q: cipher text must be base64-encoded", s) + } + + block, _ := pem.Decode([]byte(key)) + if block == nil { + return cty.UnknownVal(cty.String), fmt.Errorf("failed to parse key: no key found") + } + if block.Headers["Proc-Type"] == "4,ENCRYPTED" { + return cty.UnknownVal(cty.String), fmt.Errorf( + "failed to parse key: password protected keys are not supported. Please decrypt the key prior to use", + ) + } + + x509Key, err := x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return cty.UnknownVal(cty.String), err + } + + out, err := rsa.DecryptPKCS1v15(nil, x509Key, b) + if err != nil { + return cty.UnknownVal(cty.String), err + } + + return cty.StringVal(string(out)), nil + }, +}) + +// RsaDecrypt decrypts an RSA-encrypted ciphertext, returning the corresponding +// cleartext. +func RsaDecrypt(ciphertext, privatekey cty.Value) (cty.Value, error) { + return RsaDecryptFunc.Call([]cty.Value{ciphertext, privatekey}) +} diff --git a/vendor/github.com/hashicorp/go-cty-funcs/crypto/sha.go b/vendor/github.com/hashicorp/go-cty-funcs/crypto/sha.go new file mode 100644 index 000000000000..5e1cb4a36387 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty-funcs/crypto/sha.go @@ -0,0 +1,40 @@ +package crypto + +import ( + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/hex" + + "github.com/zclconf/go-cty/cty" +) + +// Sha1Func is a function that computes the SHA1 hash of a given string and +// encodes it with hexadecimal digits. +var Sha1Func = makeStringHashFunction(sha1.New, hex.EncodeToString) + +// Sha256Func is a function that computes the SHA256 hash of a given string and +// encodes it with hexadecimal digits. +var Sha256Func = makeStringHashFunction(sha256.New, hex.EncodeToString) + +// Sha512Func is a function that computes the SHA512 hash of a given string and +// encodes it with hexadecimal digits. +var Sha512Func = makeStringHashFunction(sha512.New, hex.EncodeToString) + +// Sha1 computes the SHA1 hash of a given string and encodes it with +// hexadecimal digits. +func Sha1(str cty.Value) (cty.Value, error) { + return Sha1Func.Call([]cty.Value{str}) +} + +// Sha256 computes the SHA256 hash of a given string and encodes it with +// hexadecimal digits. 
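A quick illustration of this package's wrappers (not part of the vendored files; the `"hello"` digests are the well-known test values for MD5 and SHA-256):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty-funcs/crypto"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	md5v, _ := crypto.Md5(cty.StringVal("hello"))
	fmt.Println(md5v.AsString()) // 5d41402abc4b2a76b9719d911017c592

	shav, _ := crypto.Sha256(cty.StringVal("hello"))
	fmt.Println(shav.AsString()) // 2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824

	// Bcrypt output embeds a random salt, so it differs on every call.
	bv, _ := crypto.Bcrypt(cty.StringVal("secret"))
	fmt.Println(bv.AsString()) // e.g. $2a$10$...
}
```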
+func Sha256(str cty.Value) (cty.Value, error) {
+	return Sha256Func.Call([]cty.Value{str})
+}
+
+// Sha512 computes the SHA512 hash of a given string and encodes it with
+// hexadecimal digits.
+func Sha512(str cty.Value) (cty.Value, error) {
+	return Sha512Func.Call([]cty.Value{str})
+}
diff --git a/vendor/github.com/hashicorp/go-cty-funcs/encoding/base64.go b/vendor/github.com/hashicorp/go-cty-funcs/encoding/base64.go
new file mode 100644
index 000000000000..e1e1b130138b
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty-funcs/encoding/base64.go
@@ -0,0 +1,71 @@
+package encoding
+
+import (
+	"encoding/base64"
+	"fmt"
+	"log"
+	"unicode/utf8"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+)
+
+// Base64DecodeFunc is a function that decodes a string containing a base64 sequence.
+var Base64DecodeFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "str",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		s := args[0].AsString()
+		sDec, err := base64.StdEncoding.DecodeString(s)
+		if err != nil {
+			return cty.UnknownVal(cty.String), fmt.Errorf("failed to decode base64 data '%s'", s)
+		}
+		if !utf8.Valid([]byte(sDec)) {
+			log.Printf("[DEBUG] the result of decoding the provided string is not valid UTF-8: %s", sDec)
+			return cty.UnknownVal(cty.String), fmt.Errorf("the result of decoding the provided string is not valid UTF-8")
+		}
+		return cty.StringVal(string(sDec)), nil
+	},
+})
+
+// Base64EncodeFunc is a function that encodes a string to a base64 sequence.
+var Base64EncodeFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "str",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		return cty.StringVal(base64.StdEncoding.EncodeToString([]byte(args[0].AsString()))), nil
+	},
+})
+
+// Base64Decode decodes a string containing a base64 sequence.
+//
+// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
+//
+// Strings in the Terraform language are sequences of unicode characters rather
+// than bytes, so this function will also interpret the resulting bytes as
+// UTF-8. If the bytes after Base64 decoding are _not_ valid UTF-8, this function
+// produces an error.
+func Base64Decode(str cty.Value) (cty.Value, error) {
+	return Base64DecodeFunc.Call([]cty.Value{str})
+}
+
+// Base64Encode applies Base64 encoding to a string.
+//
+// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
+//
+// Strings in the Terraform language are sequences of unicode characters rather
+// than bytes, so this function will first encode the characters from the string
+// as UTF-8, and then apply Base64 encoding to the result.
+func Base64Encode(str cty.Value) (cty.Value, error) {
+	return Base64EncodeFunc.Call([]cty.Value{str})
+}
diff --git a/vendor/github.com/hashicorp/go-cty-funcs/encoding/url.go b/vendor/github.com/hashicorp/go-cty-funcs/encoding/url.go
new file mode 100644
index 000000000000..a39eee0bd584
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty-funcs/encoding/url.go
@@ -0,0 +1,34 @@
+package encoding
+
+import (
+	"net/url"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+)
+
+// URLEncodeFunc is a function that applies URL encoding to a given string.
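A short sketch of the two encoding helpers in this package (illustrative only, not part of the vendored files):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty-funcs/encoding"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	enc, _ := encoding.Base64Encode(cty.StringVal("Hello"))
	fmt.Println(enc.AsString()) // SGVsbG8=

	dec, _ := encoding.Base64Decode(enc)
	fmt.Println(dec.AsString()) // Hello

	esc, _ := encoding.URLEncode(cty.StringVal("foo bar?"))
	fmt.Println(esc.AsString()) // foo+bar%3F
}
```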
+var URLEncodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.StringVal(url.QueryEscape(args[0].AsString())), nil + }, +}) + +// URLEncode applies URL encoding to a given string. +// +// This function identifies characters in the given string that would have a +// special meaning when included as a query string argument in a URL and +// escapes them using RFC 3986 "percent encoding". +// +// If the given string contains non-ASCII characters, these are first encoded as +// UTF-8 and then percent encoding is applied separately to each UTF-8 byte. +func URLEncode(str cty.Value) (cty.Value, error) { + return URLEncodeFunc.Call([]cty.Value{str}) +} diff --git a/vendor/github.com/hashicorp/go-cty-funcs/filesystem/filesystem.go b/vendor/github.com/hashicorp/go-cty-funcs/filesystem/filesystem.go new file mode 100644 index 000000000000..dc8d2728504b --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty-funcs/filesystem/filesystem.go @@ -0,0 +1,312 @@ +package filesystem + +import ( + "encoding/base64" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "unicode/utf8" + + "github.com/bmatcuk/doublestar" + homedir "github.com/mitchellh/go-homedir" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// MakeFileFunc constructs a function that takes a file path and returns the +// contents of that file, either directly as a string (where valid UTF-8 is +// required) or as a string containing base64 bytes. +func MakeFileFunc(baseDir string, encBase64 bool) function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + path := args[0].AsString() + src, err := readFileBytes(baseDir, path) + if err != nil { + return cty.UnknownVal(cty.String), err + } + + switch { + case encBase64: + enc := base64.StdEncoding.EncodeToString(src) + return cty.StringVal(enc), nil + default: + if !utf8.Valid(src) { + return cty.UnknownVal(cty.String), fmt.Errorf("contents of %s are not valid UTF-8; use the filebase64 function to obtain the Base64 encoded contents or the other file functions (e.g. filemd5, filesha256) to obtain file hashing results instead", path) + } + return cty.StringVal(string(src)), nil + } + }, + }) +} + +// MakeFileExistsFunc is a function that takes a path and determines whether a +// file exists at that path. 
+// +// MakeFileExistsFunc will try to expand a path starting with a '~' to the home +// folder using github.com/mitchellh/go-homedir +func MakeFileExistsFunc(baseDir string) function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + path := args[0].AsString() + path, err := homedir.Expand(path) + if err != nil { + return cty.UnknownVal(cty.Bool), fmt.Errorf("failed to expand ~: %s", err) + } + + if !filepath.IsAbs(path) { + path = filepath.Join(baseDir, path) + } + + // Ensure that the path is canonical for the host OS + path = filepath.Clean(path) + + fi, err := os.Stat(path) + if err != nil { + if os.IsNotExist(err) { + return cty.False, nil + } + return cty.UnknownVal(cty.Bool), fmt.Errorf("failed to stat %s", path) + } + + if fi.Mode().IsRegular() { + return cty.True, nil + } + + return cty.False, fmt.Errorf("%s is not a regular file, but %q", + path, fi.Mode().String()) + }, + }) +} + +// MakeFileSetFunc is a function that takes a glob pattern +// and enumerates a file set from that pattern +func MakeFileSetFunc(baseDir string) function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + { + Name: "pattern", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.Set(cty.String)), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + path := args[0].AsString() + pattern := args[1].AsString() + + if !filepath.IsAbs(path) { + path = filepath.Join(baseDir, path) + } + + // Join the path to the glob pattern, while ensuring the full + // pattern is canonical for the host OS. The joined path is + // automatically cleaned during this operation. + pattern = filepath.Join(path, pattern) + + matches, err := doublestar.Glob(pattern) + if err != nil { + return cty.UnknownVal(cty.Set(cty.String)), fmt.Errorf("failed to glob pattern (%s): %s", pattern, err) + } + + var matchVals []cty.Value + for _, match := range matches { + fi, err := os.Stat(match) + + if err != nil { + return cty.UnknownVal(cty.Set(cty.String)), fmt.Errorf("failed to stat (%s): %s", match, err) + } + + if !fi.Mode().IsRegular() { + continue + } + + // Remove the path and file separator from matches. + match, err = filepath.Rel(path, match) + + if err != nil { + return cty.UnknownVal(cty.Set(cty.String)), fmt.Errorf("failed to trim path of match (%s): %s", match, err) + } + + // Replace any remaining file separators with forward slash (/) + // separators for cross-system compatibility. + match = filepath.ToSlash(match) + + matchVals = append(matchVals, cty.StringVal(match)) + } + + if len(matchVals) == 0 { + return cty.SetValEmpty(cty.String), nil + } + + return cty.SetVal(matchVals), nil + }, + }) +} + +// BasenameFunc is a function that takes a string containing a filesystem path +// and removes all except the last portion from it. +var BasenameFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.StringVal(filepath.Base(args[0].AsString())), nil + }, +}) + +// DirnameFunc is a function that takes a string containing a filesystem path +// and removes the last portion from it. 
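A sketch of the pure-string path helpers (illustrative only; the `Basename` and `Dirname` wrappers are defined further down this file, and the expected output assumes a Unix-style host):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty-funcs/filesystem"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	b, _ := filesystem.Basename(cty.StringVal("/etc/nomad.d/server.hcl"))
	fmt.Println(b.AsString()) // server.hcl

	d, _ := filesystem.Dirname(cty.StringVal("/etc/nomad.d/server.hcl"))
	fmt.Println(d.AsString()) // /etc/nomad.d
}
```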
+var DirnameFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.StringVal(filepath.Dir(args[0].AsString())), nil + }, +}) + +// AbsPathFunc is a function that converts a filesystem path to an absolute path +var AbsPathFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + absPath, err := filepath.Abs(args[0].AsString()) + return cty.StringVal(filepath.ToSlash(absPath)), err + }, +}) + +// PathExpandFunc is a function that expands a leading ~ character to the current user's home directory. +var PathExpandFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + + homePath, err := homedir.Expand(args[0].AsString()) + return cty.StringVal(homePath), err + }, +}) + +func readFileBytes(baseDir, path string) ([]byte, error) { + path, err := homedir.Expand(path) + if err != nil { + return nil, fmt.Errorf("failed to expand ~: %s", err) + } + + if !filepath.IsAbs(path) { + path = filepath.Join(baseDir, path) + } + + // Ensure that the path is canonical for the host OS + path = filepath.Clean(path) + + src, err := ioutil.ReadFile(path) + if err != nil { + // ReadFile does not return Terraform-user-friendly error + // messages, so we'll provide our own. + if os.IsNotExist(err) { + return nil, fmt.Errorf("no file exists at %s", path) + } + return nil, fmt.Errorf("failed to read %s", path) + } + + return src, nil +} + +// File reads the contents of the file at the given path. +// +// The file must contain valid UTF-8 bytes, or this function will return an error. +// +// The underlying function implementation works relative to a particular base +// directory, so this wrapper takes a base directory string and uses it to +// construct the underlying function before calling it. +func File(baseDir string, path cty.Value) (cty.Value, error) { + fn := MakeFileFunc(baseDir, false) + return fn.Call([]cty.Value{path}) +} + +// FileExists determines whether a file exists at the given path. +// +// The underlying function implementation works relative to a particular base +// directory, so this wrapper takes a base directory string and uses it to +// construct the underlying function before calling it. +func FileExists(baseDir string, path cty.Value) (cty.Value, error) { + fn := MakeFileExistsFunc(baseDir) + return fn.Call([]cty.Value{path}) +} + +// FileSet enumerates a set of files given a glob pattern +// +// The underlying function implementation works relative to a particular base +// directory, so this wrapper takes a base directory string and uses it to +// construct the underlying function before calling it. +func FileSet(baseDir string, path, pattern cty.Value) (cty.Value, error) { + fn := MakeFileSetFunc(baseDir) + return fn.Call([]cty.Value{path, pattern}) +} + +// Basename takes a string containing a filesystem path and removes all except the last portion from it. +// +// The underlying function implementation works only with the path string and does not access the filesystem itself. 
+// It is therefore unable to take into account filesystem features such as symlinks. +// +// If the path is empty then the result is ".", representing the current working directory. +func Basename(path cty.Value) (cty.Value, error) { + return BasenameFunc.Call([]cty.Value{path}) +} + +// Dirname takes a string containing a filesystem path and removes the last portion from it. +// +// The underlying function implementation works only with the path string and does not access the filesystem itself. +// It is therefore unable to take into account filesystem features such as symlinks. +// +// If the path is empty then the result is ".", representing the current working directory. +func Dirname(path cty.Value) (cty.Value, error) { + return DirnameFunc.Call([]cty.Value{path}) +} + +// Pathexpand takes a string that might begin with a `~` segment, and if so it replaces that segment with +// the current user's home directory path. +// +// The underlying function implementation works only with the path string and does not access the filesystem itself. +// It is therefore unable to take into account filesystem features such as symlinks. +// +// If the leading segment in the path is not `~` then the given path is returned unmodified. +func Pathexpand(path cty.Value) (cty.Value, error) { + return PathExpandFunc.Call([]cty.Value{path}) +} diff --git a/vendor/github.com/hashicorp/go-cty-funcs/uuid/uuid_v4.go b/vendor/github.com/hashicorp/go-cty-funcs/uuid/uuid_v4.go new file mode 100644 index 000000000000..1fb556739517 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty-funcs/uuid/uuid_v4.go @@ -0,0 +1,28 @@ +package uuid + +import ( + "github.com/google/uuid" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +var V4Func = function.New(&function.Spec{ + Params: []function.Parameter{}, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + uuid, err := uuid.NewRandom() + if err != nil { + return cty.UnknownVal(cty.String), err + } + return cty.StringVal(uuid.String()), nil + }, +}) + +// V4 generates and returns a Type-4 UUID in the standard hexadecimal string +// format. +// +// This is not a "pure" function: it will generate a different result for each +// call. 
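A minimal sketch of calling the wrapper defined just below (illustrative only; the printed UUID is random on every run):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-cty-funcs/uuid"
)

func main() {
	v4, err := uuid.V4()
	if err != nil {
		panic(err)
	}
	fmt.Println(v4.AsString()) // e.g. "b5368a2c-..." -- differs each run
}
```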
+func V4() (cty.Value, error) { + return V4Func.Call(nil) +} diff --git a/vendor/github.com/hashicorp/go-cty-funcs/uuid/uuid_v5.go b/vendor/github.com/hashicorp/go-cty-funcs/uuid/uuid_v5.go new file mode 100644 index 000000000000..3cc068b99db8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty-funcs/uuid/uuid_v5.go @@ -0,0 +1,51 @@ +package uuid + +import ( + "fmt" + + uuidv5 "github.com/google/uuid" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +var V5Func = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "namespace", + Type: cty.String, + }, + { + Name: "name", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var namespace uuidv5.UUID + switch { + case args[0].AsString() == "dns": + namespace = uuidv5.NameSpaceDNS + case args[0].AsString() == "url": + namespace = uuidv5.NameSpaceURL + case args[0].AsString() == "oid": + namespace = uuidv5.NameSpaceOID + case args[0].AsString() == "x500": + namespace = uuidv5.NameSpaceX500 + default: + if namespace, err = uuidv5.Parse(args[0].AsString()); err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("uuidv5() doesn't support namespace %s (%v)", args[0].AsString(), err) + } + } + val := args[1].AsString() + return cty.StringVal(uuidv5.NewSHA1(namespace, []byte(val)).String()), nil + }, +}) + +// V5 generates and returns a Type-5 UUID in the standard hexadecimal +// string format. +// +// This is not a "pure" function: it will generate a different result for each +// call. +func V5(namespace cty.Value, name cty.Value) (cty.Value, error) { + return V5Func.Call([]cty.Value{namespace, name}) +} diff --git a/vendor/github.com/hashicorp/hcl/.gitignore b/vendor/github.com/hashicorp/hcl/.gitignore index 15586a2b540d..822fa09f527a 100644 --- a/vendor/github.com/hashicorp/hcl/.gitignore +++ b/vendor/github.com/hashicorp/hcl/.gitignore @@ -1,9 +1,9 @@ -y.output - -# ignore intellij files -.idea -*.iml -*.ipr -*.iws - -*.test +y.output + +# ignore intellij files +.idea +*.iml +*.ipr +*.iws + +*.test diff --git a/vendor/github.com/hashicorp/hcl/.travis.yml b/vendor/github.com/hashicorp/hcl/.travis.yml deleted file mode 100644 index cb63a32161bb..000000000000 --- a/vendor/github.com/hashicorp/hcl/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -sudo: false - -language: go - -go: - - 1.x - - tip - -branches: - only: - - master - -script: make test diff --git a/vendor/github.com/hashicorp/hcl/Makefile b/vendor/github.com/hashicorp/hcl/Makefile index 84fd743f5cc2..9fafd5017cfa 100644 --- a/vendor/github.com/hashicorp/hcl/Makefile +++ b/vendor/github.com/hashicorp/hcl/Makefile @@ -1,18 +1,18 @@ -TEST?=./... - -default: test - -fmt: generate - go fmt ./... - -test: generate - go get -t ./... - go test $(TEST) $(TESTARGS) - -generate: - go generate ./... - -updatedeps: - go get -u golang.org/x/tools/cmd/stringer - -.PHONY: default generate test updatedeps +TEST?=./... + +default: test + +fmt: generate + go fmt ./... + +test: generate + go get -t ./... + go test $(TEST) $(TESTARGS) + +generate: + go generate ./... 
+
+updatedeps:
+	go get -u golang.org/x/tools/cmd/stringer
+
+.PHONY: default generate test updatedeps
diff --git a/vendor/github.com/hashicorp/hcl/appveyor.yml b/vendor/github.com/hashicorp/hcl/appveyor.yml
deleted file mode 100644
index 4db0b7112728..000000000000
--- a/vendor/github.com/hashicorp/hcl/appveyor.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-version: "build-{branch}-{build}"
-image: Visual Studio 2015
-clone_folder: c:\gopath\src\github.com\hashicorp\hcl
-environment:
-  GOPATH: c:\gopath
-init:
-  - git config --global core.autocrlf false
-install:
-- cmd: >-
-    echo %Path%
-
-    go version
-
-    go env
-
-    go get -t ./...
-
-build_script:
-- cmd: go test -v ./...
diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go
index 6ed2fb9bb7af..2ae978f95e54 100644
--- a/vendor/github.com/hashicorp/hcl/decoder.go
+++ b/vendor/github.com/hashicorp/hcl/decoder.go
@@ -13,9 +13,6 @@ import (
 	"github.com/hashicorp/hcl/hcl/token"
 )
 
-// This is the tag to use with structures to have settings for HCL
-const tagName = "hcl"
-
 var (
 	// nodeType holds a reference to the type of ast.Node
 	nodeType reflect.Type = findNodeType()
@@ -597,7 +594,7 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
 	structType := structVal.Type()
 	for i := 0; i < structType.NumField(); i++ {
 		fieldType := structType.Field(i)
-		tagParts := strings.Split(fieldType.Tag.Get(tagName), ",")
+		tagParts := strings.Split(fieldTag(fieldType), ",")
 
 		// Ignore fields with tag name "-"
 		if tagParts[0] == "-" {
@@ -664,7 +661,7 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
 
 		fieldName := field.Name
 
-		tagValue := field.Tag.Get(tagName)
+		tagValue := fieldTag(field)
 		tagParts := strings.SplitN(tagValue, ",", 2)
 		if len(tagParts) >= 2 {
 			switch tagParts[1] {
@@ -765,3 +762,12 @@ func removeCaseFold(xs []string, y string) []string {
 	}
 	return xs
 }
+
+// fieldTag reads the tag with HCL settings for a struct field: it checks the
+// `hcl1` tag first and falls back to the `hcl` tag.
+func fieldTag(fieldType reflect.StructField) string {
+	tag := fieldType.Tag.Get("hcl1")
+	if tag == "" {
+		tag = fieldType.Tag.Get("hcl")
+	}
+	return tag
+}
diff --git a/vendor/github.com/hashicorp/hcl/go.mod b/vendor/github.com/hashicorp/hcl/go.mod
index 4debbbe35805..8cf25010ba79 100644
--- a/vendor/github.com/hashicorp/hcl/go.mod
+++ b/vendor/github.com/hashicorp/hcl/go.mod
@@ -1,3 +1,5 @@
 module github.com/hashicorp/hcl
 
 require github.com/davecgh/go-spew v1.1.1
+
+go 1.14
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
index 6e5ef654bb83..f5b6b93b94b0 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
+++ b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
@@ -25,6 +25,8 @@ func (ObjectType) node() {}
 func (LiteralType) node() {}
 func (ListType) node()    {}
 
+var unknownPos token.Pos
+
 // File represents a single HCL file
 type File struct {
 	Node Node // usually a *ObjectList
@@ -108,7 +110,12 @@ func (o *ObjectList) Elem() *ObjectList {
 }
 
 func (o *ObjectList) Pos() token.Pos {
-	// always returns the uninitiliazed position
+	// If an ObjectList has no items, it won't have a first item
+	// to use as a position, so return the uninitialized position.
+	if len(o.Items) == 0 {
+		return unknownPos
+	}
+	// Otherwise, return the position of the first item.
 	return o.Items[0].Pos()
 }
 
@@ -133,10 +140,10 @@ type ObjectItem struct {
 }
 
 func (o *ObjectItem) Pos() token.Pos {
-	// I'm not entirely sure what causes this, but removing this causes
-	// a test failure. We should investigate at some point.
+	// If a parsed object has no keys, there is no position
+	// for its first element.
 	if len(o.Keys) == 0 {
-		return token.Pos{}
+		return unknownPos
 	}
 
 	return o.Keys[0].Pos()
diff --git a/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md b/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md
index f16e589b0a0c..120bfe98b096 100644
--- a/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md
+++ b/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md
@@ -1,5 +1,29 @@
 # HCL Changelog
 
+## v2.7.0 (October 14, 2020)
+
+### Enhancements
+
+* json: There is a new function `ParseWithStartPos`, which allows overriding the starting position for parsing in case the given JSON bytes are a fragment of a larger document, such as might happen when decoding with `encoding/json` into a `json.RawMessage`. ([#389](https://github.com/hashicorp/hcl/pull/389))
+* json: There is a new function `ParseExpression`, which allows parsing a JSON string directly in expression mode, whereas previously it was only possible to parse a JSON string in body mode. ([#381](https://github.com/hashicorp/hcl/pull/381))
+* hclwrite: `Block` type now supports `SetType` and `SetLabels`, allowing surgical changes to the type and labels of an existing block without having to reconstruct the entire block. ([#340](https://github.com/hashicorp/hcl/pull/340))
+
+### Bugs Fixed
+
+* hclsyntax: Fix confusing error message for bitwise OR operator ([#380](https://github.com/hashicorp/hcl/pull/380))
+* hclsyntax: Several bug fixes for using HCL with values containing cty "marks" ([#404](https://github.com/hashicorp/hcl/pull/404), [#406](https://github.com/hashicorp/hcl/pull/406), [#407](https://github.com/hashicorp/hcl/pull/407))
+
+## v2.6.0 (June 4, 2020)
+
+### Enhancements
+
+* hcldec: Add a new `Spec`, `ValidateSpec`, which allows custom validation of values at decode-time. ([#387](https://github.com/hashicorp/hcl/pull/387))
+
+### Bugs Fixed
+
+* hclsyntax: Fix panic with combination of sequences and null arguments ([#386](https://github.com/hashicorp/hcl/pull/386))
+* hclsyntax: Fix handling of unknown values and sequences ([#386](https://github.com/hashicorp/hcl/pull/386))
+
 ## v2.5.1 (May 14, 2020)
 
 ### Bugs Fixed
diff --git a/vendor/github.com/hashicorp/hcl/v2/README.md b/vendor/github.com/hashicorp/hcl/v2/README.md
index 3d0d509d53e4..9af736c9dd5c 100644
--- a/vendor/github.com/hashicorp/hcl/v2/README.md
+++ b/vendor/github.com/hashicorp/hcl/v2/README.md
@@ -33,11 +33,25 @@ package main
 import (
 	"log"
+	"github.com/hashicorp/hcl/v2/hclsimple"
 )
 
 type Config struct {
-	LogLevel string `hcl:"log_level"`
+	IOMode  string        `hcl:"io_mode"`
+	Service ServiceConfig `hcl:"service,block"`
+}
+
+type ServiceConfig struct {
+	Protocol   string          `hcl:"protocol,label"`
+	Type       string          `hcl:"type,label"`
+	ListenAddr string          `hcl:"listen_addr"`
+	Processes  []ProcessConfig `hcl:"process,block"`
+}
+
+type ProcessConfig struct {
+	Type    string   `hcl:"type,label"`
+	Command []string `hcl:"command"`
 }
 
 func main() {
diff --git a/vendor/github.com/hashicorp/hcl/v2/diagnostic.go b/vendor/github.com/hashicorp/hcl/v2/diagnostic.go
index c320961e11ed..c80535b7a73f 100644
--- a/vendor/github.com/hashicorp/hcl/v2/diagnostic.go
+++ b/vendor/github.com/hashicorp/hcl/v2/diagnostic.go
@@ -22,14 +22,14 @@ const (
 )
 
 // Diagnostic represents information to be presented to a user about an
-// error or anomoly in parsing or evaluating configuration.
+// error or anomaly in parsing or evaluating configuration.
type Diagnostic struct { Severity DiagnosticSeverity // Summary and Detail contain the English-language description of the // problem. Summary is a terse description of the general problem and // detail is a more elaborate, often-multi-sentence description of - // the probem and what might be done to solve it. + // the problem and what might be done to solve it. Summary string Detail string diff --git a/vendor/github.com/hashicorp/hcl/v2/eval_context.go b/vendor/github.com/hashicorp/hcl/v2/eval_context.go index 915910ad8a57..52c8c48728b2 100644 --- a/vendor/github.com/hashicorp/hcl/v2/eval_context.go +++ b/vendor/github.com/hashicorp/hcl/v2/eval_context.go @@ -11,6 +11,8 @@ type EvalContext struct { Variables map[string]cty.Value Functions map[string]function.Function parent *EvalContext + + UnknownVariable func(expr string) (cty.Value, error) } // NewChild returns a new EvalContext that is a child of the receiver. diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/README.md b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/README.md new file mode 100644 index 000000000000..f59ce92e94ed --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/README.md @@ -0,0 +1,184 @@ +# HCL Dynamic Blocks Extension + +This HCL extension implements a special block type named "dynamic" that can +be used to dynamically generate blocks of other types by iterating over +collection values. + +Normally the block structure in an HCL configuration file is rigid, even +though dynamic expressions can be used within attribute values. This is +convenient for most applications since it allows the overall structure of +the document to be decoded easily, but in some applications it is desirable +to allow dynamic block generation within certain portions of the configuration. + +Dynamic block generation is performed using the `dynamic` block type: + +```hcl +toplevel { + nested { + foo = "static block 1" + } + + dynamic "nested" { + for_each = ["a", "b", "c"] + iterator = nested + content { + foo = "dynamic block ${nested.value}" + } + } + + nested { + foo = "static block 2" + } +} +``` + +The above is interpreted as if it were written as follows: + +```hcl +toplevel { + nested { + foo = "static block 1" + } + + nested { + foo = "dynamic block a" + } + + nested { + foo = "dynamic block b" + } + + nested { + foo = "dynamic block c" + } + + nested { + foo = "static block 2" + } +} +``` + +Since HCL block syntax is not normally exposed to the possibility of unknown +values, this extension must make some compromises when asked to iterate over +an unknown collection. If the length of the collection cannot be statically +recognized (because it is an unknown value of list, map, or set type) then +the `dynamic` construct will generate a _single_ dynamic block whose iterator +key and value are both unknown values of the dynamic pseudo-type, thus causing +any attribute values derived from iteration to appear as unknown values. There +is no explicit representation of the fact that the length of the collection may +eventually be different than one. + +## Usage + +Pass a body to function `Expand` to obtain a new body that will, on access +to its content, evaluate and expand any nested `dynamic` blocks. +Dynamic block processing is also automatically propagated into any nested +blocks that are returned, allowing users to nest dynamic blocks inside +one another and to nest dynamic blocks inside other static blocks. 
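To make the wiring concrete, here is a minimal sketch (not part of the vendored README; it assumes `file` is a parsed `*hcl.File` and `schema` is the application's own `hcl.BodySchema`):

```go
ctx := &hcl.EvalContext{
	Variables: map[string]cty.Value{
		"ports": cty.ListVal([]cty.Value{
			cty.NumberIntVal(80),
			cty.NumberIntVal(443),
		}),
	},
}

// Expand returns a body that expands "dynamic" blocks on access;
// decode it with the same schema you would use for the plain body.
body := dynblock.Expand(file.Body, ctx)
content, diags := body.Content(schema)
```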
+
+HCL structural decoding does not normally have access to an `EvalContext`, so
+any variables and functions that should be available to the `for_each`
+and `labels` expressions must be passed in when calling `Expand`. Expressions
+within the `content` block are evaluated separately and so can be passed a
+separate `EvalContext` if desired, during normal attribute expression
+evaluation.
+
+## Detecting Variables
+
+Some applications dynamically generate an `EvalContext` by analyzing which
+variables are referenced by an expression before evaluating it.
+
+This unfortunately requires some extra effort when this analysis is required
+for the context passed to `Expand`: the HCL API requires a schema to be
+provided in order to do any analysis of the blocks in a body, but the low-level
+schema model provides a description of only one level of nested blocks at
+a time, and thus a new schema must be provided for each additional level of
+nesting.
+
+To make this arduous process as convenient as possible, this package provides
+a helper function `WalkForEachVariables`, which returns a `WalkVariablesNode`
+instance that can be used to find variables directly in a given body and also
+determine which nested blocks require recursive calls. Using this mechanism
+requires that the caller be able to look up a schema given a nested block type.
+For _simple_ formats where a specific block type name always has the same schema
+regardless of context, a walk can be implemented as follows:
+
+```go
+func walkVariables(node dynblock.WalkVariablesNode, schema *hcl.BodySchema) []hcl.Traversal {
+	vars, children := node.Visit(schema)
+
+	for _, child := range children {
+		var childSchema *hcl.BodySchema
+		switch child.BlockTypeName {
+		case "a":
+			childSchema = &hcl.BodySchema{
+				Blocks: []hcl.BlockHeaderSchema{
+					{
+						Type:       "b",
+						LabelNames: []string{"key"},
+					},
+				},
+			}
+		case "b":
+			childSchema = &hcl.BodySchema{
+				Attributes: []hcl.AttributeSchema{
+					{
+						Name:     "val",
+						Required: true,
+					},
+				},
+			}
+		default:
+			// Should never happen, because the above cases should be exhaustive
+			// for the application's configuration format.
+			panic(fmt.Errorf("can't find schema for unknown block type %q", child.BlockTypeName))
+		}
+
+		vars = append(vars, walkVariables(child.Node, childSchema)...)
+	}
+
+	return vars
+}
+```
+
+### Detecting Variables with `hcldec` Specifications
+
+For applications that use the higher-level `hcldec` package to decode nested
+configuration structures into `cty` values, the same specification can be used
+to automatically drive the recursive variable-detection walk described above.
+
+The helper function `ForEachVariablesHCLDec` allows an entire recursive
+configuration structure to be analyzed in a single call given a `hcldec.Spec`
+that describes the nested block structure. This means a `hcldec`-based
+application can support dynamic blocks with only a little additional effort:
+
+```go
+func decodeBody(body hcl.Body, spec hcldec.Spec) (cty.Value, hcl.Diagnostics) {
+	// Determine which variables are needed to expand dynamic blocks
+	neededForDynamic := dynblock.ForEachVariablesHCLDec(body, spec)
+
+	// Build a suitable EvalContext and expand dynamic blocks
+	dynCtx := buildEvalContext(neededForDynamic)
+	dynBody := dynblock.Expand(body, dynCtx)
+
+	// Determine which variables are needed to fully decode the expanded body
+	// This will analyze expressions that came both from static blocks in the
+	// original body and from blocks that were dynamically added by Expand.
+	neededForDecode := hcldec.Variables(dynBody, spec)
+
+	// Build a suitable EvalContext and then fully decode the body as per the
+	// hcldec specification.
+	decCtx := buildEvalContext(neededForDecode)
+	return hcldec.Decode(dynBody, spec, decCtx)
+}
+
+func buildEvalContext(needed []hcl.Traversal) *hcl.EvalContext {
+	// (to be implemented by your application)
+}
+```
+
+## Performance
+
+This extension is going quite harshly against the grain of the HCL API, and
+so it uses lots of wrapping objects and temporary data structures to get its
+work done. HCL in general is not suitable for use in high-performance situations
+or situations sensitive to memory pressure, but that is _especially_ true for
+this extension.
diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_body.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_body.go
new file mode 100644
index 000000000000..65a9eab2df40
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_body.go
@@ -0,0 +1,262 @@
+package dynblock
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl/v2"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// expandBody wraps another hcl.Body and expands any "dynamic" blocks found
+// inside whenever Content or PartialContent is called.
+type expandBody struct {
+	original   hcl.Body
+	forEachCtx *hcl.EvalContext
+	iteration  *iteration // non-nil if we're nested inside another "dynamic" block
+
+	// These are used with PartialContent to produce a "remaining items"
+	// body to return. They are nil on all bodies fresh out of the transformer.
+	//
+	// Note that this is re-implemented here rather than delegating to the
+	// existing support required by the underlying body because we need to
+	// retain access to the entire original body on subsequent decode operations
+	// so we can retain any "dynamic" blocks for types we didn't consume
+	// on the first pass.
+	hiddenAttrs  map[string]struct{}
+	hiddenBlocks map[string]hcl.BlockHeaderSchema
+}
+
+func (b *expandBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
+	extSchema := b.extendSchema(schema)
+	rawContent, diags := b.original.Content(extSchema)
+
+	blocks, blockDiags := b.expandBlocks(schema, rawContent.Blocks, false)
+	diags = append(diags, blockDiags...)
+	attrs := b.prepareAttributes(rawContent.Attributes)
+
+	content := &hcl.BodyContent{
+		Attributes:       attrs,
+		Blocks:           blocks,
+		MissingItemRange: b.original.MissingItemRange(),
+	}
+
+	return content, diags
+}
+
+func (b *expandBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
+	extSchema := b.extendSchema(schema)
+	rawContent, _, diags := b.original.PartialContent(extSchema)
+	// We discard the "remain" argument above because we're going to construct
+	// our own remain that also takes into account remaining "dynamic" blocks.
+
+	blocks, blockDiags := b.expandBlocks(schema, rawContent.Blocks, true)
+	diags = append(diags, blockDiags...)
+ attrs := b.prepareAttributes(rawContent.Attributes) + + content := &hcl.BodyContent{ + Attributes: attrs, + Blocks: blocks, + MissingItemRange: b.original.MissingItemRange(), + } + + remain := &expandBody{ + original: b.original, + forEachCtx: b.forEachCtx, + iteration: b.iteration, + hiddenAttrs: make(map[string]struct{}), + hiddenBlocks: make(map[string]hcl.BlockHeaderSchema), + } + for name := range b.hiddenAttrs { + remain.hiddenAttrs[name] = struct{}{} + } + for typeName, blockS := range b.hiddenBlocks { + remain.hiddenBlocks[typeName] = blockS + } + for _, attrS := range schema.Attributes { + remain.hiddenAttrs[attrS.Name] = struct{}{} + } + for _, blockS := range schema.Blocks { + remain.hiddenBlocks[blockS.Type] = blockS + } + + return content, remain, diags +} + +func (b *expandBody) extendSchema(schema *hcl.BodySchema) *hcl.BodySchema { + // We augment the requested schema to also include our special "dynamic" + // block type, since then we'll get instances of it interleaved with + // all of the literal child blocks we must also include. + extSchema := &hcl.BodySchema{ + Attributes: schema.Attributes, + Blocks: make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+len(b.hiddenBlocks)+1), + } + copy(extSchema.Blocks, schema.Blocks) + extSchema.Blocks = append(extSchema.Blocks, dynamicBlockHeaderSchema) + + // If we have any hiddenBlocks then we also need to register those here + // so that a call to "Content" on the underlying body won't fail. + // (We'll filter these out again once we process the result of either + // Content or PartialContent.) + for _, blockS := range b.hiddenBlocks { + extSchema.Blocks = append(extSchema.Blocks, blockS) + } + + // If we have any hiddenAttrs then we also need to register these, for + // the same reason as we deal with hiddenBlocks above. + if len(b.hiddenAttrs) != 0 { + newAttrs := make([]hcl.AttributeSchema, len(schema.Attributes), len(schema.Attributes)+len(b.hiddenAttrs)) + copy(newAttrs, extSchema.Attributes) + for name := range b.hiddenAttrs { + newAttrs = append(newAttrs, hcl.AttributeSchema{ + Name: name, + Required: false, + }) + } + extSchema.Attributes = newAttrs + } + + return extSchema +} + +func (b *expandBody) prepareAttributes(rawAttrs hcl.Attributes) hcl.Attributes { + if len(b.hiddenAttrs) == 0 && b.iteration == nil { + // Easy path: just pass through the attrs from the original body verbatim + return rawAttrs + } + + // Otherwise we have some work to do: we must filter out any attributes + // that are hidden (since a previous PartialContent call already saw these) + // and wrap the expressions of the inner attributes so that they will + // have access to our iteration variables. + attrs := make(hcl.Attributes, len(rawAttrs)) + for name, rawAttr := range rawAttrs { + if _, hidden := b.hiddenAttrs[name]; hidden { + continue + } + if b.iteration != nil { + attr := *rawAttr // shallow copy so we can mutate it + attr.Expr = exprWrap{ + Expression: attr.Expr, + i: b.iteration, + } + attrs[name] = &attr + } else { + // If we have no active iteration then no wrapping is required. 
+ attrs[name] = rawAttr + } + } + return attrs +} + +func (b *expandBody) expandBlocks(schema *hcl.BodySchema, rawBlocks hcl.Blocks, partial bool) (hcl.Blocks, hcl.Diagnostics) { + var blocks hcl.Blocks + var diags hcl.Diagnostics + + for _, rawBlock := range rawBlocks { + switch rawBlock.Type { + case "dynamic": + realBlockType := rawBlock.Labels[0] + if _, hidden := b.hiddenBlocks[realBlockType]; hidden { + continue + } + + var blockS *hcl.BlockHeaderSchema + for _, candidate := range schema.Blocks { + if candidate.Type == realBlockType { + blockS = &candidate + break + } + } + if blockS == nil { + // Not a block type that the caller requested. + if !partial { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported block type", + Detail: fmt.Sprintf("Blocks of type %q are not expected here.", realBlockType), + Subject: &rawBlock.LabelRanges[0], + }) + } + continue + } + + spec, specDiags := b.decodeSpec(blockS, rawBlock) + diags = append(diags, specDiags...) + if specDiags.HasErrors() { + continue + } + + if spec.forEachVal.IsKnown() { + for it := spec.forEachVal.ElementIterator(); it.Next(); { + key, value := it.Element() + i := b.iteration.MakeChild(spec.iteratorName, key, value) + + block, blockDiags := spec.newBlock(i, b.forEachCtx) + diags = append(diags, blockDiags...) + if block != nil { + // Attach our new iteration context so that attributes + // and other nested blocks can refer to our iterator. + block.Body = b.expandChild(block.Body, i) + blocks = append(blocks, block) + } + } + } else { + // If our top-level iteration value isn't known then we're forced + // to compromise since HCL doesn't have any concept of an + // "unknown block". In this case then, we'll produce a single + // dynamic block with the iterator values set to DynamicVal, + // which at least makes the potential for a block visible + // in our result, even though it's not represented in a fully-accurate + // way. + i := b.iteration.MakeChild(spec.iteratorName, cty.DynamicVal, cty.DynamicVal) + block, blockDiags := spec.newBlock(i, b.forEachCtx) + diags = append(diags, blockDiags...) + if block != nil { + block.Body = b.expandChild(block.Body, i) + + // We additionally force all of the leaf attribute values + // in the result to be unknown so the calling application + // can, if necessary, use that as a heuristic to detect + // when a single nested block might be standing in for + // multiple blocks yet to be expanded. This retains the + // structure of the generated body but forces all of its + // leaf attribute values to be unknown. + block.Body = unknownBody{block.Body} + + blocks = append(blocks, block) + } + } + + default: + if _, hidden := b.hiddenBlocks[rawBlock.Type]; !hidden { + // A static block doesn't create a new iteration context, but + // it does need to inherit _our own_ iteration context in + // case it contains expressions that refer to our inherited + // iterators, or nested "dynamic" blocks. 
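+				// The expandChild call below handles both cases by wrapping
+				// the block body in the same expansion machinery used for
+				// dynamically-generated blocks.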
+ expandedBlock := *rawBlock // shallow copy + expandedBlock.Body = b.expandChild(rawBlock.Body, b.iteration) + blocks = append(blocks, &expandedBlock) + } + } + } + + return blocks, diags +} + +func (b *expandBody) expandChild(child hcl.Body, i *iteration) hcl.Body { + chiCtx := i.EvalContext(b.forEachCtx) + ret := Expand(child, chiCtx) + ret.(*expandBody).iteration = i + return ret +} + +func (b *expandBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { + // blocks aren't allowed in JustAttributes mode and this body can + // only produce blocks, so we'll just pass straight through to our + // underlying body here. + return b.original.JustAttributes() +} + +func (b *expandBody) MissingItemRange() hcl.Range { + return b.original.MissingItemRange() +} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_spec.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_spec.go new file mode 100644 index 000000000000..98a51eadd891 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_spec.go @@ -0,0 +1,215 @@ +package dynblock + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +type expandSpec struct { + blockType string + blockTypeRange hcl.Range + defRange hcl.Range + forEachVal cty.Value + iteratorName string + labelExprs []hcl.Expression + contentBody hcl.Body + inherited map[string]*iteration +} + +func (b *expandBody) decodeSpec(blockS *hcl.BlockHeaderSchema, rawSpec *hcl.Block) (*expandSpec, hcl.Diagnostics) { + var diags hcl.Diagnostics + + var schema *hcl.BodySchema + if len(blockS.LabelNames) != 0 { + schema = dynamicBlockBodySchemaLabels + } else { + schema = dynamicBlockBodySchemaNoLabels + } + + specContent, specDiags := rawSpec.Body.Content(schema) + diags = append(diags, specDiags...) + if specDiags.HasErrors() { + return nil, diags + } + + //// for_each attribute + + eachAttr := specContent.Attributes["for_each"] + eachVal, eachDiags := eachAttr.Expr.Value(b.forEachCtx) + diags = append(diags, eachDiags...) + + if !eachVal.CanIterateElements() && eachVal.Type() != cty.DynamicPseudoType { + // We skip this error for DynamicPseudoType because that means we either + // have a null (which is checked immediately below) or an unknown + // (which is handled in the expandBody Content methods). + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid dynamic for_each value", + Detail: fmt.Sprintf("Cannot use a %s value in for_each. An iterable collection is required.", eachVal.Type().FriendlyName()), + Subject: eachAttr.Expr.Range().Ptr(), + Expression: eachAttr.Expr, + EvalContext: b.forEachCtx, + }) + return nil, diags + } + if eachVal.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid dynamic for_each value", + Detail: "Cannot use a null value in for_each.", + Subject: eachAttr.Expr.Range().Ptr(), + Expression: eachAttr.Expr, + EvalContext: b.forEachCtx, + }) + return nil, diags + } + + //// iterator attribute + + iteratorName := blockS.Type + if iteratorAttr := specContent.Attributes["iterator"]; iteratorAttr != nil { + itTraversal, itDiags := hcl.AbsTraversalForExpr(iteratorAttr.Expr) + diags = append(diags, itDiags...) 
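+		// A custom iterator name must be a single bare identifier; the
+		// checks below reject anything more complex, such as a dotted
+		// traversal.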
+ if itDiags.HasErrors() { + return nil, diags + } + + if len(itTraversal) != 1 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid dynamic iterator name", + Detail: "Dynamic iterator must be a single variable name.", + Subject: itTraversal.SourceRange().Ptr(), + }) + return nil, diags + } + + iteratorName = itTraversal.RootName() + } + + var labelExprs []hcl.Expression + if labelsAttr := specContent.Attributes["labels"]; labelsAttr != nil { + var labelDiags hcl.Diagnostics + labelExprs, labelDiags = hcl.ExprList(labelsAttr.Expr) + diags = append(diags, labelDiags...) + if labelDiags.HasErrors() { + return nil, diags + } + + if len(labelExprs) > len(blockS.LabelNames) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extraneous dynamic block label", + Detail: fmt.Sprintf("Blocks of type %q require %d label(s).", blockS.Type, len(blockS.LabelNames)), + Subject: labelExprs[len(blockS.LabelNames)].Range().Ptr(), + }) + return nil, diags + } else if len(labelExprs) < len(blockS.LabelNames) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Insufficient dynamic block labels", + Detail: fmt.Sprintf("Blocks of type %q require %d label(s).", blockS.Type, len(blockS.LabelNames)), + Subject: labelsAttr.Expr.Range().Ptr(), + }) + return nil, diags + } + } + + // Since our schema requests only blocks of type "content", we can assume + // that all entries in specContent.Blocks are content blocks. + if len(specContent.Blocks) == 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing dynamic content block", + Detail: "A dynamic block must have a nested block of type \"content\" to describe the body of each generated block.", + Subject: &specContent.MissingItemRange, + }) + return nil, diags + } + if len(specContent.Blocks) > 1 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extraneous dynamic content block", + Detail: "Only one nested content block is allowed for each dynamic block.", + Subject: &specContent.Blocks[1].DefRange, + }) + return nil, diags + } + + return &expandSpec{ + blockType: blockS.Type, + blockTypeRange: rawSpec.LabelRanges[0], + defRange: rawSpec.DefRange, + forEachVal: eachVal, + iteratorName: iteratorName, + labelExprs: labelExprs, + contentBody: specContent.Blocks[0].Body, + }, diags +} + +func (s *expandSpec) newBlock(i *iteration, ctx *hcl.EvalContext) (*hcl.Block, hcl.Diagnostics) { + var diags hcl.Diagnostics + var labels []string + var labelRanges []hcl.Range + lCtx := i.EvalContext(ctx) + for _, labelExpr := range s.labelExprs { + labelVal, labelDiags := labelExpr.Value(lCtx) + diags = append(diags, labelDiags...) 
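+		// Each label must evaluate to a known, non-null string; the checks
+		// below reject any label value that doesn't meet those requirements.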
+ if labelDiags.HasErrors() { + return nil, diags + } + + var convErr error + labelVal, convErr = convert.Convert(labelVal, cty.String) + if convErr != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid dynamic block label", + Detail: fmt.Sprintf("Cannot use this value as a dynamic block label: %s.", convErr), + Subject: labelExpr.Range().Ptr(), + Expression: labelExpr, + EvalContext: lCtx, + }) + return nil, diags + } + if labelVal.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid dynamic block label", + Detail: "Cannot use a null value as a dynamic block label.", + Subject: labelExpr.Range().Ptr(), + Expression: labelExpr, + EvalContext: lCtx, + }) + return nil, diags + } + if !labelVal.IsKnown() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid dynamic block label", + Detail: "This value is not yet known. Dynamic block labels must be immediately-known values.", + Subject: labelExpr.Range().Ptr(), + Expression: labelExpr, + EvalContext: lCtx, + }) + return nil, diags + } + + labels = append(labels, labelVal.AsString()) + labelRanges = append(labelRanges, labelExpr.Range()) + } + + block := &hcl.Block{ + Type: s.blockType, + TypeRange: s.blockTypeRange, + Labels: labels, + LabelRanges: labelRanges, + DefRange: s.defRange, + Body: s.contentBody, + } + + return block, diags +} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expr_wrap.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expr_wrap.go new file mode 100644 index 000000000000..460a1d2a3185 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expr_wrap.go @@ -0,0 +1,42 @@ +package dynblock + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" +) + +type exprWrap struct { + hcl.Expression + i *iteration +} + +func (e exprWrap) Variables() []hcl.Traversal { + raw := e.Expression.Variables() + ret := make([]hcl.Traversal, 0, len(raw)) + + // Filter out traversals that refer to our iterator name or any + // iterator we've inherited; we're going to provide those in + // our Value wrapper, so the caller doesn't need to know about them. + for _, traversal := range raw { + rootName := traversal.RootName() + if rootName == e.i.IteratorName { + continue + } + if _, inherited := e.i.Inherited[rootName]; inherited { + continue + } + ret = append(ret, traversal) + } + return ret +} + +func (e exprWrap) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + extCtx := e.i.EvalContext(ctx) + return e.Expression.Value(extCtx) +} + +// UnwrapExpression returns the expression being wrapped by this instance. +// This allows the original expression to be recovered by hcl.UnwrapExpression. 
+func (e exprWrap) UnwrapExpression() hcl.Expression { + return e.Expression +} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/iteration.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/iteration.go new file mode 100644 index 000000000000..c566388689b6 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/iteration.go @@ -0,0 +1,66 @@ +package dynblock + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" +) + +type iteration struct { + IteratorName string + Key cty.Value + Value cty.Value + Inherited map[string]*iteration +} + +func (s *expandSpec) MakeIteration(key, value cty.Value) *iteration { + return &iteration{ + IteratorName: s.iteratorName, + Key: key, + Value: value, + Inherited: s.inherited, + } +} + +func (i *iteration) Object() cty.Value { + return cty.ObjectVal(map[string]cty.Value{ + "key": i.Key, + "value": i.Value, + }) +} + +func (i *iteration) EvalContext(base *hcl.EvalContext) *hcl.EvalContext { + new := base.NewChild() + + if i != nil { + new.Variables = map[string]cty.Value{} + for name, otherIt := range i.Inherited { + new.Variables[name] = otherIt.Object() + } + new.Variables[i.IteratorName] = i.Object() + } + + return new +} + +func (i *iteration) MakeChild(iteratorName string, key, value cty.Value) *iteration { + if i == nil { + // Create entirely new root iteration, then + return &iteration{ + IteratorName: iteratorName, + Key: key, + Value: value, + } + } + + inherited := map[string]*iteration{} + for name, otherIt := range i.Inherited { + inherited[name] = otherIt + } + inherited[i.IteratorName] = i + return &iteration{ + IteratorName: iteratorName, + Key: key, + Value: value, + Inherited: inherited, + } +} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/public.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/public.go new file mode 100644 index 000000000000..a5bfd94ec726 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/public.go @@ -0,0 +1,47 @@ +// Package dynblock provides an extension to HCL that allows dynamic +// declaration of nested blocks in certain contexts via a special block type +// named "dynamic". +package dynblock + +import ( + "github.com/hashicorp/hcl/v2" +) + +// Expand "dynamic" blocks in the given body, returning a new body that +// has those blocks expanded. +// +// The given EvalContext is used when evaluating "for_each" and "labels" +// attributes within dynamic blocks, allowing those expressions access to +// variables and functions beyond the iterator variable created by the +// iteration. +// +// Expand returns no diagnostics because no blocks are actually expanded +// until a call to Content or PartialContent on the returned body, which +// will then expand only the blocks selected by the schema. +// +// "dynamic" blocks are also expanded automatically within nested blocks +// in the given body, including within other dynamic blocks, thus allowing +// multi-dimensional iteration. However, it is not possible to +// dynamically-generate the "dynamic" blocks themselves except through nesting. 
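+//
+// For example, the following generates nested "child" and "grandchild"
+// blocks by iterating over collections: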
+//
+//     parent {
+//       dynamic "child" {
+//         for_each = child_objs
+//         content {
+//           dynamic "grandchild" {
+//             for_each = child.value.children
+//             labels   = [grandchild.key]
+//             content {
+//               parent_key = child.key
+//               value      = grandchild.value
+//             }
+//           }
+//         }
+//       }
+//     }
+func Expand(body hcl.Body, ctx *hcl.EvalContext) hcl.Body {
+	return &expandBody{
+		original:   body,
+		forEachCtx: ctx,
+	}
+}
diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/schema.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/schema.go
new file mode 100644
index 000000000000..b3907d6eae92
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/schema.go
@@ -0,0 +1,50 @@
+package dynblock
+
+import "github.com/hashicorp/hcl/v2"
+
+var dynamicBlockHeaderSchema = hcl.BlockHeaderSchema{
+	Type:       "dynamic",
+	LabelNames: []string{"type"},
+}
+
+var dynamicBlockBodySchemaLabels = &hcl.BodySchema{
+	Attributes: []hcl.AttributeSchema{
+		{
+			Name:     "for_each",
+			Required: true,
+		},
+		{
+			Name:     "iterator",
+			Required: false,
+		},
+		{
+			Name:     "labels",
+			Required: true,
+		},
+	},
+	Blocks: []hcl.BlockHeaderSchema{
+		{
+			Type:       "content",
+			LabelNames: nil,
+		},
+	},
+}
+
+var dynamicBlockBodySchemaNoLabels = &hcl.BodySchema{
+	Attributes: []hcl.AttributeSchema{
+		{
+			Name:     "for_each",
+			Required: true,
+		},
+		{
+			Name:     "iterator",
+			Required: false,
+		},
+	},
+	Blocks: []hcl.BlockHeaderSchema{
+		{
+			Type:       "content",
+			LabelNames: nil,
+		},
+	},
+}
diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/unknown_body.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/unknown_body.go
new file mode 100644
index 000000000000..ce98259a58cd
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/unknown_body.go
@@ -0,0 +1,84 @@
+package dynblock
+
+import (
+	"github.com/hashicorp/hcl/v2"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// unknownBody is a funny body that just reports everything inside it as
+// unknown. It uses a given other body as a sort of template for what attributes
+// and blocks are inside -- including source location information -- but
+// substitutes unknown values of unknown type for all attributes.
+//
+// This rather odd process is used to handle expansion of dynamic blocks whose
+// for_each expression is unknown. Since a block cannot itself be unknown,
+// we instead arrange for everything _inside_ the block to be unknown, to
+// give the best possible approximation.
+type unknownBody struct {
+	template hcl.Body
+}
+
+var _ hcl.Body = unknownBody{}
+
+func (b unknownBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
+	content, diags := b.template.Content(schema)
+	content = b.fixupContent(content)
+
+	// We're intentionally preserving the diagnostics reported from the
+	// inner body so that we can still report where the template body doesn't
+	// match the requested schema.
+	return content, diags
+}
+
+func (b unknownBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
+	content, remain, diags := b.template.PartialContent(schema)
+	content = b.fixupContent(content)
+	remain = unknownBody{remain} // remaining content must also be wrapped
+
+	// We're intentionally preserving the diagnostics reported from the
+	// inner body so that we can still report where the template body doesn't
+	// match the requested schema.
+ return content, remain, diags +} + +func (b unknownBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { + attrs, diags := b.template.JustAttributes() + attrs = b.fixupAttrs(attrs) + + // We're intentionally preserving the diagnostics reported from the + // inner body so that we can still report where the template body doesn't + // match the requested schema. + return attrs, diags +} + +func (b unknownBody) MissingItemRange() hcl.Range { + return b.template.MissingItemRange() +} + +func (b unknownBody) fixupContent(got *hcl.BodyContent) *hcl.BodyContent { + ret := &hcl.BodyContent{} + ret.Attributes = b.fixupAttrs(got.Attributes) + if len(got.Blocks) > 0 { + ret.Blocks = make(hcl.Blocks, 0, len(got.Blocks)) + for _, gotBlock := range got.Blocks { + new := *gotBlock // shallow copy + new.Body = unknownBody{gotBlock.Body} // nested content must also be marked unknown + ret.Blocks = append(ret.Blocks, &new) + } + } + + return ret +} + +func (b unknownBody) fixupAttrs(got hcl.Attributes) hcl.Attributes { + if len(got) == 0 { + return nil + } + ret := make(hcl.Attributes, len(got)) + for name, gotAttr := range got { + new := *gotAttr // shallow copy + new.Expr = hcl.StaticExpr(cty.DynamicVal, gotAttr.Expr.Range()) + ret[name] = &new + } + return ret +} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables.go new file mode 100644 index 000000000000..192339295e87 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables.go @@ -0,0 +1,209 @@ +package dynblock + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" +) + +// WalkVariables begins the recursive process of walking all expressions and +// nested blocks in the given body and its child bodies while taking into +// account any "dynamic" blocks. +// +// This function requires that the caller walk through the nested block +// structure in the given body level-by-level so that an appropriate schema +// can be provided at each level to inform further processing. This workflow +// is thus easiest to use for calling applications that have some higher-level +// schema representation available with which to drive this multi-step +// process. If your application uses the hcldec package, you may be able to +// use VariablesHCLDec instead for a more automatic approach. +func WalkVariables(body hcl.Body) WalkVariablesNode { + return WalkVariablesNode{ + body: body, + includeContent: true, + } +} + +// WalkExpandVariables is like Variables but it includes only the variables +// required for successful block expansion, ignoring any variables referenced +// inside block contents. The result is the minimal set of all variables +// required for a call to Expand, excluding variables that would only be +// needed to subsequently call Content or PartialContent on the expanded +// body. +func WalkExpandVariables(body hcl.Body) WalkVariablesNode { + return WalkVariablesNode{ + body: body, + } +} + +type WalkVariablesNode struct { + body hcl.Body + it *iteration + + includeContent bool +} + +type WalkVariablesChild struct { + BlockTypeName string + Node WalkVariablesNode +} + +// Body returns the HCL Body associated with the child node, in case the caller +// wants to do some sort of inspection of it in order to decide what schema +// to pass to Visit. +// +// Most implementations should just fetch a fixed schema based on the +// BlockTypeName field and not access this. 
Deciding on a schema dynamically +// based on the body is a strange thing to do and generally necessary only if +// your caller is already doing other bizarre things with HCL bodies. +func (c WalkVariablesChild) Body() hcl.Body { + return c.Node.body +} + +// Visit returns the variable traversals required for any "dynamic" blocks +// directly in the body associated with this node, and also returns any child +// nodes that must be visited in order to continue the walk. +// +// Each child node has its associated block type name given in its BlockTypeName +// field, which the calling application should use to determine the appropriate +// schema for the content of each child node and pass it to the child node's +// own Visit method to continue the walk recursively. +func (n WalkVariablesNode) Visit(schema *hcl.BodySchema) (vars []hcl.Traversal, children []WalkVariablesChild) { + extSchema := n.extendSchema(schema) + container, _, _ := n.body.PartialContent(extSchema) + if container == nil { + return vars, children + } + + children = make([]WalkVariablesChild, 0, len(container.Blocks)) + + if n.includeContent { + for _, attr := range container.Attributes { + for _, traversal := range attr.Expr.Variables() { + var ours, inherited bool + if n.it != nil { + ours = traversal.RootName() == n.it.IteratorName + _, inherited = n.it.Inherited[traversal.RootName()] + } + + if !(ours || inherited) { + vars = append(vars, traversal) + } + } + } + } + + for _, block := range container.Blocks { + switch block.Type { + + case "dynamic": + blockTypeName := block.Labels[0] + inner, _, _ := block.Body.PartialContent(variableDetectionInnerSchema) + if inner == nil { + continue + } + + iteratorName := blockTypeName + if attr, exists := inner.Attributes["iterator"]; exists { + iterTraversal, _ := hcl.AbsTraversalForExpr(attr.Expr) + if len(iterTraversal) == 0 { + // Ignore this invalid dynamic block, since it'll produce + // an error if someone tries to extract content from it + // later anyway. + continue + } + iteratorName = iterTraversal.RootName() + } + blockIt := n.it.MakeChild(iteratorName, cty.DynamicVal, cty.DynamicVal) + + if attr, exists := inner.Attributes["for_each"]; exists { + // Filter out iterator names inherited from parent blocks + for _, traversal := range attr.Expr.Variables() { + if _, inherited := blockIt.Inherited[traversal.RootName()]; !inherited { + vars = append(vars, traversal) + } + } + } + if attr, exists := inner.Attributes["labels"]; exists { + // Filter out both our own iterator name _and_ those inherited + // from parent blocks, since we provide _both_ of these to the + // label expressions. + for _, traversal := range attr.Expr.Variables() { + ours := traversal.RootName() == iteratorName + _, inherited := blockIt.Inherited[traversal.RootName()] + + if !(ours || inherited) { + vars = append(vars, traversal) + } + } + } + + for _, contentBlock := range inner.Blocks { + // We only request "content" blocks in our schema, so we know + // any blocks we find here will be content blocks. We require + // exactly one content block for actual expansion, but we'll + // be more liberal here so that callers can still collect + // variables from erroneous "dynamic" blocks. 
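+				// Each content block becomes a child node carrying this
+				// dynamic block's iterator, so that the caller can continue
+				// the walk inside it.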
+ children = append(children, WalkVariablesChild{ + BlockTypeName: blockTypeName, + Node: WalkVariablesNode{ + body: contentBlock.Body, + it: blockIt, + includeContent: n.includeContent, + }, + }) + } + + default: + children = append(children, WalkVariablesChild{ + BlockTypeName: block.Type, + Node: WalkVariablesNode{ + body: block.Body, + it: n.it, + includeContent: n.includeContent, + }, + }) + + } + } + + return vars, children +} + +func (n WalkVariablesNode) extendSchema(schema *hcl.BodySchema) *hcl.BodySchema { + // We augment the requested schema to also include our special "dynamic" + // block type, since then we'll get instances of it interleaved with + // all of the literal child blocks we must also include. + extSchema := &hcl.BodySchema{ + Attributes: schema.Attributes, + Blocks: make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+1), + } + copy(extSchema.Blocks, schema.Blocks) + extSchema.Blocks = append(extSchema.Blocks, dynamicBlockHeaderSchema) + + return extSchema +} + +// This is a more relaxed schema than what's in schema.go, since we +// want to maximize the amount of variables we can find even if there +// are erroneous blocks. +var variableDetectionInnerSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "for_each", + Required: false, + }, + { + Name: "labels", + Required: false, + }, + { + Name: "iterator", + Required: false, + }, + }, + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "content", + }, + }, +} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables_hcldec.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables_hcldec.go new file mode 100644 index 000000000000..907ef3eba723 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables_hcldec.go @@ -0,0 +1,43 @@ +package dynblock + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcldec" +) + +// VariablesHCLDec is a wrapper around WalkVariables that uses the given hcldec +// specification to automatically drive the recursive walk through nested +// blocks in the given body. +// +// This is a drop-in replacement for hcldec.Variables which is able to treat +// blocks of type "dynamic" in the same special way that dynblock.Expand would, +// exposing both the variables referenced in the "for_each" and "labels" +// arguments and variables used in the nested "content" block. +func VariablesHCLDec(body hcl.Body, spec hcldec.Spec) []hcl.Traversal { + rootNode := WalkVariables(body) + return walkVariablesWithHCLDec(rootNode, spec) +} + +// ExpandVariablesHCLDec is like VariablesHCLDec but it includes only the +// minimal set of variables required to call Expand, ignoring variables that +// are referenced only inside normal block contents. See WalkExpandVariables +// for more information. +func ExpandVariablesHCLDec(body hcl.Body, spec hcldec.Spec) []hcl.Traversal { + rootNode := WalkExpandVariables(body) + return walkVariablesWithHCLDec(rootNode, spec) +} + +func walkVariablesWithHCLDec(node WalkVariablesNode, spec hcldec.Spec) []hcl.Traversal { + vars, children := node.Visit(hcldec.ImpliedSchema(spec)) + + if len(children) > 0 { + childSpecs := hcldec.ChildBlockTypes(spec) + for _, child := range children { + if childSpec, exists := childSpecs[child.BlockTypeName]; exists { + vars = append(vars, walkVariablesWithHCLDec(child.Node, childSpec)...) 
+			}
+		}
+	}
+
+	return vars
+}
diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/tryfunc/README.md b/vendor/github.com/hashicorp/hcl/v2/ext/tryfunc/README.md
new file mode 100644
index 000000000000..5d56eeca8326
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/v2/ext/tryfunc/README.md
@@ -0,0 +1,44 @@
+# "Try" and "can" functions
+
+This Go package contains two `cty` functions intended for use in an
+`hcl.EvalContext` when evaluating HCL native syntax expressions.
+
+The first function `try` attempts to evaluate each of its argument expressions
+in order until one produces a result without any errors.
+
+```hcl
+try(non_existent_variable, 2) # returns 2
+```
+
+If none of the expressions succeed, the function call fails with all of the
+errors it encountered.
+
+The second function `can` is similar except that it ignores the result of
+the given expression altogether and simply returns `true` if the expression
+produced a successful result or `false` if it produced errors.
+
+Both of these are primarily intended for working with deep data structures
+which might not have a dependable shape. For example, we can use `try` to
+attempt to fetch a value from deep inside a data structure but produce a
+default value if any step of the traversal fails:
+
+```hcl
+result = try(foo.deep[0].lots.of["traversals"], null)
+```
+
+The final argument to `try` should generally be some sort of constant value that
+will always evaluate successfully.
+
+## Using these functions
+
+Languages built on HCL can make `try` and `can` available to user code by
+exporting them in the `hcl.EvalContext` used for expression evaluation:
+
+```go
+ctx := &hcl.EvalContext{
+	Functions: map[string]function.Function{
+		"try": tryfunc.TryFunc,
+		"can": tryfunc.CanFunc,
+	},
+}
+```
diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/tryfunc/tryfunc.go b/vendor/github.com/hashicorp/hcl/v2/ext/tryfunc/tryfunc.go
new file mode 100644
index 000000000000..2f4862f4a341
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/v2/ext/tryfunc/tryfunc.go
@@ -0,0 +1,150 @@
+// Package tryfunc contains some optional functions that can be exposed in
+// HCL-based languages to allow authors to test whether a particular expression
+// can succeed and take dynamic action based on that result.
+//
+// These functions are implemented in terms of the customdecode extension from
+// the sibling directory "customdecode", and so they are only useful when
+// used within an HCL EvalContext. Other systems using cty functions are
+// unlikely to support the HCL-specific "customdecode" extension.
+package tryfunc
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/hcl/v2"
+	"github.com/hashicorp/hcl/v2/ext/customdecode"
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+)
+
+// TryFunc is a variadic function that tries to evaluate all of its arguments
+// in sequence until one succeeds, in which case it returns that result, or
+// returns an error if none of them succeed.
+var TryFunc function.Function
+
+// CanFunc tries to evaluate the expression given in its first argument.
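+// It returns cty.True if evaluation succeeds, cty.False if it produces
+// errors, and an unknown boolean value if the outcome cannot be determined
+// yet because the expression depends on unknown values.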
+var CanFunc function.Function + +func init() { + TryFunc = function.New(&function.Spec{ + VarParam: &function.Parameter{ + Name: "expressions", + Type: customdecode.ExpressionClosureType, + }, + Type: func(args []cty.Value) (cty.Type, error) { + v, err := try(args) + if err != nil { + return cty.NilType, err + } + return v.Type(), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return try(args) + }, + }) + CanFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "expression", + Type: customdecode.ExpressionClosureType, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return can(args[0]) + }, + }) +} + +func try(args []cty.Value) (cty.Value, error) { + if len(args) == 0 { + return cty.NilVal, errors.New("at least one argument is required") + } + + // We'll collect up all of the diagnostics we encounter along the way + // and report them all if none of the expressions succeed, so that the + // user might get some hints on how to make at least one succeed. + var diags hcl.Diagnostics + for _, arg := range args { + closure := customdecode.ExpressionClosureFromVal(arg) + if dependsOnUnknowns(closure.Expression, closure.EvalContext) { + // We can't safely decide if this expression will succeed yet, + // and so our entire result must be unknown until we have + // more information. + return cty.DynamicVal, nil + } + + v, moreDiags := closure.Value() + diags = append(diags, moreDiags...) + if moreDiags.HasErrors() { + continue // try the next one, if there is one to try + } + return v, nil // ignore any accumulated diagnostics if one succeeds + } + + // If we fall out here then none of the expressions succeeded, and so + // we must have at least one diagnostic and we'll return all of them + // so that the user can see the errors related to whichever one they + // were expecting to have succeeded in this case. + // + // Because our function must return a single error value rather than + // diagnostics, we'll construct a suitable error message string + // that will make sense in the context of the function call failure + // diagnostic HCL will eventually wrap this in. + var buf strings.Builder + buf.WriteString("no expression succeeded:\n") + for _, diag := range diags { + if diag.Subject != nil { + buf.WriteString(fmt.Sprintf("- %s (at %s)\n %s\n", diag.Summary, diag.Subject, diag.Detail)) + } else { + buf.WriteString(fmt.Sprintf("- %s\n %s\n", diag.Summary, diag.Detail)) + } + } + buf.WriteString("\nAt least one expression must produce a successful result") + return cty.NilVal, errors.New(buf.String()) +} + +func can(arg cty.Value) (cty.Value, error) { + closure := customdecode.ExpressionClosureFromVal(arg) + if dependsOnUnknowns(closure.Expression, closure.EvalContext) { + // Can't decide yet, then. + return cty.UnknownVal(cty.Bool), nil + } + + _, diags := closure.Value() + if diags.HasErrors() { + return cty.False, nil + } + return cty.True, nil +} + +// dependsOnUnknowns returns true if any of the variables that the given +// expression might access are unknown values or contain unknown values. +// +// This is a conservative result that prefers to return true if there's any +// chance that the expression might derive from an unknown value during its +// evaluation; it is likely to produce false-positives for more complex +// expressions involving deep data structures. 
+func dependsOnUnknowns(expr hcl.Expression, ctx *hcl.EvalContext) bool {
+	for _, traversal := range expr.Variables() {
+		val, diags := traversal.TraverseAbs(ctx)
+		if diags.HasErrors() {
+			// If the traversal returned a definitive error then it must
+			// not traverse through any unknowns.
+			continue
+		}
+		if !val.IsWhollyKnown() {
+			// The value will be unknown if either it refers directly to
+			// an unknown value or if the traversal moves through an unknown
+			// collection. We're using IsWhollyKnown, so this also catches
+			// situations where the traversal refers to a compound data
+			// structure that contains any unknown values. That's important,
+			// because during evaluation the expression might evaluate more
+			// deeply into this structure and encounter the unknowns.
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/README.md b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/README.md
new file mode 100644
index 000000000000..058f1e3d8408
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/README.md
@@ -0,0 +1,135 @@
+# HCL Type Expressions Extension
+
+This HCL extension defines a convention for describing HCL types using function
+call and variable reference syntax, allowing configuration formats to include
+type information provided by users.
+
+The type syntax is processed statically from a hcl.Expression, so it cannot
+use any of the usual language operators. This is similar to type expressions
+in statically-typed programming languages.
+
+```hcl
+variable "example" {
+  type = list(string)
+}
+```
+
+The extension is built using the `hcl.ExprAsKeyword` and `hcl.ExprCall`
+functions, and so it relies on the underlying syntax to define how "keyword"
+and "call" are interpreted. The above shows how they are interpreted in
+the HCL native syntax, while the following shows the same information
+expressed in JSON:
+
+```json
+{
+  "variable": {
+    "example": {
+      "type": "list(string)"
+    }
+  }
+}
+```
+
+Notice that, since we have the additional contextual information that we
+intend to allow only calls and keywords, the JSON syntax is able to parse the
+given string directly as an expression, rather than as a template as would be
+the case for normal expression evaluation.
+
+For more information, see [the godoc reference](http://godoc.org/github.com/hashicorp/hcl/v2/ext/typeexpr).
+
+## Type Expression Syntax
+
+When expressed in the native syntax, the following expressions are permitted
+in a type expression:
+
+* `string` - string
+* `bool` - boolean
+* `number` - number
+* `any` - `cty.DynamicPseudoType` (in function `TypeConstraint` only)
+* `list(<TYPE>)` - list of the type given as an argument
+* `set(<TYPE>)` - set of the type given as an argument
+* `map(<TYPE>)` - map of the type given as an argument
+* `tuple([<TYPES>])` - tuple with the element types given in the single list argument
+* `object({<ATTR NAME> = <TYPE>, ...})` - object with the attributes and corresponding types given in the single map argument
+
+For example:
+
+* `list(string)`
+* `object({name=string,age=number})`
+* `map(object({name=string,age=number}))`
+
+Note that the object constructor syntax is not fully-general for all possible
+object types because it requires the attribute names to be valid identifiers.
+In practice it is expected that any time an object type is being fixed for
+type checking it will be one that has identifiers as its attributes; object
+types with weird attributes generally show up only from arbitrary object
+constructors in configuration files, which are usually treated either as maps
+or as the dynamic pseudo-type.
+
+## Type Constraints as Values
+
+Along with defining a convention for writing down types using HCL expression
+constructs, this package also includes a mechanism for representing types as
+values that can be used as data within an HCL-based language.
+
+`typeexpr.TypeConstraintType` is a
+[`cty` capsule type](https://github.com/zclconf/go-cty/blob/master/docs/types.md#capsule-types)
+that encapsulates `cty.Type` values. You can construct such a value directly
+using the `TypeConstraintVal` function:
+
+```go
+tyVal := typeexpr.TypeConstraintVal(cty.String)
+
+// We can unpack the type from a value using TypeConstraintFromVal
+ty := typeexpr.TypeConstraintFromVal(tyVal)
+```
+
+However, the primary purpose of `typeexpr.TypeConstraintType` is to be
+specified as the type constraint for an argument, in which case it serves
+as a signal for HCL to treat the argument expression as a type constraint
+expression as defined above, rather than as a normal value expression.
+
+"An argument" in the above in practice means the following two locations:
+
+* As the type constraint for a parameter of a cty function that will be
+  used in an `hcl.EvalContext`. In that case, function calls in the HCL
+  native expression syntax will require the argument to be valid type constraint
+  expression syntax and the function implementation will receive a
+  `TypeConstraintType` value as the argument value for that parameter.
+
+* As the type constraint for a `hcldec.AttrSpec` or `hcldec.BlockAttrsSpec`
+  when decoding an HCL body using `hcldec`. In that case, the attributes
+  with that type constraint will be required to be valid type constraint
+  expression syntax and the result will be a `TypeConstraintType` value.
+
+Note that the special handling of these arguments means that an argument
+marked in this way must use the type constraint syntax directly. It is not
+valid to pass in a value of `TypeConstraintType` that has been obtained
+dynamically via some other expression result.
+
+`TypeConstraintType` is provided with the intent of using it internally within
+application code when incorporating type constraint expression syntax into
+an HCL-based language, not to be used for dynamic "programming with types". A
+calling application could support programming with types by defining its _own_
+capsule type, but that is not the purpose of `TypeConstraintType`.
+
+## The "convert" `cty` Function
+
+Building on the `TypeConstraintType` described in the previous section, this
+package also provides `typeexpr.ConvertFunc`, a cty function that
+can be placed into an `hcl.EvalContext` (conventionally named "convert") in
+order to provide a general type conversion function in an HCL-based language:
+
+```hcl
+	foo = convert("true", bool)
+```
+
+The second parameter uses the mechanism described in the previous section to
+require its argument to be a type constraint expression rather than a value
+expression. In doing so, it allows converting with any type constraint that
+can be expressed in this package's type constraint syntax. In the above example,
+the `foo` argument would receive a boolean true, or `cty.True` in `cty` terms.
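+
+As a minimal sketch (assuming the application builds its evaluation context
+elsewhere), the function can be exposed the same way as any other cty
+function:
+
+```go
+ctx := &hcl.EvalContext{
+	Functions: map[string]function.Function{
+		// "convert" is only a convention; any name may be chosen.
+		"convert": typeexpr.ConvertFunc,
+	},
+}
+```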
+ +The target type constraint must always be provided statically using inline +type constraint syntax. There is no way to _dynamically_ select a type +constraint using this function. diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/doc.go b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/doc.go new file mode 100644 index 000000000000..c4b379579d46 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/doc.go @@ -0,0 +1,11 @@ +// Package typeexpr extends HCL with a convention for describing HCL types +// within configuration files. +// +// The type syntax is processed statically from a hcl.Expression, so it cannot +// use any of the usual language operators. This is similar to type expressions +// in statically-typed programming languages. +// +// variable "example" { +// type = list(string) +// } +package typeexpr diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/get_type.go b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/get_type.go new file mode 100644 index 000000000000..11b06897986c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/get_type.go @@ -0,0 +1,196 @@ +package typeexpr + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" +) + +const invalidTypeSummary = "Invalid type specification" + +// getType is the internal implementation of both Type and TypeConstraint, +// using the passed flag to distinguish. When constraint is false, the "any" +// keyword will produce an error. +func getType(expr hcl.Expression, constraint bool) (cty.Type, hcl.Diagnostics) { + // First we'll try for one of our keywords + kw := hcl.ExprAsKeyword(expr) + switch kw { + case "bool": + return cty.Bool, nil + case "string": + return cty.String, nil + case "number": + return cty.Number, nil + case "any": + if constraint { + return cty.DynamicPseudoType, nil + } + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: fmt.Sprintf("The keyword %q cannot be used in this type specification: an exact type is required.", kw), + Subject: expr.Range().Ptr(), + }} + case "list", "map", "set": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: fmt.Sprintf("The %s type constructor requires one argument specifying the element type.", kw), + Subject: expr.Range().Ptr(), + }} + case "object": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "The object type constructor requires one argument specifying the attribute types and values as a map.", + Subject: expr.Range().Ptr(), + }} + case "tuple": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "The tuple type constructor requires one argument specifying the element types as a list.", + Subject: expr.Range().Ptr(), + }} + case "": + // okay! we'll fall through and try processing as a call, then. + default: + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: fmt.Sprintf("The keyword %q is not a valid type specification.", kw), + Subject: expr.Range().Ptr(), + }} + } + + // If we get down here then our expression isn't just a keyword, so we'll + // try to process it as a call instead. 
+ call, diags := hcl.ExprCall(expr) + if diags.HasErrors() { + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "A type specification is either a primitive type keyword (bool, number, string) or a complex type constructor call, like list(string).", + Subject: expr.Range().Ptr(), + }} + } + + switch call.Name { + case "bool", "string", "number", "any": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: fmt.Sprintf("Primitive type keyword %q does not expect arguments.", call.Name), + Subject: &call.ArgsRange, + }} + } + + if len(call.Arguments) != 1 { + contextRange := call.ArgsRange + subjectRange := call.ArgsRange + if len(call.Arguments) > 1 { + // If we have too many arguments (as opposed to too _few_) then + // we'll highlight the extraneous arguments as the diagnostic + // subject. + subjectRange = hcl.RangeBetween(call.Arguments[1].Range(), call.Arguments[len(call.Arguments)-1].Range()) + } + + switch call.Name { + case "list", "set", "map": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: fmt.Sprintf("The %s type constructor requires one argument specifying the element type.", call.Name), + Subject: &subjectRange, + Context: &contextRange, + }} + case "object": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "The object type constructor requires one argument specifying the attribute types and values as a map.", + Subject: &subjectRange, + Context: &contextRange, + }} + case "tuple": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "The tuple type constructor requires one argument specifying the element types as a list.", + Subject: &subjectRange, + Context: &contextRange, + }} + } + } + + switch call.Name { + + case "list": + ety, diags := getType(call.Arguments[0], constraint) + return cty.List(ety), diags + case "set": + ety, diags := getType(call.Arguments[0], constraint) + return cty.Set(ety), diags + case "map": + ety, diags := getType(call.Arguments[0], constraint) + return cty.Map(ety), diags + case "object": + attrDefs, diags := hcl.ExprMap(call.Arguments[0]) + if diags.HasErrors() { + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "Object type constructor requires a map whose keys are attribute names and whose values are the corresponding attribute types.", + Subject: call.Arguments[0].Range().Ptr(), + Context: expr.Range().Ptr(), + }} + } + + atys := make(map[string]cty.Type) + for _, attrDef := range attrDefs { + attrName := hcl.ExprAsKeyword(attrDef.Key) + if attrName == "" { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "Object constructor map keys must be attribute names.", + Subject: attrDef.Key.Range().Ptr(), + Context: expr.Range().Ptr(), + }) + continue + } + aty, attrDiags := getType(attrDef.Value, constraint) + diags = append(diags, attrDiags...) 
+			atys[attrName] = aty
+		}
+		return cty.Object(atys), diags
+	case "tuple":
+		elemDefs, diags := hcl.ExprList(call.Arguments[0])
+		if diags.HasErrors() {
+			return cty.DynamicPseudoType, hcl.Diagnostics{{
+				Severity: hcl.DiagError,
+				Summary:  invalidTypeSummary,
+				Detail:   "Tuple type constructor requires a list of element types.",
+				Subject:  call.Arguments[0].Range().Ptr(),
+				Context:  expr.Range().Ptr(),
+			}}
+		}
+		etys := make([]cty.Type, len(elemDefs))
+		for i, defExpr := range elemDefs {
+			ety, elemDiags := getType(defExpr, constraint)
+			diags = append(diags, elemDiags...)
+			etys[i] = ety
+		}
+		return cty.Tuple(etys), diags
+	default:
+		// Can't access call.Arguments in this path because we've not validated
+		// that it contains exactly one expression here.
+		return cty.DynamicPseudoType, hcl.Diagnostics{{
+			Severity: hcl.DiagError,
+			Summary:  invalidTypeSummary,
+			Detail:   fmt.Sprintf("Keyword %q is not a valid type constructor.", call.Name),
+			Subject:  expr.Range().Ptr(),
+		}}
+	}
+}
diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/public.go b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/public.go
new file mode 100644
index 000000000000..3b8f618fbcd1
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/public.go
@@ -0,0 +1,129 @@
+package typeexpr
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+
+	"github.com/hashicorp/hcl/v2/hclsyntax"
+
+	"github.com/hashicorp/hcl/v2"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// Type attempts to process the given expression as a type expression and, if
+// successful, returns the resulting type. If unsuccessful, error diagnostics
+// are returned.
+func Type(expr hcl.Expression) (cty.Type, hcl.Diagnostics) {
+	return getType(expr, false)
+}
+
+// TypeConstraint attempts to parse the given expression as a type constraint
+// and, if successful, returns the resulting type. If unsuccessful, error
+// diagnostics are returned.
+//
+// A type constraint has the same structure as a type, but it additionally
+// allows the keyword "any" to represent cty.DynamicPseudoType, which is often
+// used as a wildcard in type checking and type conversion operations.
+func TypeConstraint(expr hcl.Expression) (cty.Type, hcl.Diagnostics) {
+	return getType(expr, true)
+}
+
+// TypeString returns a string rendering of the given type as it would be
+// expected to appear in the HCL native syntax.
+//
+// This is primarily intended for showing types to the user in an application
+// that uses typeexpr, where the user can be assumed to be familiar with the
+// type expression syntax. In applications that do not use typeexpr these
+// results may be confusing to the user and so Type.FriendlyName may be
+// preferable, even though it's less precise.
+//
+// TypeString produces reasonable results only for types like what would be
+// produced by the Type and TypeConstraint functions. In particular, it cannot
+// support capsule types.
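+//
+// For example, cty.List(cty.String) is rendered as "list(string)".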
+func TypeString(ty cty.Type) string { + // Easy cases first + switch ty { + case cty.String: + return "string" + case cty.Bool: + return "bool" + case cty.Number: + return "number" + case cty.DynamicPseudoType: + return "any" + } + + if ty.IsCapsuleType() { + panic("TypeString does not support capsule types") + } + + if ty.IsCollectionType() { + ety := ty.ElementType() + etyString := TypeString(ety) + switch { + case ty.IsListType(): + return fmt.Sprintf("list(%s)", etyString) + case ty.IsSetType(): + return fmt.Sprintf("set(%s)", etyString) + case ty.IsMapType(): + return fmt.Sprintf("map(%s)", etyString) + default: + // Should never happen because the above is exhaustive + panic("unsupported collection type") + } + } + + if ty.IsObjectType() { + var buf bytes.Buffer + buf.WriteString("object({") + atys := ty.AttributeTypes() + names := make([]string, 0, len(atys)) + for name := range atys { + names = append(names, name) + } + sort.Strings(names) + first := true + for _, name := range names { + aty := atys[name] + if !first { + buf.WriteByte(',') + } + if !hclsyntax.ValidIdentifier(name) { + // Should never happen for any type produced by this package, + // but we'll do something reasonable here just so we don't + // produce garbage if someone gives us a hand-assembled object + // type that has weird attribute names. + // Using Go-style quoting here isn't perfect, since it doesn't + // exactly match HCL syntax, but it's fine for an edge-case. + buf.WriteString(fmt.Sprintf("%q", name)) + } else { + buf.WriteString(name) + } + buf.WriteByte('=') + buf.WriteString(TypeString(aty)) + first = false + } + buf.WriteString("})") + return buf.String() + } + + if ty.IsTupleType() { + var buf bytes.Buffer + buf.WriteString("tuple([") + etys := ty.TupleElementTypes() + first := true + for _, ety := range etys { + if !first { + buf.WriteByte(',') + } + buf.WriteString(TypeString(ety)) + first = false + } + buf.WriteString("])") + return buf.String() + } + + // Should never happen because we covered all cases above. + panic(fmt.Errorf("unsupported type %#v", ty)) +} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/type_type.go b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/type_type.go new file mode 100644 index 000000000000..5462d82c3c74 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/type_type.go @@ -0,0 +1,118 @@ +package typeexpr + +import ( + "fmt" + "reflect" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/ext/customdecode" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" +) + +// TypeConstraintType is a cty capsule type that allows cty type constraints to +// be used as values. +// +// If TypeConstraintType is used in a context supporting the +// customdecode.CustomExpressionDecoder extension then it will implement +// expression decoding using the TypeConstraint function, thus allowing +// type expressions to be used in contexts where value expressions might +// normally be expected, such as in arguments to function calls. +var TypeConstraintType cty.Type + +// TypeConstraintVal constructs a cty.Value whose type is +// TypeConstraintType. +func TypeConstraintVal(ty cty.Type) cty.Value { + return cty.CapsuleVal(TypeConstraintType, &ty) +} + +// TypeConstraintFromVal extracts the type from a cty.Value of +// TypeConstraintType that was previously constructed using TypeConstraintVal. 
+//
+// If the given value isn't a known, non-null value of TypeConstraintType
+// then this function will panic.
+func TypeConstraintFromVal(v cty.Value) cty.Type {
+	if !v.Type().Equals(TypeConstraintType) {
+		panic("value is not of TypeConstraintType")
+	}
+	ptr := v.EncapsulatedValue().(*cty.Type)
+	return *ptr
+}
+
+// ConvertFunc is a cty function that implements type conversions.
+//
+// Its signature is as follows:
+//     convert(value, type_constraint)
+//
+// ...where type_constraint is a type constraint expression as defined by
+// typeexpr.TypeConstraint.
+//
+// It relies on HCL's customdecode extension and so it's not suitable for use
+// in non-HCL contexts or if you are using a HCL syntax implementation that
+// does not support customdecode for function arguments. However, it _is_
+// supported for function calls in the HCL native expression syntax.
var ConvertFunc function.Function
+
+func init() {
+	TypeConstraintType = cty.CapsuleWithOps("type constraint", reflect.TypeOf(cty.Type{}), &cty.CapsuleOps{
+		ExtensionData: func(key interface{}) interface{} {
+			switch key {
+			case customdecode.CustomExpressionDecoder:
+				return customdecode.CustomExpressionDecoderFunc(
+					func(expr hcl.Expression, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+						ty, diags := TypeConstraint(expr)
+						if diags.HasErrors() {
+							return cty.NilVal, diags
+						}
+						return TypeConstraintVal(ty), nil
+					},
+				)
+			default:
+				return nil
+			}
+		},
+		TypeGoString: func(_ reflect.Type) string {
+			return "typeexpr.TypeConstraintType"
+		},
+		GoString: func(raw interface{}) string {
+			tyPtr := raw.(*cty.Type)
+			return fmt.Sprintf("typeexpr.TypeConstraintVal(%#v)", *tyPtr)
+		},
+		RawEquals: func(a, b interface{}) bool {
+			aPtr := a.(*cty.Type)
+			bPtr := b.(*cty.Type)
+			return (*aPtr).Equals(*bPtr)
+		},
+	})
+
+	ConvertFunc = function.New(&function.Spec{
+		Params: []function.Parameter{
+			{
+				Name:             "value",
+				Type:             cty.DynamicPseudoType,
+				AllowNull:        true,
+				AllowDynamicType: true,
+			},
+			{
+				Name: "type",
+				Type: TypeConstraintType,
+			},
+		},
+		Type: func(args []cty.Value) (cty.Type, error) {
+			wantTypePtr := args[1].EncapsulatedValue().(*cty.Type)
+			got, err := convert.Convert(args[0], *wantTypePtr)
+			if err != nil {
+				return cty.NilType, function.NewArgError(0, err)
+			}
+			return got.Type(), nil
+		},
+		Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+			v, err := convert.Convert(args[0], retType)
+			if err != nil {
+				return cty.NilVal, function.NewArgError(0, err)
+			}
+			return v, nil
+		},
+	})
+}
diff --git a/vendor/github.com/hashicorp/hcl/v2/gohcl/decode.go b/vendor/github.com/hashicorp/hcl/v2/gohcl/decode.go
index f0d589d77748..ac3ac2501b49 100644
--- a/vendor/github.com/hashicorp/hcl/v2/gohcl/decode.go
+++ b/vendor/github.com/hashicorp/hcl/v2/gohcl/decode.go
@@ -11,6 +11,36 @@ import (
 	"github.com/zclconf/go-cty/cty/gocty"
 )
 
+// ExpressionDecoderFunc represents a custom expression decoder for a specific target type.
+type ExpressionDecoderFunc func(expr hcl.Expression, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics
+
+// BodyDecoderFunc represents a custom body decoder for a specific target type.
+type BodyDecoderFunc func(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics
+
+// Decoder is a DecodeBody/DecodeExpression implementation that can be extended with custom decoders registered per target type.
+type Decoder struct {
+	exprConvertors map[reflect.Type]ExpressionDecoderFunc
+	bodyConvertors map[reflect.Type]BodyDecoderFunc
+}
+
+var global = &Decoder{}
+
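The two Register hooks defined next let a caller attach type-specific decoding logic to a Decoder, so that a field of a given Go type is decoded from the raw hcl.Expression or hcl.Body rather than from an evaluated value. A minimal sketch of how this composes with the typeexpr extension above; the Config schema and field names here are illustrative, not part of this patch:

package main

import (
	"fmt"
	"reflect"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/ext/typeexpr"
	"github.com/hashicorp/hcl/v2/gohcl"
	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/zclconf/go-cty/cty"
)

// Config is an illustrative schema, not something defined by this patch.
type Config struct {
	Type cty.Type `hcl:"type"`
}

func main() {
	var d gohcl.Decoder

	// Decode any cty.Type-typed field from the raw expression (e.g.
	// list(string)) as a type constraint, instead of evaluating it.
	d.RegisterExpressionDecoder(reflect.TypeOf(cty.Type{}), func(expr hcl.Expression, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
		ty, diags := typeexpr.TypeConstraint(expr)
		if !diags.HasErrors() {
			*(val.(*cty.Type)) = ty
		}
		return diags
	})

	f, diags := hclsyntax.ParseConfig([]byte("type = list(string)\n"), "example.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}

	var c Config
	if diags := d.DecodeBody(f.Body, nil, &c); diags.HasErrors() {
		panic(diags.Error())
	}
	fmt.Println(typeexpr.TypeString(c.Type)) // list(string)
}

When no decoder is registered for a type, DecodeBody and DecodeExpression fall through to the stock gocty-based path, so existing callers are unaffected.

+// RegisterExpressionDecoder registers a custom expression decoder for a target type.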
+func (d *Decoder) RegisterExpressionDecoder(typ reflect.Type, fn ExpressionDecoderFunc) { + if d.exprConvertors == nil { + d.exprConvertors = map[reflect.Type]ExpressionDecoderFunc{} + } + d.exprConvertors[typ] = fn +} + +// RegisterBlockDecoder registers a custom block decoder for a target type. +func (d *Decoder) RegisterBlockDecoder(typ reflect.Type, fn BodyDecoderFunc) { + if d.bodyConvertors == nil { + d.bodyConvertors = map[reflect.Type]BodyDecoderFunc{} + } + d.bodyConvertors[typ] = fn +} + // DecodeBody extracts the configuration within the given body into the given // value. This value must be a non-nil pointer to either a struct or // a map, where in the former case the configuration will be decoded using @@ -28,27 +57,51 @@ import ( // may still be accessed by a careful caller for static analysis and editor // integration use-cases. func DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics { + return global.DecodeBody(body, ctx, val) +} + +// DecodeBody extracts the configuration within the given body into the given +// value. This value must be a non-nil pointer to either a struct or +// a map, where in the former case the configuration will be decoded using +// struct tags and in the latter case only attributes are allowed and their +// values are decoded into the map. +// +// The given EvalContext is used to resolve any variables or functions in +// expressions encountered while decoding. This may be nil to require only +// constant values, for simple applications that do not support variables or +// functions. +// +// The returned diagnostics should be inspected with its HasErrors method to +// determine if the populated value is valid and complete. If error diagnostics +// are returned then the given value may have been partially-populated but +// may still be accessed by a careful caller for static analysis and editor +// integration use-cases. +func (d *Decoder) DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics { rv := reflect.ValueOf(val) if rv.Kind() != reflect.Ptr { panic(fmt.Sprintf("target value must be a pointer, not %s", rv.Type().String())) } - return decodeBodyToValue(body, ctx, rv.Elem()) + return d.decodeBodyToValue(body, ctx, rv.Elem()) } -func decodeBodyToValue(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics { +func (d *Decoder) decodeBodyToValue(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics { et := val.Type() switch et.Kind() { case reflect.Struct: - return decodeBodyToStruct(body, ctx, val) + return d.decodeBodyToStruct(body, ctx, val) case reflect.Map: - return decodeBodyToMap(body, ctx, val) + return d.decodeBodyToMap(body, ctx, val) default: panic(fmt.Sprintf("target value must be pointer to struct or map, not %s", et.String())) } } -func decodeBodyToStruct(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics { +func (d *Decoder) decodeBodyToStruct(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics { + if fn, ok := d.bodyConvertors[val.Type()]; ok { + return fn(body, ctx, val.Addr().Interface()) + } + schema, partial := ImpliedBodySchema(val.Interface()) var content *hcl.BodyContent @@ -79,7 +132,7 @@ func decodeBodyToStruct(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) } fieldV.Set(reflect.ValueOf(attrs)) default: - diags = append(diags, decodeBodyToValue(leftovers, ctx, fieldV)...) + diags = append(diags, d.decodeBodyToValue(leftovers, ctx, fieldV)...) 
} } @@ -108,7 +161,7 @@ func decodeBodyToStruct(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) case exprType.AssignableTo(field.Type): fieldV.Set(reflect.ValueOf(attr.Expr)) default: - diags = append(diags, DecodeExpression( + diags = append(diags, d.DecodeExpression( attr.Expr, ctx, fieldV.Addr().Interface(), )...) } @@ -123,6 +176,7 @@ func decodeBodyToStruct(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) ty := field.Type isSlice := false isPtr := false + isMap := false if ty.Kind() == reflect.Slice { isSlice = true ty = ty.Elem() @@ -131,8 +185,11 @@ func decodeBodyToStruct(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) isPtr = true ty = ty.Elem() } + if ty.Kind() == reflect.Map { + isMap = true + } - if len(blocks) > 1 && !isSlice { + if len(blocks) > 1 && !isSlice && !(isMap && len(blocks[0].Labels) == 1) { diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, Summary: fmt.Sprintf("Duplicate %s block", typeName), @@ -146,7 +203,7 @@ func decodeBodyToStruct(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) } if len(blocks) == 0 { - if isSlice || isPtr { + if isSlice || isPtr || isMap { if val.Field(fieldIdx).IsNil() { val.Field(fieldIdx).Set(reflect.Zero(field.Type)) } @@ -182,10 +239,10 @@ func decodeBodyToStruct(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) if v.IsNil() { v = reflect.New(ty) } - diags = append(diags, decodeBlockToValue(block, ctx, v.Elem())...) + diags = append(diags, d.decodeBlockToValue(block, ctx, v.Elem())...) sli.Index(i).Set(v) } else { - diags = append(diags, decodeBlockToValue(block, ctx, sli.Index(i))...) + diags = append(diags, d.decodeBlockToValue(block, ctx, sli.Index(i))...) } } @@ -194,6 +251,37 @@ func decodeBodyToStruct(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) } val.Field(fieldIdx).Set(sli) + case isMap && len(blocks[0].Labels) == 1: + v := val.Field(fieldIdx) + if v.IsNil() { + v.Set(reflect.MakeMap(ty)) + } + + for _, block := range blocks { + tyv := ty.Elem() + isPtr := false + if tyv.Kind() == reflect.Ptr { + isPtr = true + tyv = tyv.Elem() + } + ev := reflect.New(tyv) + diags = append(diags, d.decodeBodyToValue(block.Body, ctx, ev.Elem())...) + + blockTags := getFieldTags(tyv) + lv := block.Labels[0] + lfieldIdx := blockTags.Labels[0].FieldIndex + f := ev.Elem().Field(lfieldIdx) + if f.Kind() == reflect.Ptr { + f.Set(reflect.ValueOf(&lv)) + } else { + f.SetString(lv) + } + + if !isPtr { + ev = ev.Elem() + } + v.SetMapIndex(reflect.ValueOf(lv), ev) + } default: block := blocks[0] @@ -202,10 +290,10 @@ func decodeBodyToStruct(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) if v.IsNil() { v = reflect.New(ty) } - diags = append(diags, decodeBlockToValue(block, ctx, v.Elem())...) + diags = append(diags, d.decodeBlockToValue(block, ctx, v.Elem())...) val.Field(fieldIdx).Set(v) } else { - diags = append(diags, decodeBlockToValue(block, ctx, val.Field(fieldIdx))...) + diags = append(diags, d.decodeBlockToValue(block, ctx, val.Field(fieldIdx))...) 
} } @@ -215,7 +303,7 @@ func decodeBodyToStruct(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) return diags } -func decodeBodyToMap(body hcl.Body, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics { +func (d *Decoder) decodeBodyToMap(body hcl.Body, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics { attrs, diags := body.JustAttributes() if attrs == nil { return diags @@ -241,7 +329,7 @@ func decodeBodyToMap(body hcl.Body, ctx *hcl.EvalContext, v reflect.Value) hcl.D return diags } -func decodeBlockToValue(block *hcl.Block, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics { +func (d *Decoder) decodeBlockToValue(block *hcl.Block, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics { var diags hcl.Diagnostics ty := v.Type() @@ -258,13 +346,18 @@ func decodeBlockToValue(block *hcl.Block, ctx *hcl.EvalContext, v reflect.Value) } v.Elem().Set(reflect.ValueOf(attrs)) default: - diags = append(diags, decodeBodyToValue(block.Body, ctx, v)...) + diags = append(diags, d.decodeBodyToValue(block.Body, ctx, v)...) if len(block.Labels) > 0 { blockTags := getFieldTags(ty) for li, lv := range block.Labels { lfieldIdx := blockTags.Labels[li].FieldIndex - v.Field(lfieldIdx).Set(reflect.ValueOf(lv)) + f := v.Field(lfieldIdx) + if f.Kind() == reflect.Ptr { + f.Set(reflect.ValueOf(&lv)) + } else { + f.SetString(lv) + } } } @@ -288,6 +381,28 @@ func decodeBlockToValue(block *hcl.Block, ctx *hcl.EvalContext, v reflect.Value) // may still be accessed by a careful caller for static analysis and editor // integration use-cases. func DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics { + return global.DecodeExpression(expr, ctx, val) +} + +// DecodeExpression extracts the value of the given expression into the given +// value. This value must be something that gocty is able to decode into, +// since the final decoding is delegated to that package. +// +// The given EvalContext is used to resolve any variables or functions in +// expressions encountered while decoding. This may be nil to require only +// constant values, for simple applications that do not support variables or +// functions. +// +// The returned diagnostics should be inspected with its HasErrors method to +// determine if the populated value is valid and complete. If error diagnostics +// are returned then the given value may have been partially-populated but +// may still be accessed by a careful caller for static analysis and editor +// integration use-cases. 
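The new map branch in decodeBodyToStruct above, together with the matching ImpliedBodySchema change that follows, lets a block type whose Go field is a map collect one labeled block per key instead of requiring a slice. A self-contained sketch; the Listener and Config types are hypothetical:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/gohcl"
	"github.com/hashicorp/hcl/v2/hclsyntax"
)

// Listener and Config are hypothetical; what matters is the map-typed
// block field whose element struct carries a label.
type Listener struct {
	Name string `hcl:"name,label"`
	Port int    `hcl:"port,optional"`
}

type Config struct {
	Listeners map[string]*Listener `hcl:"listener,block"`
}

const src = `
listener "http" {
  port = 80
}

listener "https" {
  port = 443
}
`

func main() {
	f, diags := hclsyntax.ParseConfig([]byte(src), "listeners.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}

	var c Config
	if diags := gohcl.DecodeBody(f.Body, nil, &c); diags.HasErrors() {
		panic(diags.Error())
	}
	// Each labeled block becomes a map entry keyed by its label.
	fmt.Println(c.Listeners["https"].Port) // 443
}

Nomad relies on this same shape for envoy_gateway_bind_addresses further down in this diff.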
+func (d *Decoder) DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
+	if diags, ok := d.decodeCustomExpression(expr, ctx, val); ok {
+		return diags
+	}
+
 	srcVal, diags := expr.Value(ctx)
 
 	convTy, err := gocty.ImpliedType(val)
@@ -320,3 +435,13 @@ func DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val interface{}
 
 	return diags
 }
+
+func (d *Decoder) decodeCustomExpression(expr hcl.Expression, ctx *hcl.EvalContext, val interface{}) (hcl.Diagnostics, bool) {
+	ty := reflect.TypeOf(val).Elem()
+	fn, ok := d.exprConvertors[ty]
+	if !ok {
+		return nil, false
+	}
+	diags := fn(expr, ctx, val)
+	return diags, true
+}
diff --git a/vendor/github.com/hashicorp/hcl/v2/gohcl/schema.go b/vendor/github.com/hashicorp/hcl/v2/gohcl/schema.go
index ecf6ddbac627..73d2ec3b3680 100644
--- a/vendor/github.com/hashicorp/hcl/v2/gohcl/schema.go
+++ b/vendor/github.com/hashicorp/hcl/v2/gohcl/schema.go
@@ -80,19 +80,40 @@ func ImpliedBodySchema(val interface{}) (schema *hcl.BodySchema, partial bool) {
 		if fty.Kind() == reflect.Ptr {
 			fty = fty.Elem()
 		}
-		if fty.Kind() != reflect.Struct {
+
+		var labelNames []string
+
+		switch fty.Kind() {
+		case reflect.Struct:
+			ftags := getFieldTags(fty)
+			if len(ftags.Labels) > 0 {
+				labelNames = make([]string, len(ftags.Labels))
+				for i, l := range ftags.Labels {
+					labelNames[i] = l.Name
+				}
+			}
+		case reflect.Map:
+			fme := fty.Elem()
+			if fme.Kind() == reflect.Slice {
+				fme = fme.Elem()
+			}
+			if fme.Kind() == reflect.Ptr {
+				fme = fme.Elem()
+			}
+			if fme.Kind() == reflect.Struct {
+				ftags := getFieldTags(fme)
+				if len(ftags.Labels) > 0 {
+					labelNames = make([]string, len(ftags.Labels))
+					for i, l := range ftags.Labels {
+						labelNames[i] = l.Name
+					}
+				}
+			}
+		default:
 			panic(fmt.Sprintf(
 				"hcl 'block' tag kind cannot be applied to %s field %s: struct required",
 				field.Type.String(), field.Name,
 			))
 		}
-		ftags := getFieldTags(fty)
-		var labelNames []string
-		if len(ftags.Labels) > 0 {
-			labelNames = make([]string, len(ftags.Labels))
-			for i, l := range ftags.Labels {
-				labelNames[i] = l.Name
-			}
-		}
 
 		blockSchemas = append(blockSchemas, hcl.BlockHeaderSchema{
 			Type:       n,
diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/spec.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/spec.go
index a70818e1b5bc..b3cb1f84d9df 100644
--- a/vendor/github.com/hashicorp/hcl/v2/hcldec/spec.go
+++ b/vendor/github.com/hashicorp/hcl/v2/hcldec/spec.go
@@ -1565,6 +1565,52 @@ func (s *TransformFuncSpec) sourceRange(content *hcl.BodyContent, blockLabels []
 	return s.Wrapped.sourceRange(content, blockLabels)
 }
 
+// ValidateSpec is a spec that allows for extended
+// developer-defined validation. The validation function receives the
+// result of the wrapped spec.
+//
+// The Subject field of the returned Diagnostic is optional. If not
+// specified, it is automatically populated with the range covered by
+// the wrapped spec.
+//
+type ValidateSpec struct {
+	Wrapped Spec
+	Func    func(value cty.Value) hcl.Diagnostics
+}
+
+func (s *ValidateSpec) visitSameBodyChildren(cb visitFunc) {
+	cb(s.Wrapped)
+}
+
+func (s *ValidateSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	wrappedVal, diags := s.Wrapped.decode(content, blockLabels, ctx)
+	if diags.HasErrors() {
+		// We won't try to run our function in this case, because it'll probably
+		// generate confusing additional errors that will distract from the
+		// root cause.
+ return cty.UnknownVal(s.impliedType()), diags + } + + validateDiags := s.Func(wrappedVal) + // Auto-populate the Subject fields if they weren't set. + for i := range validateDiags { + if validateDiags[i].Subject == nil { + validateDiags[i].Subject = s.sourceRange(content, blockLabels).Ptr() + } + } + + diags = append(diags, validateDiags...) + return wrappedVal, diags +} + +func (s *ValidateSpec) impliedType() cty.Type { + return s.Wrapped.impliedType() +} + +func (s *ValidateSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + return s.Wrapped.sourceRange(content, blockLabels) +} + // noopSpec is a placeholder spec that does nothing, used in situations where // a non-nil placeholder spec is required. It is not exported because there is // no reason to use it directly; it is always an implementation detail only. diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go index 3fe84ddc383e..7576781a7857 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go @@ -260,6 +260,20 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti } switch { + case expandVal.Type().Equals(cty.DynamicPseudoType): + if expandVal.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid expanding argument value", + Detail: "The expanding argument (indicated by ...) must not be null.", + Subject: expandExpr.Range().Ptr(), + Context: e.Range().Ptr(), + Expression: expandExpr, + EvalContext: ctx, + }) + return cty.DynamicVal, diags + } + return cty.DynamicVal, diags case expandVal.Type().IsTupleType() || expandVal.Type().IsListType() || expandVal.Type().IsSetType(): if expandVal.IsNull() { diags = append(diags, &hcl.Diagnostic{ @@ -406,22 +420,39 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti } else { param = varParam } - argExpr := e.Args[i] - // TODO: we should also unpick a PathError here and show the - // path to the deep value where the error was detected. - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid function argument", - Detail: fmt.Sprintf( - "Invalid value for %q parameter: %s.", - param.Name, err, - ), - Subject: argExpr.StartRange().Ptr(), - Context: e.Range().Ptr(), - Expression: argExpr, - EvalContext: ctx, - }) + // this can happen if an argument is (incorrectly) null. + if i > len(e.Args)-1 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid function argument", + Detail: fmt.Sprintf( + "Invalid value for %q parameter: %s.", + param.Name, err, + ), + Subject: args[len(params)].StartRange().Ptr(), + Context: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, + }) + } else { + argExpr := e.Args[i] + + // TODO: we should also unpick a PathError here and show the + // path to the deep value where the error was detected. 
+ diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid function argument", + Detail: fmt.Sprintf( + "Invalid value for %q parameter: %s.", + param.Name, err, + ), + Subject: argExpr.StartRange().Ptr(), + Context: e.Range().Ptr(), + Expression: argExpr, + EvalContext: ctx, + }) + } default: diags = append(diags, &hcl.Diagnostic{ @@ -940,6 +971,9 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { if collVal.Type() == cty.DynamicPseudoType { return cty.DynamicVal, diags } + // Unmark collection before checking for iterability, because marked + // values cannot be iterated + collVal, marks := collVal.Unmark() if !collVal.CanIterateElements() { diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, @@ -1147,7 +1181,7 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { } } - return cty.ObjectVal(vals), diags + return cty.ObjectVal(vals).WithMarks(marks), diags } else { // Producing a tuple @@ -1223,7 +1257,7 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { return cty.DynamicVal, diags } - return cty.TupleVal(vals), diags + return cty.TupleVal(vals).WithMarks(marks), diags } } diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_template.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_template.go index 9d425115f957..ff9a6e5c63d1 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_template.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_template.go @@ -26,6 +26,9 @@ func (e *TemplateExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) var diags hcl.Diagnostics isKnown := true + // Maintain a set of marks for values used in the template + marks := make(cty.ValueMarks) + for _, part := range e.Parts { partVal, partDiags := part.Value(ctx) diags = append(diags, partDiags...) @@ -71,14 +74,24 @@ func (e *TemplateExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) continue } - buf.WriteString(strVal.AsString()) + // Unmark the part and merge its marks into the set + unmarked, partMarks := strVal.Unmark() + for k, v := range partMarks { + marks[k] = v + } + + buf.WriteString(unmarked.AsString()) } + var ret cty.Value if !isKnown { - return cty.UnknownVal(cty.String), diags + ret = cty.UnknownVal(cty.String) + } else { + ret = cty.StringVal(buf.String()) } - return cty.StringVal(buf.String()), diags + // Apply the full set of marks to the returned value + return ret.WithMarks(marks), diags } func (e *TemplateExpr) Range() hcl.Range { diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token.go index 688b90ca6d50..59f4c4347871 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token.go @@ -202,7 +202,7 @@ func checkInvalidTokens(tokens Tokens) hcl.Diagnostics { case TokenBitwiseAnd: suggestion = " Did you mean boolean AND (\"&&\")?" case TokenBitwiseOr: - suggestion = " Did you mean boolean OR (\"&&\")?" + suggestion = " Did you mean boolean OR (\"||\")?" case TokenBitwiseNot: suggestion = " Did you mean boolean NOT (\"!\")?" 
} @@ -294,12 +294,23 @@ func checkInvalidTokens(tokens Tokens) hcl.Diagnostics { Subject: &tok.Range, }) case TokenInvalid: - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Invalid character", - Detail: "This character is not used within the language.", - Subject: &tok.Range, - }) + chars := string(tok.Bytes) + switch chars { + case "“", "”": + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid character", + Detail: "\"Curly quotes\" are not valid here. These can sometimes be inadvertently introduced when sharing code via documents or discussion forums. It might help to replace the character with a \"straight quote\".", + Subject: &tok.Range, + }) + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid character", + Detail: "This character is not used within the language.", + Subject: &tok.Range, + }) + } } } return diags diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_block.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_block.go index f7d3921b45db..2bcda5b37211 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_block.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclwrite/ast_block.go @@ -10,7 +10,7 @@ type Block struct { leadComments *node typeName *node - labels nodeSet + labels *node open *node body *node close *node @@ -19,7 +19,6 @@ type Block struct { func newBlock() *Block { return &Block{ inTree: newInTree(), - labels: newNodeSet(), } } @@ -35,12 +34,8 @@ func (b *Block) init(typeName string, labels []string) { nameObj := newIdentifier(nameTok) b.leadComments = b.children.Append(newComments(nil)) b.typeName = b.children.Append(nameObj) - for _, label := range labels { - labelToks := TokensForValue(cty.StringVal(label)) - labelObj := newQuoted(labelToks) - labelNode := b.children.Append(labelObj) - b.labels.Add(labelNode) - } + labelsObj := newBlockLabels(labels) + b.labels = b.children.Append(labelsObj) b.open = b.children.AppendUnstructuredTokens(Tokens{ { Type: hclsyntax.TokenOBrace, @@ -79,10 +74,68 @@ func (b *Block) Type() string { return string(typeNameObj.token.Bytes) } +// SetType updates the type name of the block to a given name. +func (b *Block) SetType(typeName string) { + nameTok := newIdentToken(typeName) + nameObj := newIdentifier(nameTok) + b.typeName.ReplaceWith(nameObj) +} + // Labels returns the labels of the block. func (b *Block) Labels() []string { - labelNames := make([]string, 0, len(b.labels)) - list := b.labels.List() + return b.labelsObj().Current() +} + +// SetLabels updates the labels of the block to given labels. +// Since we cannot assume that old and new labels are equal in length, +// remove old labels and insert new ones before TokenOBrace. +func (b *Block) SetLabels(labels []string) { + b.labelsObj().Replace(labels) +} + +// labelsObj returns the internal node content representation of the block +// labels. This is not part of the public API because we're intentionally +// exposing only a limited API to get/set labels on the block itself in a +// manner similar to the main hcl.Block type, but our block accessors all +// use this to get the underlying node content to work with. 
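The SetType and SetLabels methods added above give hclwrite blocks in-place mutation; the labelsObj accessor that follows backs them with a dedicated blockLabels node, so labels can be replaced wholesale without disturbing the surrounding tokens. A short sketch of the new surface (the input is an arbitrary example):

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclwrite"
)

func main() {
	src := []byte(`resource "aws_instance" "old" {
  ami = "ami-123456"
}
`)
	f, diags := hclwrite.ParseConfig(src, "main.tf", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}

	// Rename the block and swap both labels; attributes and trivia
	// inside the body are preserved byte-for-byte.
	b := f.Body().Blocks()[0]
	b.SetType("data")
	b.SetLabels([]string{"aws_ami", "new"})

	fmt.Printf("%s", f.Bytes())
	// data "aws_ami" "new" {
	//   ami = "ami-123456"
	// }
}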
+func (b *Block) labelsObj() *blockLabels { + return b.labels.content.(*blockLabels) +} + +type blockLabels struct { + inTree + + items nodeSet +} + +func newBlockLabels(labels []string) *blockLabels { + ret := &blockLabels{ + inTree: newInTree(), + items: newNodeSet(), + } + + ret.Replace(labels) + return ret +} + +func (bl *blockLabels) Replace(newLabels []string) { + bl.inTree.children.Clear() + bl.items.Clear() + + for _, label := range newLabels { + labelToks := TokensForValue(cty.StringVal(label)) + // Force a new label to use the quoted form, which is the idiomatic + // form. The unquoted form is supported in HCL 2 only for compatibility + // with historical use in HCL 1. + labelObj := newQuoted(labelToks) + labelNode := bl.children.Append(labelObj) + bl.items.Add(labelNode) + } +} + +func (bl *blockLabels) Current() []string { + labelNames := make([]string, 0, len(bl.items)) + list := bl.items.List() for _, label := range list { switch labelObj := label.content.(type) { diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/node.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/node.go index 45669f7f987b..d3a5b72c9ea6 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/node.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclwrite/node.go @@ -130,6 +130,36 @@ func (ns *nodes) AppendNode(n *node) { } } +// Insert inserts a nodeContent at a given position. +// This is just a wrapper for InsertNode. See InsertNode for details. +func (ns *nodes) Insert(pos *node, c nodeContent) *node { + n := &node{ + content: c, + } + ns.InsertNode(pos, n) + n.list = ns + return n +} + +// InsertNode inserts a node at a given position. +// The first argument is a node reference before which to insert. +// To insert it to an empty list, set position to nil. +func (ns *nodes) InsertNode(pos *node, n *node) { + if pos == nil { + // inserts n to empty list. + ns.first = n + ns.last = n + } else { + // inserts n before pos. 
+ pos.before.after = n + n.before = pos.before + pos.before = n + n.after = pos + } + + n.list = ns +} + func (ns *nodes) AppendUnstructuredTokens(tokens Tokens) *node { if len(tokens) == 0 { return nil @@ -177,6 +207,12 @@ func (ns nodeSet) Remove(n *node) { delete(ns, n) } +func (ns nodeSet) Clear() { + for n := range ns { + delete(ns, n) + } +} + func (ns nodeSet) List() []*node { if len(ns) == 0 { return nil diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/parser.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/parser.go index 8e100fb5fb0b..3df51447ae04 100644 --- a/vendor/github.com/hashicorp/hcl/v2/hclwrite/parser.go +++ b/vendor/github.com/hashicorp/hcl/v2/hclwrite/parser.go @@ -289,7 +289,6 @@ func parseAttribute(nativeAttr *hclsyntax.Attribute, from, leadComments, lineCom func parseBlock(nativeBlock *hclsyntax.Block, from, leadComments, lineComments, newline inputTokens) *node { block := &Block{ inTree: newInTree(), - labels: newNodeSet(), } children := block.inTree.children @@ -312,24 +311,13 @@ func parseBlock(nativeBlock *hclsyntax.Block, from, leadComments, lineComments, children.AppendNode(in) } - for _, rng := range nativeBlock.LabelRanges { - var labelTokens inputTokens - before, labelTokens, from = from.Partition(rng) - children.AppendUnstructuredTokens(before.Tokens()) - tokens := labelTokens.Tokens() - var ln *node - if len(tokens) == 1 && tokens[0].Type == hclsyntax.TokenIdent { - ln = newNode(newIdentifier(tokens[0])) - } else { - ln = newNode(newQuoted(tokens)) - } - block.labels.Add(ln) - children.AppendNode(ln) - } + before, labelsNode, from := parseBlockLabels(nativeBlock, from) + block.labels = labelsNode + children.AppendNode(labelsNode) before, oBrace, from := from.Partition(nativeBlock.OpenBraceRange) children.AppendUnstructuredTokens(before.Tokens()) - children.AppendUnstructuredTokens(oBrace.Tokens()) + block.open = children.AppendUnstructuredTokens(oBrace.Tokens()) // We go a bit out of order here: we go hunting for the closing brace // so that we have a delimited body, but then we'll deal with the body @@ -342,7 +330,7 @@ func parseBlock(nativeBlock *hclsyntax.Block, from, leadComments, lineComments, children.AppendNode(body) children.AppendUnstructuredTokens(after.Tokens()) - children.AppendUnstructuredTokens(cBrace.Tokens()) + block.close = children.AppendUnstructuredTokens(cBrace.Tokens()) // stragglers children.AppendUnstructuredTokens(from.Tokens()) @@ -356,6 +344,34 @@ func parseBlock(nativeBlock *hclsyntax.Block, from, leadComments, lineComments, return newNode(block) } +func parseBlockLabels(nativeBlock *hclsyntax.Block, from inputTokens) (inputTokens, *node, inputTokens) { + labelsObj := newBlockLabels(nil) + children := labelsObj.children + + var beforeAll inputTokens + for i, rng := range nativeBlock.LabelRanges { + var before, labelTokens inputTokens + before, labelTokens, from = from.Partition(rng) + if i == 0 { + beforeAll = before + } else { + children.AppendUnstructuredTokens(before.Tokens()) + } + tokens := labelTokens.Tokens() + var ln *node + if len(tokens) == 1 && tokens[0].Type == hclsyntax.TokenIdent { + ln = newNode(newIdentifier(tokens[0])) + } else { + ln = newNode(newQuoted(tokens)) + } + labelsObj.items.Add(ln) + children.AppendNode(ln) + } + + after := from + return beforeAll, newNode(labelsObj), after +} + func parseExpression(nativeExpr hclsyntax.Expression, from inputTokens) *node { expr := newExpression() children := expr.inTree.children @@ -521,8 +537,8 @@ func writerTokens(nativeTokens hclsyntax.Tokens) Tokens { // 
The tokens are assumed to be in source order and non-overlapping, which // will be true if the token sequence from the scanner is used directly. func partitionTokens(toks hclsyntax.Tokens, rng hcl.Range) (start, end int) { - // We us a linear search here because we assume tha in most cases our - // target range is close to the beginning of the sequence, and the seqences + // We use a linear search here because we assume that in most cases our + // target range is close to the beginning of the sequence, and the sequences // are generally small for most reasonable files anyway. for i := 0; ; i++ { if i >= len(toks) { diff --git a/vendor/github.com/hashicorp/hcl/v2/json/parser.go b/vendor/github.com/hashicorp/hcl/v2/json/parser.go index 7a54c51b6e14..6b7420b9eb2e 100644 --- a/vendor/github.com/hashicorp/hcl/v2/json/parser.go +++ b/vendor/github.com/hashicorp/hcl/v2/json/parser.go @@ -8,15 +8,23 @@ import ( "github.com/zclconf/go-cty/cty" ) -func parseFileContent(buf []byte, filename string) (node, hcl.Diagnostics) { - tokens := scan(buf, pos{ - Filename: filename, - Pos: hcl.Pos{ - Byte: 0, - Line: 1, - Column: 1, - }, - }) +func parseFileContent(buf []byte, filename string, start hcl.Pos) (node, hcl.Diagnostics) { + tokens := scan(buf, pos{Filename: filename, Pos: start}) + p := newPeeker(tokens) + node, diags := parseValue(p) + if len(diags) == 0 && p.Peek().Type != tokenEOF { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extraneous data after value", + Detail: "Extra characters appear after the JSON value.", + Subject: p.Peek().Range.Ptr(), + }) + } + return node, diags +} + +func parseExpression(buf []byte, filename string, start hcl.Pos) (node, hcl.Diagnostics) { + tokens := scan(buf, pos{Filename: filename, Pos: start}) p := newPeeker(tokens) node, diags := parseValue(p) if len(diags) == 0 && p.Peek().Type != tokenEOF { diff --git a/vendor/github.com/hashicorp/hcl/v2/json/public.go b/vendor/github.com/hashicorp/hcl/v2/json/public.go index 8dc4a36afe61..d1e4faf59b37 100644 --- a/vendor/github.com/hashicorp/hcl/v2/json/public.go +++ b/vendor/github.com/hashicorp/hcl/v2/json/public.go @@ -18,7 +18,16 @@ import ( // from its HasErrors method. If HasErrors returns true, the file represents // the subset of data that was able to be parsed, which may be none. func Parse(src []byte, filename string) (*hcl.File, hcl.Diagnostics) { - rootNode, diags := parseFileContent(src, filename) + return ParseWithStartPos(src, filename, hcl.Pos{Byte: 0, Line: 1, Column: 1}) +} + +// ParseWithStartPos attempts to parse like json.Parse, but unlike json.Parse +// you can pass a start position of the given JSON as a hcl.Pos. +// +// In most cases json.Parse should be sufficient, but it can be useful for parsing +// a part of JSON with correct positions. +func ParseWithStartPos(src []byte, filename string, start hcl.Pos) (*hcl.File, hcl.Diagnostics) { + rootNode, diags := parseFileContent(src, filename, start) switch rootNode.(type) { case *objectVal, *arrayVal: @@ -62,6 +71,20 @@ func Parse(src []byte, filename string) (*hcl.File, hcl.Diagnostics) { return file, diags } +// ParseExpression parses the given buffer as a standalone JSON expression, +// returning it as an instance of Expression. 
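ParseWithStartPos above, and the expression entry points just below, matter when a JSON fragment is embedded inside a larger document: parsing can begin at the fragment's true position so ranges in diagnostics line up with the enclosing file. A sketch using the new expression entry point:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	hcljson "github.com/hashicorp/hcl/v2/json"
)

func main() {
	// Imagine this fragment was cut out of a larger file starting at
	// line 5, column 3, byte offset 100; diagnostics and ranges are
	// reported relative to the enclosing document.
	src := []byte(`{"cpu": 500}`)
	expr, diags := hcljson.ParseExpressionWithStartPos(src, "job.json", hcl.Pos{Byte: 100, Line: 5, Column: 3})
	if diags.HasErrors() {
		panic(diags.Error())
	}

	v, diags := expr.Value(nil)
	if diags.HasErrors() {
		panic(diags.Error())
	}
	fmt.Printf("%#v\n", v.GetAttr("cpu")) // cty.NumberIntVal(500)
}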
+func ParseExpression(src []byte, filename string) (hcl.Expression, hcl.Diagnostics) { + return ParseExpressionWithStartPos(src, filename, hcl.Pos{Byte: 0, Line: 1, Column: 1}) +} + +// ParseExpressionWithStartPos parses like json.ParseExpression, but unlike +// json.ParseExpression you can pass a start position of the given JSON +// expression as a hcl.Pos. +func ParseExpressionWithStartPos(src []byte, filename string, start hcl.Pos) (hcl.Expression, hcl.Diagnostics) { + node, diags := parseExpression(src, filename, start) + return &expression{src: node}, diags +} + // ParseFile is a convenience wrapper around Parse that first attempts to load // data from the given filename, passing the result to Parse if successful. // diff --git a/vendor/github.com/hashicorp/hcl/v2/ops.go b/vendor/github.com/hashicorp/hcl/v2/ops.go index 5d2910c13010..20c803919fef 100644 --- a/vendor/github.com/hashicorp/hcl/v2/ops.go +++ b/vendor/github.com/hashicorp/hcl/v2/ops.go @@ -217,7 +217,12 @@ func GetAttr(obj cty.Value, attrName string, srcRange *Range) (cty.Value, Diagno } idx := cty.StringVal(attrName) - if obj.HasIndex(idx).False() { + + // Here we drop marks from HasIndex result, in order to allow basic + // traversal of a marked map in the same way we can traverse a marked + // object + hasIndex, _ := obj.HasIndex(idx).Unmark() + if hasIndex.False() { return cty.DynamicVal, Diagnostics{ { Severity: DiagError, diff --git a/vendor/github.com/hashicorp/hcl/v2/traversal.go b/vendor/github.com/hashicorp/hcl/v2/traversal.go index d710197008c7..712e440b786e 100644 --- a/vendor/github.com/hashicorp/hcl/v2/traversal.go +++ b/vendor/github.com/hashicorp/hcl/v2/traversal.go @@ -2,6 +2,7 @@ package hcl import ( "fmt" + "strings" "github.com/zclconf/go-cty/cty" ) @@ -70,12 +71,18 @@ func (t Traversal) TraverseAbs(ctx *EvalContext) (cty.Value, Diagnostics) { thisCtx := ctx hasNonNil := false + var unknownHandler func(string) (cty.Value, error) for thisCtx != nil { + if unknownHandler == nil && thisCtx.UnknownVariable != nil { + unknownHandler = thisCtx.UnknownVariable + } + if thisCtx.Variables == nil { thisCtx = thisCtx.parent continue } hasNonNil = true + val, exists := thisCtx.Variables[name] if exists { return split.Rel.TraverseRel(val) @@ -83,6 +90,13 @@ func (t Traversal) TraverseAbs(ctx *EvalContext) (cty.Value, Diagnostics) { thisCtx = thisCtx.parent } + if unknownHandler != nil { + v, err := unknownHandler(t.toStringValue()) + if err == nil { + return v, nil + } + } + if !hasNonNil { return cty.DynamicVal, Diagnostics{ { @@ -117,6 +131,18 @@ func (t Traversal) TraverseAbs(ctx *EvalContext) (cty.Value, Diagnostics) { } } +func (t Traversal) toStringValue() string { + var o strings.Builder + o.WriteString(t.RootName()) + + for _, v := range t[1:] { + o.WriteByte('.') + o.WriteString(v.(TraverseAttr).Name) + } + + return o.String() +} + // IsRelative returns true if the receiver is a relative traversal, or false // otherwise. func (t Traversal) IsRelative() bool { diff --git a/vendor/github.com/hashicorp/nomad/api/constraint.go b/vendor/github.com/hashicorp/nomad/api/constraint.go index 3233a3bfdcd0..7213270e53f0 100644 --- a/vendor/github.com/hashicorp/nomad/api/constraint.go +++ b/vendor/github.com/hashicorp/nomad/api/constraint.go @@ -15,9 +15,9 @@ const ( // Constraint is used to serialize a job placement constraint. 
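The unknownHandler path added to TraverseAbs above is a fallback resolver: when no EvalContext in the parent chain defines the traversal's root name, the whole dotted reference is handed to the hook as a string and its result is returned as-is, without applying the remaining traversal steps. The UnknownVariable field itself is added to hcl.EvalContext elsewhere in this patch and does not exist in upstream hcl/v2, so the sketch below compiles only against this patched vendored copy:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	expr, diags := hclsyntax.ParseExpression([]byte("node.unique.name"), "expr.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}

	ctx := &hcl.EvalContext{
		Variables: map[string]cty.Value{},
		// UnknownVariable exists only in this patched copy of hcl/v2; it
		// receives the full dotted traversal, e.g. "node.unique.name".
		UnknownVariable: func(name string) (cty.Value, error) {
			return cty.StringVal("${" + name + "}"), nil
		},
	}

	v, diags := expr.Value(ctx)
	if diags.HasErrors() {
		panic(diags.Error())
	}
	fmt.Println(v.AsString()) // ${node.unique.name}
}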
type Constraint struct { - LTarget string - RTarget string - Operand string + LTarget string `hcl:"attribute,optional"` + RTarget string `hcl:"value,optional"` + Operand string `hcl:"operator,optional"` } // NewConstraint generates a new job placement constraint. diff --git a/vendor/github.com/hashicorp/nomad/api/csi.go b/vendor/github.com/hashicorp/nomad/api/csi.go index 47d7017a2553..3c657f557e90 100644 --- a/vendor/github.com/hashicorp/nomad/api/csi.go +++ b/vendor/github.com/hashicorp/nomad/api/csi.go @@ -89,9 +89,9 @@ const ( ) type CSIMountOptions struct { - FSType string `hcl:"fs_type"` - MountFlags []string `hcl:"mount_flags"` - ExtraKeysHCL []string `hcl:",unusedKeys" json:"-"` // report unexpected keys + FSType string `hcl:"fs_type,optional"` + MountFlags []string `hcl:"mount_flags,optional"` + ExtraKeysHCL []string `hcl1:",unusedKeys" json:"-"` // report unexpected keys } type CSISecrets map[string]string @@ -133,7 +133,7 @@ type CSIVolume struct { ModifyIndex uint64 // ExtraKeysHCL is used by the hcl parser to report unexpected keys - ExtraKeysHCL []string `hcl:",unusedKeys" json:"-"` + ExtraKeysHCL []string `hcl1:",unusedKeys" json:"-"` } // allocs is called after we query the volume (creating this CSIVolume struct) to collapse diff --git a/vendor/github.com/hashicorp/nomad/api/jobs.go b/vendor/github.com/hashicorp/nomad/api/jobs.go index fc195238e366..7b4cf18a588c 100644 --- a/vendor/github.com/hashicorp/nomad/api/jobs.go +++ b/vendor/github.com/hashicorp/nomad/api/jobs.go @@ -50,6 +50,9 @@ type JobsParseRequest struct { // JobHCL is an hcl jobspec JobHCL string + // HCLv1 indicates whether the JobHCL should be parsed with the hcl v1 parser + HCLv1 bool `json:"hclv1,omitempty"` + // Canonicalize is a flag as to if the server should return default values // for unset fields Canonicalize bool @@ -439,15 +442,15 @@ type periodicForceResponse struct { // UpdateStrategy defines a task groups update strategy. 
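The new HCLv1 flag on JobsParseRequest above lets API clients ask the server-side parse endpoint (/v1/jobs/parse) to run the legacy HCL1 parser instead of HCL2. A sketch of the wire payload such a request carries:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/hashicorp/nomad/api"
)

func main() {
	req := api.JobsParseRequest{
		JobHCL:       `job "example" { datacenters = ["dc1"] }`,
		HCLv1:        true, // ask the server for the hcl v1 parser
		Canonicalize: true,
	}
	buf, err := json.MarshalIndent(req, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf))
	// {
	//   "JobHCL": "job \"example\" { datacenters = [\"dc1\"] }",
	//   "hclv1": true,
	//   "Canonicalize": true
	// }
}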
type UpdateStrategy struct { - Stagger *time.Duration `mapstructure:"stagger"` - MaxParallel *int `mapstructure:"max_parallel"` - HealthCheck *string `mapstructure:"health_check"` - MinHealthyTime *time.Duration `mapstructure:"min_healthy_time"` - HealthyDeadline *time.Duration `mapstructure:"healthy_deadline"` - ProgressDeadline *time.Duration `mapstructure:"progress_deadline"` - Canary *int `mapstructure:"canary"` - AutoRevert *bool `mapstructure:"auto_revert"` - AutoPromote *bool `mapstructure:"auto_promote"` + Stagger *time.Duration `mapstructure:"stagger" hcl:"stagger,optional"` + MaxParallel *int `mapstructure:"max_parallel" hcl:"max_parallel,optional"` + HealthCheck *string `mapstructure:"health_check" hcl:"health_check,optional"` + MinHealthyTime *time.Duration `mapstructure:"min_healthy_time" hcl:"min_healthy_time,optional"` + HealthyDeadline *time.Duration `mapstructure:"healthy_deadline" hcl:"healthy_deadline,optional"` + ProgressDeadline *time.Duration `mapstructure:"progress_deadline" hcl:"progress_deadline,optional"` + Canary *int `mapstructure:"canary" hcl:"canary,optional"` + AutoRevert *bool `mapstructure:"auto_revert" hcl:"auto_revert,optional"` + AutoPromote *bool `mapstructure:"auto_promote" hcl:"auto_promote,optional"` } // DefaultUpdateStrategy provides a baseline that can be used to upgrade @@ -640,8 +643,8 @@ func (u *UpdateStrategy) Empty() bool { } type Multiregion struct { - Strategy *MultiregionStrategy - Regions []*MultiregionRegion + Strategy *MultiregionStrategy `hcl:"strategy,block"` + Regions []*MultiregionRegion `hcl:"region,block"` } func (m *Multiregion) Canonicalize() { @@ -700,24 +703,24 @@ func (m *Multiregion) Copy() *Multiregion { } type MultiregionStrategy struct { - MaxParallel *int `mapstructure:"max_parallel"` - OnFailure *string `mapstructure:"on_failure"` + MaxParallel *int `mapstructure:"max_parallel" hcl:"max_parallel,optional"` + OnFailure *string `mapstructure:"on_failure" hcl:"on_failure,optional"` } type MultiregionRegion struct { - Name string - Count *int - Datacenters []string - Meta map[string]string + Name string `hcl:",label"` + Count *int `hcl:"count,optional"` + Datacenters []string `hcl:"datacenters,optional"` + Meta map[string]string `hcl:"meta,block"` } // PeriodicConfig is for serializing periodic config for a job. type PeriodicConfig struct { - Enabled *bool - Spec *string + Enabled *bool `hcl:"enabled,optional"` + Spec *string `hcl:"cron,optional"` SpecType *string - ProhibitOverlap *bool `mapstructure:"prohibit_overlap"` - TimeZone *string `mapstructure:"time_zone"` + ProhibitOverlap *bool `mapstructure:"prohibit_overlap" hcl:"prohibit_overlap,optional"` + TimeZone *string `mapstructure:"time_zone" hcl:"time_zone,optional"` } func (p *PeriodicConfig) Canonicalize() { @@ -779,38 +782,43 @@ func (p *PeriodicConfig) GetLocation() (*time.Location, error) { // ParameterizedJobConfig is used to configure the parameterized job. type ParameterizedJobConfig struct { - Payload string - MetaRequired []string `mapstructure:"meta_required"` - MetaOptional []string `mapstructure:"meta_optional"` + Payload string `hcl:"payload,optional"` + MetaRequired []string `mapstructure:"meta_required" hcl:"meta_required,optional"` + MetaOptional []string `mapstructure:"meta_optional" hcl:"meta_optional,optional"` } // Job is used to serialize a job. 
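With the tags above and the Job struct that follows, a jobspec body becomes directly decodable into api.Job by the HCL2 gohcl package. A reduced sketch; Nomad's full parser additionally wraps the body in a job block and registers custom decoders (for example for duration-valued fields), so treat this as illustrative only:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/gohcl"
	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/hashicorp/nomad/api"
)

const src = `
id          = "example"
datacenters = ["dc1"]
type        = "service"

update {
  max_parallel = 2
  auto_revert  = true
}
`

func main() {
	f, diags := hclsyntax.ParseConfig([]byte(src), "example.nomad", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}

	// Fields tagged hcl:"...,optional" arrive as attributes, and
	// hcl:"update,block" pulls the update stanza into UpdateStrategy.
	var job api.Job
	if diags := gohcl.DecodeBody(f.Body, nil, &job); diags.HasErrors() {
		panic(diags.Error())
	}
	fmt.Println(*job.ID, *job.Update.MaxParallel) // example 2
}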
type Job struct { + /* Fields parsed from HCL config */ + + Region *string `hcl:"region,optional"` + Namespace *string `hcl:"namespace,optional"` + ID *string `hcl:"id,optional"` + Name *string `hcl:"name,optional"` + Type *string `hcl:"type,optional"` + Priority *int `hcl:"priority,optional"` + AllAtOnce *bool `mapstructure:"all_at_once" hcl:"all_at_once,optional"` + Datacenters []string `hcl:"datacenters,optional"` + Constraints []*Constraint `hcl:"constraint,block"` + Affinities []*Affinity `hcl:"affinity,block"` + TaskGroups []*TaskGroup `hcl:"group,block"` + Update *UpdateStrategy `hcl:"update,block"` + Multiregion *Multiregion `hcl:"multiregion,block"` + Spreads []*Spread `hcl:"spread,block"` + Periodic *PeriodicConfig `hcl:"periodic,block"` + ParameterizedJob *ParameterizedJobConfig `hcl:"parameterized,block"` + Reschedule *ReschedulePolicy `hcl:"reschedule,block"` + Migrate *MigrateStrategy `hcl:"migrate,block"` + Meta map[string]string `hcl:"meta,block"` + ConsulToken *string `mapstructure:"consul_token" hcl:"consul_token,optional"` + VaultToken *string `mapstructure:"vault_token" hcl:"vault_token,optional"` + + /* Fields set by server, not sourced from job config file */ + Stop *bool - Region *string - Namespace *string - ID *string ParentID *string - Name *string - Type *string - Priority *int - AllAtOnce *bool `mapstructure:"all_at_once"` - Datacenters []string - Constraints []*Constraint - Affinities []*Affinity - TaskGroups []*TaskGroup - Update *UpdateStrategy - Multiregion *Multiregion - Spreads []*Spread - Periodic *PeriodicConfig - ParameterizedJob *ParameterizedJobConfig Dispatched bool Payload []byte - Reschedule *ReschedulePolicy - Migrate *MigrateStrategy - Meta map[string]string - ConsulToken *string `mapstructure:"consul_token"` - VaultToken *string `mapstructure:"vault_token"` VaultNamespace *string `mapstructure:"vault_namespace"` NomadTokenID *string `mapstructure:"nomad_token_id"` Status *string diff --git a/vendor/github.com/hashicorp/nomad/api/resources.go b/vendor/github.com/hashicorp/nomad/api/resources.go index 74ef55e4e1fb..f6328af92e58 100644 --- a/vendor/github.com/hashicorp/nomad/api/resources.go +++ b/vendor/github.com/hashicorp/nomad/api/resources.go @@ -7,17 +7,17 @@ import ( // Resources encapsulates the required resources of // a given task or task group. type Resources struct { - CPU *int - MemoryMB *int `mapstructure:"memory"` - DiskMB *int `mapstructure:"disk"` - Networks []*NetworkResource - Devices []*RequestedDevice + CPU *int `hcl:"cpu,optional"` + MemoryMB *int `mapstructure:"memory" hcl:"memory,optional"` + DiskMB *int `mapstructure:"disk" hcl:"disk,optional"` + Networks []*NetworkResource `hcl:"network,block"` + Devices []*RequestedDevice `hcl:"device,block"` // COMPAT(0.10) // XXX Deprecated. Please do not use. The field will be removed in Nomad // 0.10 and is only being kept to allow any references to be removed before // then. 
- IOPS *int + IOPS *int `hcl:"iops,optional"` } // Canonicalize will supply missing values in the cases @@ -84,29 +84,29 @@ func (r *Resources) Merge(other *Resources) { } type Port struct { - Label string - Value int `mapstructure:"static"` - To int `mapstructure:"to"` - HostNetwork string `mapstructure:"host_network"` + Label string `hcl:",label"` + Value int `mapstructure:"static" hcl:"static,optional"` + To int `mapstructure:"to" hcl:"to,optional"` + HostNetwork string `mapstructure:"host_network" hcl:"host_network,optional"` } type DNSConfig struct { - Servers []string `mapstructure:"servers"` - Searches []string `mapstructure:"searches"` - Options []string `mapstructure:"options"` + Servers []string `mapstructure:"servers" hcl:"servers,optional"` + Searches []string `mapstructure:"searches" hcl:"searches,optional"` + Options []string `mapstructure:"options" hcl:"options,optional"` } // NetworkResource is used to describe required network // resources of a given task. type NetworkResource struct { - Mode string - Device string - CIDR string - IP string - MBits *int - DNS *DNSConfig - ReservedPorts []Port - DynamicPorts []Port + Mode string `hcl:"mode,optional"` + Device string `hcl:"device,optional"` + CIDR string `hcl:"cidr,optional"` + IP string `hcl:"ip,optional"` + MBits *int `hcl:"mbits,optional"` + DNS *DNSConfig `hcl:"dns,block"` + ReservedPorts []Port `hcl:"reserved_ports,block"` + DynamicPorts []Port `hcl:"port,block"` } func (n *NetworkResource) Canonicalize() { @@ -226,18 +226,18 @@ type RequestedDevice struct { // * "gpu" // * "nvidia/gpu" // * "nvidia/gpu/GTX2080Ti" - Name string + Name string `hcl:",label"` // Count is the number of requested devices - Count *uint64 + Count *uint64 `hcl:"count,optional"` // Constraints are a set of constraints to apply when selecting the device // to use. - Constraints []*Constraint + Constraints []*Constraint `hcl:"constraint,block"` // Affinities are a set of affinites to apply when selecting the device // to use. - Affinities []*Affinity + Affinities []*Affinity `hcl:"affinity,block"` } func (d *RequestedDevice) Canonicalize() { diff --git a/vendor/github.com/hashicorp/nomad/api/scaling.go b/vendor/github.com/hashicorp/nomad/api/scaling.go index 98c48f06c9dd..56a0a964ac79 100644 --- a/vendor/github.com/hashicorp/nomad/api/scaling.go +++ b/vendor/github.com/hashicorp/nomad/api/scaling.go @@ -60,14 +60,19 @@ type ScalingRequest struct { // ScalingPolicy is the user-specified API object for an autoscaling policy type ScalingPolicy struct { + /* fields set by user in HCL config */ + + Min *int64 `hcl:"min,optional"` + Max *int64 `hcl:"max,optional"` + Policy map[string]interface{} `hcl:"policy,block"` + Enabled *bool `hcl:"enabled,optional"` + + /* fields set by server */ + ID string Namespace string Type string Target map[string]string - Min *int64 - Max *int64 - Policy map[string]interface{} - Enabled *bool CreateIndex uint64 ModifyIndex uint64 } diff --git a/vendor/github.com/hashicorp/nomad/api/services.go b/vendor/github.com/hashicorp/nomad/api/services.go index 9640119d64bb..fb9220de490c 100644 --- a/vendor/github.com/hashicorp/nomad/api/services.go +++ b/vendor/github.com/hashicorp/nomad/api/services.go @@ -8,9 +8,9 @@ import ( // CheckRestart describes if and when a task should be restarted based on // failing health checks. 
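The hcl:",label" tag on Port above is what turns a labeled port "http" block into an entry of DynamicPorts, with the label landing in Port.Label. A small sketch against these structs:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/gohcl"
	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/hashicorp/nomad/api"
)

const src = `
mode = "bridge"

port "http" {
  static = 8080
  to     = 80
}
`

func main() {
	f, diags := hclsyntax.ParseConfig([]byte(src), "network.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}

	var n api.NetworkResource
	if diags := gohcl.DecodeBody(f.Body, nil, &n); diags.HasErrors() {
		panic(diags.Error())
	}
	// The block label lands in Port.Label via the hcl:",label" tag.
	fmt.Println(n.DynamicPorts[0].Label, n.DynamicPorts[0].Value) // http 8080
}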
type CheckRestart struct { - Limit int `mapstructure:"limit"` - Grace *time.Duration `mapstructure:"grace"` - IgnoreWarnings bool `mapstructure:"ignore_warnings"` + Limit int `mapstructure:"limit" hcl:"limit,optional"` + Grace *time.Duration `mapstructure:"grace" hcl:"grace,optional"` + IgnoreWarnings bool `mapstructure:"ignore_warnings" hcl:"ignore_warnings,optional"` } // Canonicalize CheckRestart fields if not nil. @@ -73,46 +73,46 @@ func (c *CheckRestart) Merge(o *CheckRestart) *CheckRestart { // ServiceCheck represents the consul health check that Nomad registers. type ServiceCheck struct { //FIXME Id is unused. Remove? - Id string - Name string - Type string - Command string - Args []string - Path string - Protocol string - PortLabel string `mapstructure:"port"` - Expose bool - AddressMode string `mapstructure:"address_mode"` - Interval time.Duration - Timeout time.Duration - InitialStatus string `mapstructure:"initial_status"` - TLSSkipVerify bool `mapstructure:"tls_skip_verify"` - Header map[string][]string - Method string - CheckRestart *CheckRestart `mapstructure:"check_restart"` - GRPCService string `mapstructure:"grpc_service"` - GRPCUseTLS bool `mapstructure:"grpc_use_tls"` - TaskName string `mapstructure:"task"` - SuccessBeforePassing int `mapstructure:"success_before_passing"` - FailuresBeforeCritical int `mapstructure:"failures_before_critical"` + Id string `hcl:"id,optional"` + Name string `hcl:"name,optional"` + Type string `hcl:"type,optional"` + Command string `hcl:"command,optional"` + Args []string `hcl:"args,optional"` + Path string `hcl:"path,optional"` + Protocol string `hcl:"protocol,optional"` + PortLabel string `mapstructure:"port" hcl:"port,optional"` + Expose bool `hcl:"expose,optional"` + AddressMode string `mapstructure:"address_mode" hcl:"address_mode,optional"` + Interval time.Duration `hcl:"interval,optional"` + Timeout time.Duration `hcl:"timeout,optional"` + InitialStatus string `mapstructure:"initial_status" hcl:"initial_status,optional"` + TLSSkipVerify bool `mapstructure:"tls_skip_verify" hcl:"tls_skip_verify,optional"` + Header map[string][]string `hcl:"header,block"` + Method string `hcl:"method,optional"` + CheckRestart *CheckRestart `mapstructure:"check_restart" hcl:"check_restart,block"` + GRPCService string `mapstructure:"grpc_service" hcl:"grpc_service,optional"` + GRPCUseTLS bool `mapstructure:"grpc_use_tls" hcl:"grpc_use_tls,optional"` + TaskName string `mapstructure:"task" hcl:"task,optional"` + SuccessBeforePassing int `mapstructure:"success_before_passing" hcl:"success_before_passing,optional"` + FailuresBeforeCritical int `mapstructure:"failures_before_critical" hcl:"failures_before_critical,optional"` } // Service represents a Consul service definition. type Service struct { //FIXME Id is unused. Remove? 
- Id string - Name string - Tags []string - CanaryTags []string `mapstructure:"canary_tags"` - EnableTagOverride bool `mapstructure:"enable_tag_override"` - PortLabel string `mapstructure:"port"` - AddressMode string `mapstructure:"address_mode"` - Checks []ServiceCheck - CheckRestart *CheckRestart `mapstructure:"check_restart"` - Connect *ConsulConnect - Meta map[string]string - CanaryMeta map[string]string - TaskName string `mapstructure:"task"` + Id string `hcl:"id,optional"` + Name string `hcl:"name,optional"` + Tags []string `hcl:"tags,optional"` + CanaryTags []string `mapstructure:"canary_tags" hcl:"canary_tags,optional"` + EnableTagOverride bool `mapstructure:"enable_tag_override" hcl:"enable_tag_override,optional"` + PortLabel string `mapstructure:"port" hcl:"port,optional"` + AddressMode string `mapstructure:"address_mode" hcl:"address_mode,optional"` + Checks []ServiceCheck `hcl:"check,block"` + CheckRestart *CheckRestart `mapstructure:"check_restart" hcl:"check_restart,block"` + Connect *ConsulConnect `hcl:"connect,block"` + Meta map[string]string `hcl:"meta,block"` + CanaryMeta map[string]string `hcl:"canary_meta,block"` + TaskName string `mapstructure:"task" hcl:"task,optional"` } // Canonicalize the Service by ensuring its name and address mode are set. Task @@ -151,10 +151,10 @@ func (s *Service) Canonicalize(t *Task, tg *TaskGroup, job *Job) { // ConsulConnect represents a Consul Connect jobspec stanza. type ConsulConnect struct { - Native bool - Gateway *ConsulGateway - SidecarService *ConsulSidecarService `mapstructure:"sidecar_service"` - SidecarTask *SidecarTask `mapstructure:"sidecar_task"` + Native bool `hcl:"native,optional"` + Gateway *ConsulGateway `hcl:"gateway,block"` + SidecarService *ConsulSidecarService `mapstructure:"sidecar_service" hcl:"sidecar_service,block"` + SidecarTask *SidecarTask `mapstructure:"sidecar_task" hcl:"sidecar_task,block"` } func (cc *ConsulConnect) Canonicalize() { @@ -170,9 +170,9 @@ func (cc *ConsulConnect) Canonicalize() { // ConsulSidecarService represents a Consul Connect SidecarService jobspec // stanza. 
type ConsulSidecarService struct { - Tags []string - Port string - Proxy *ConsulProxy + Tags []string `hcl:"tags,optional"` + Port string `hcl:"port,optional"` + Proxy *ConsulProxy `hcl:"proxy,block"` } func (css *ConsulSidecarService) Canonicalize() { @@ -190,17 +190,17 @@ func (css *ConsulSidecarService) Canonicalize() { // SidecarTask represents a subset of Task fields that can be set to override // the fields of the Task generated for the sidecar type SidecarTask struct { - Name string - Driver string - User string - Config map[string]interface{} - Env map[string]string - Resources *Resources - Meta map[string]string - KillTimeout *time.Duration `mapstructure:"kill_timeout"` - LogConfig *LogConfig `mapstructure:"logs"` - ShutdownDelay *time.Duration `mapstructure:"shutdown_delay"` - KillSignal string `mapstructure:"kill_signal"` + Name string `hcl:"name,optional"` + Driver string `hcl:"driver,optional"` + User string `hcl:"user,optional"` + Config map[string]interface{} `hcl:"config,block"` + Env map[string]string `hcl:"env,block"` + Resources *Resources `hcl:"resources,block"` + Meta map[string]string `hcl:"meta,block"` + KillTimeout *time.Duration `mapstructure:"kill_timeout" hcl:"kill_timeout,optional"` + LogConfig *LogConfig `mapstructure:"logs" hcl:"logs,block"` + ShutdownDelay *time.Duration `mapstructure:"shutdown_delay" hcl:"shutdown_delay,optional"` + KillSignal string `mapstructure:"kill_signal" hcl:"kill_signal,optional"` } func (st *SidecarTask) Canonicalize() { @@ -243,11 +243,11 @@ func (st *SidecarTask) Canonicalize() { // ConsulProxy represents a Consul Connect sidecar proxy jobspec stanza. type ConsulProxy struct { - LocalServiceAddress string `mapstructure:"local_service_address"` - LocalServicePort int `mapstructure:"local_service_port"` - ExposeConfig *ConsulExposeConfig `mapstructure:"expose"` - Upstreams []*ConsulUpstream - Config map[string]interface{} + LocalServiceAddress string `mapstructure:"local_service_address" hcl:"local_service_address,optional"` + LocalServicePort int `mapstructure:"local_service_port" hcl:"local_service_port,optional"` + ExposeConfig *ConsulExposeConfig `mapstructure:"expose" hcl:"expose,block"` + Upstreams []*ConsulUpstream `hcl:"upstreams,block"` + Config map[string]interface{} `hcl:"config,block"` } func (cp *ConsulProxy) Canonicalize() { @@ -268,12 +268,12 @@ func (cp *ConsulProxy) Canonicalize() { // ConsulUpstream represents a Consul Connect upstream jobspec stanza. type ConsulUpstream struct { - DestinationName string `mapstructure:"destination_name"` - LocalBindPort int `mapstructure:"local_bind_port"` + DestinationName string `mapstructure:"destination_name" hcl:"destination_name,optional"` + LocalBindPort int `mapstructure:"local_bind_port" hcl:"local_bind_port,optional"` } type ConsulExposeConfig struct { - Path []*ConsulExposePath `mapstructure:"path"` + Path []*ConsulExposePath `mapstructure:"path" hcl:"path,block"` } func (cec *ConsulExposeConfig) Canonicalize() { @@ -287,19 +287,19 @@ func (cec *ConsulExposeConfig) Canonicalize() { } type ConsulExposePath struct { - Path string - Protocol string - LocalPathPort int `mapstructure:"local_path_port"` - ListenerPort string `mapstructure:"listener_port"` + Path string `hcl:"path,optional"` + Protocol string `hcl:"protocol,optional"` + LocalPathPort int `mapstructure:"local_path_port" hcl:"local_path_port,optional"` + ListenerPort string `mapstructure:"listener_port" hcl:"listener_port,optional"` } // ConsulGateway is used to configure one of the Consul Connect Gateway types. 
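The ConsulGatewayBindAddress label and the map-typed EnvoyGatewayBindAddresses field just below exercise the labeled-blocks-into-map decoding added to gohcl earlier in this diff. A short sketch:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/gohcl"
	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/hashicorp/nomad/api"
)

const src = `
envoy_gateway_no_default_bind = true

envoy_gateway_bind_addresses "lan" {
  address = "10.0.0.1"
  port    = 8443
}
`

func main() {
	f, diags := hclsyntax.ParseConfig([]byte(src), "gateway.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}

	var p api.ConsulGatewayProxy
	if diags := gohcl.DecodeBody(f.Body, nil, &p); diags.HasErrors() {
		panic(diags.Error())
	}
	// The block label "lan" becomes the map key; the struct's Name field
	// receives the same label via hcl:",label".
	fmt.Println(p.EnvoyGatewayBindAddresses["lan"].Port) // 8443
}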
type ConsulGateway struct { // Proxy is used to configure the Envoy instance acting as the gateway. - Proxy *ConsulGatewayProxy + Proxy *ConsulGatewayProxy `hcl:"proxy,block"` // Ingress represents the Consul Configuration Entry for an Ingress Gateway. - Ingress *ConsulIngressConfigEntry + Ingress *ConsulIngressConfigEntry `hcl:"ingress,block"` // Terminating is not yet supported. // Terminating *ConsulTerminatingConfigEntry @@ -328,8 +328,9 @@ func (g *ConsulGateway) Copy() *ConsulGateway { } type ConsulGatewayBindAddress struct { - Address string `mapstructure:"address"` - Port int `mapstructure:"port"` + Name string `hcl:",label"` + Address string `mapstructure:"address" hcl:"address,optional"` + Port int `mapstructure:"port" hcl:"port,optional"` } var ( @@ -341,11 +342,11 @@ var ( // // https://www.consul.io/docs/connect/proxies/envoy#gateway-options type ConsulGatewayProxy struct { - ConnectTimeout *time.Duration `mapstructure:"connect_timeout"` - EnvoyGatewayBindTaggedAddresses bool `mapstructure:"envoy_gateway_bind_tagged_addresses"` - EnvoyGatewayBindAddresses map[string]*ConsulGatewayBindAddress `mapstructure:"envoy_gateway_bind_addresses"` - EnvoyGatewayNoDefaultBind bool `mapstructure:"envoy_gateway_no_default_bind"` - Config map[string]interface{} // escape hatch envoy config + ConnectTimeout *time.Duration `mapstructure:"connect_timeout" hcl:"connect_timeout,optional"` + EnvoyGatewayBindTaggedAddresses bool `mapstructure:"envoy_gateway_bind_tagged_addresses" hcl:"envoy_gateway_bind_tagged_addresses,optional"` + EnvoyGatewayBindAddresses map[string]*ConsulGatewayBindAddress `mapstructure:"envoy_gateway_bind_addresses" hcl:"envoy_gateway_bind_addresses,block"` + EnvoyGatewayNoDefaultBind bool `mapstructure:"envoy_gateway_no_default_bind" hcl:"envoy_gateway_no_default_bind,optional"` + Config map[string]interface{} `hcl:"config,block"` // escape hatch envoy config } func (p *ConsulGatewayProxy) Canonicalize() { @@ -399,7 +400,7 @@ func (p *ConsulGatewayProxy) Copy() *ConsulGatewayProxy { // ConsulGatewayTLSConfig is used to configure TLS for a gateway. type ConsulGatewayTLSConfig struct { - Enabled bool + Enabled bool `hcl:"enabled,optional"` } func (tc *ConsulGatewayTLSConfig) Canonicalize() { @@ -419,9 +420,9 @@ func (tc *ConsulGatewayTLSConfig) Copy() *ConsulGatewayTLSConfig { type ConsulIngressService struct { // Namespace is not yet supported. // Namespace string - Name string + Name string `hcl:"name,optional"` - Hosts []string + Hosts []string `hcl:"hosts,optional"` } func (s *ConsulIngressService) Canonicalize() { @@ -458,9 +459,9 @@ const ( // ConsulIngressListener is used to configure a listener on a Consul Ingress // Gateway. type ConsulIngressListener struct { - Port int - Protocol string - Services []*ConsulIngressService + Port int `hcl:"port,optional"` + Protocol string `hcl:"protocol,optional"` + Services []*ConsulIngressService `hcl:"service,block"` } func (l *ConsulIngressListener) Canonicalize() { @@ -506,8 +507,8 @@ type ConsulIngressConfigEntry struct { // Namespace is not yet supported. 
// Namespace string - TLS *ConsulGatewayTLSConfig - Listeners []*ConsulIngressListener + TLS *ConsulGatewayTLSConfig `hcl:"tls,block"` + Listeners []*ConsulIngressListener `hcl:"listener,block"` } func (e *ConsulIngressConfigEntry) Canonicalize() { diff --git a/vendor/github.com/hashicorp/nomad/api/tasks.go b/vendor/github.com/hashicorp/nomad/api/tasks.go index 2de8ea5915b2..b331ff6b044a 100644 --- a/vendor/github.com/hashicorp/nomad/api/tasks.go +++ b/vendor/github.com/hashicorp/nomad/api/tasks.go @@ -67,10 +67,10 @@ type AllocResourceUsage struct { // RestartPolicy defines how the Nomad client restarts // tasks in a taskgroup when they fail type RestartPolicy struct { - Interval *time.Duration - Attempts *int - Delay *time.Duration - Mode *string + Interval *time.Duration `hcl:"interval,optional"` + Attempts *int `hcl:"attempts,optional"` + Delay *time.Duration `hcl:"delay,optional"` + Mode *string `hcl:"mode,optional"` } func (r *RestartPolicy) Merge(rp *RestartPolicy) { @@ -91,24 +91,24 @@ func (r *RestartPolicy) Merge(rp *RestartPolicy) { // Reschedule configures how Tasks are rescheduled when they crash or fail. type ReschedulePolicy struct { // Attempts limits the number of rescheduling attempts that can occur in an interval. - Attempts *int `mapstructure:"attempts"` + Attempts *int `mapstructure:"attempts" hcl:"attempts,optional"` // Interval is a duration in which we can limit the number of reschedule attempts. - Interval *time.Duration `mapstructure:"interval"` + Interval *time.Duration `mapstructure:"interval" hcl:"interval,optional"` // Delay is a minimum duration to wait between reschedule attempts. // The delay function determines how much subsequent reschedule attempts are delayed by. - Delay *time.Duration `mapstructure:"delay"` + Delay *time.Duration `mapstructure:"delay" hcl:"delay,optional"` // DelayFunction determines how the delay progressively changes on subsequent reschedule // attempts. Valid values are "exponential", "constant", and "fibonacci". - DelayFunction *string `mapstructure:"delay_function"` + DelayFunction *string `mapstructure:"delay_function" hcl:"delay_function,optional"` // MaxDelay is an upper bound on the delay. - MaxDelay *time.Duration `mapstructure:"max_delay"` + MaxDelay *time.Duration `mapstructure:"max_delay" hcl:"max_delay,optional"` // Unlimited allows rescheduling attempts until they succeed - Unlimited *bool `mapstructure:"unlimited"` + Unlimited *bool `mapstructure:"unlimited" hcl:"unlimited,optional"` } func (r *ReschedulePolicy) Merge(rp *ReschedulePolicy) { @@ -159,10 +159,10 @@ func (r *ReschedulePolicy) Canonicalize(jobType string) { // Affinity is used to serialize task group affinities type Affinity struct { - LTarget string // Left-hand target - RTarget string // Right-hand target - Operand string // Constraint operand (<=, <, =, !=, >, >=), set_contains_all, set_contains_any - Weight *int8 // Weight applied to nodes that match the affinity. Can be negative + LTarget string `hcl:"attribute,optional"` // Left-hand target + RTarget string `hcl:"value,optional"` // Right-hand target + Operand string `hcl:"operator,optional"` // Constraint operand (<=, <, =, !=, >, >=), set_contains_all, set_contains_any + Weight *int8 `hcl:"weight,optional"` // Weight applied to nodes that match the affinity. 
Can be negative } func NewAffinity(LTarget string, Operand string, RTarget string, Weight int8) *Affinity { @@ -255,15 +255,15 @@ func (p *ReschedulePolicy) String() string { // Spread is used to serialize task group allocation spread preferences type Spread struct { - Attribute string - Weight *int8 - SpreadTarget []*SpreadTarget + Attribute string `hcl:"attribute,optional"` + Weight *int8 `hcl:"weight,optional"` + SpreadTarget []*SpreadTarget `hcl:"target,block"` } // SpreadTarget is used to serialize target allocation spread percentages type SpreadTarget struct { - Value string - Percent uint8 + Value string `hcl:",label"` + Percent uint8 `hcl:"percent,optional"` } func NewSpreadTarget(value string, percent uint8) *SpreadTarget { @@ -289,9 +289,9 @@ func (s *Spread) Canonicalize() { // EphemeralDisk is an ephemeral disk object type EphemeralDisk struct { - Sticky *bool - Migrate *bool - SizeMB *int `mapstructure:"size"` + Sticky *bool `hcl:"sticky,optional"` + Migrate *bool `hcl:"migrate,optional"` + SizeMB *int `mapstructure:"size" hcl:"size,optional"` } func DefaultEphemeralDisk() *EphemeralDisk { @@ -317,10 +317,10 @@ func (e *EphemeralDisk) Canonicalize() { // MigrateStrategy describes how allocations for a task group should be // migrated between nodes (eg when draining). type MigrateStrategy struct { - MaxParallel *int `mapstructure:"max_parallel"` - HealthCheck *string `mapstructure:"health_check"` - MinHealthyTime *time.Duration `mapstructure:"min_healthy_time"` - HealthyDeadline *time.Duration `mapstructure:"healthy_deadline"` + MaxParallel *int `mapstructure:"max_parallel" hcl:"max_parallel,optional"` + HealthCheck *string `mapstructure:"health_check" hcl:"health_check,optional"` + MinHealthyTime *time.Duration `mapstructure:"min_healthy_time" hcl:"min_healthy_time,optional"` + HealthyDeadline *time.Duration `mapstructure:"healthy_deadline" hcl:"healthy_deadline,optional"` } func DefaultMigrateStrategy() *MigrateStrategy { @@ -377,12 +377,12 @@ func (m *MigrateStrategy) Copy() *MigrateStrategy { // VolumeRequest is a representation of a storage volume that a TaskGroup wishes to use. type VolumeRequest struct { - Name string - Type string - Source string - ReadOnly bool `hcl:"read_only"` - MountOptions *CSIMountOptions `hcl:"mount_options"` - ExtraKeysHCL []string `hcl:",unusedKeys" json:"-"` + Name string `hcl:"name,label"` + Type string `hcl:"type,optional"` + Source string `hcl:"source,optional"` + ReadOnly bool `hcl:"read_only,optional"` + MountOptions *CSIMountOptions `hcl:"mount_options,block"` + ExtraKeysHCL []string `hcl1:",unusedKeys,optional" json:"-"` } const ( @@ -394,10 +394,10 @@ const ( // VolumeMount represents the relationship between a destination path in a task // and the task group volume that should be mounted there. type VolumeMount struct { - Volume *string - Destination *string - ReadOnly *bool `mapstructure:"read_only"` - PropagationMode *string `mapstructure:"propagation_mode"` + Volume *string `hcl:"volume,optional"` + Destination *string `hcl:"destination,optional"` + ReadOnly *bool `mapstructure:"read_only" hcl:"read_only,optional"` + PropagationMode *string `mapstructure:"propagation_mode" hcl:"propagation_mode,optional"` } func (vm *VolumeMount) Canonicalize() { @@ -411,24 +411,24 @@ func (vm *VolumeMount) Canonicalize() { // TaskGroup is the unit of scheduling. 
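The tag grammar used throughout these hunks follows hcl/v2's gohcl conventions: `hcl:"<name>,optional"` declares an optional attribute, `hcl:"<name>,block"` a nested block, and `hcl:",label"` binds a block's label to a field (as with SpreadTarget.Value and VolumeRequest.Name above). Whether these structs are ultimately decoded by stock gohcl or by an extended jobspec decoder is not visible in this diff, so the following is only a minimal sketch of the grammar itself, using simplified stand-in types (plain int instead of *int8, no mapstructure tags):

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/gohcl"
	"github.com/hashicorp/hcl/v2/hclparse"
)

// Hypothetical stand-ins for the tagged structs above, for illustration only.
type spreadTarget struct {
	Value   string `hcl:",label"` // block label: target "<value>" { ... }
	Percent int    `hcl:"percent,optional"`
}

type spread struct {
	Attribute string          `hcl:"attribute,optional"`
	Weight    int             `hcl:"weight,optional"`
	Targets   []*spreadTarget `hcl:"target,block"`
}

func main() {
	// Real jobspecs interpolate "${node.datacenter}"; a plain literal
	// keeps this runnable with a nil EvalContext.
	src := []byte(`
attribute = "node.datacenter"
weight    = 100

target "us-east1" {
  percent = 60
}
`)
	f, diags := hclparse.NewParser().ParseHCL(src, "spread.hcl")
	if diags.HasErrors() {
		panic(diags)
	}
	var s spread
	if diags := gohcl.DecodeBody(f.Body, nil, &s); diags.HasErrors() {
		panic(diags)
	}
	fmt.Println(s.Attribute, s.Targets[0].Value, s.Targets[0].Percent)
	// node.datacenter us-east1 60
}

The same `,label` mechanism is what gives ConsulGatewayBindAddress and TaskGroup their named-block form in the hunks above and below.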
type TaskGroup struct { - Name *string - Count *int - Constraints []*Constraint - Affinities []*Affinity - Tasks []*Task - Spreads []*Spread - Volumes map[string]*VolumeRequest - RestartPolicy *RestartPolicy - ReschedulePolicy *ReschedulePolicy - EphemeralDisk *EphemeralDisk - Update *UpdateStrategy - Migrate *MigrateStrategy - Networks []*NetworkResource - Meta map[string]string - Services []*Service - ShutdownDelay *time.Duration `mapstructure:"shutdown_delay"` - StopAfterClientDisconnect *time.Duration `mapstructure:"stop_after_client_disconnect"` - Scaling *ScalingPolicy + Name *string `hcl:"name,label"` + Count *int `hcl:"count,optional"` + Constraints []*Constraint `hcl:"constraint,block"` + Affinities []*Affinity `hcl:"affinity,block"` + Tasks []*Task `hcl:"task,block"` + Spreads []*Spread `hcl:"spread,block"` + Volumes map[string]*VolumeRequest `hcl:"volume,block"` + RestartPolicy *RestartPolicy `hcl:"restart,block"` + ReschedulePolicy *ReschedulePolicy `hcl:"reschedule,block"` + EphemeralDisk *EphemeralDisk `hcl:"ephemeral_disk,block"` + Update *UpdateStrategy `hcl:"update,block"` + Migrate *MigrateStrategy `hcl:"migrate,block"` + Networks []*NetworkResource `hcl:"network,block"` + Meta map[string]string `hcl:"meta,block"` + Services []*Service `hcl:"service,block"` + ShutdownDelay *time.Duration `mapstructure:"shutdown_delay" hcl:"shutdown_delay,optional"` + StopAfterClientDisconnect *time.Duration `mapstructure:"stop_after_client_disconnect" hcl:"stop_after_client_disconnect,optional"` + Scaling *ScalingPolicy `hcl:"scaling,block"` } // NewTaskGroup creates a new TaskGroup. @@ -605,8 +605,8 @@ func (g *TaskGroup) AddSpread(s *Spread) *TaskGroup { // LogConfig provides configuration for log rotation type LogConfig struct { - MaxFiles *int `mapstructure:"max_files"` - MaxFileSizeMB *int `mapstructure:"max_file_size"` + MaxFiles *int `mapstructure:"max_files" hcl:"max_files,optional"` + MaxFileSizeMB *int `mapstructure:"max_file_size" hcl:"max_file_size,optional"` } func DefaultLogConfig() *LogConfig { @@ -627,17 +627,17 @@ func (l *LogConfig) Canonicalize() { // DispatchPayloadConfig configures how a task gets its input from a job dispatch type DispatchPayloadConfig struct { - File string + File string `hcl:"file,optional"` } const ( - TaskLifecycleHookPrestart = "prestart" + TaskLifecycleHookPrestart = "prestart" TaskLifecycleHookPoststart = "poststart" ) type TaskLifecycle struct { - Hook string `mapstructure:"hook"` - Sidecar bool `mapstructure:"sidecar"` + Hook string `mapstructure:"hook" hcl:"hook,optional"` + Sidecar bool `mapstructure:"sidecar" hcl:"sidecar,optional"` } // Determine if lifecycle has user-input values @@ -647,30 +647,30 @@ func (l *TaskLifecycle) Empty() bool { // Task is a single process in a task group. 
type Task struct { - Name string - Driver string - User string - Lifecycle *TaskLifecycle - Config map[string]interface{} - Constraints []*Constraint - Affinities []*Affinity - Env map[string]string - Services []*Service - Resources *Resources - RestartPolicy *RestartPolicy - Meta map[string]string - KillTimeout *time.Duration `mapstructure:"kill_timeout"` - LogConfig *LogConfig `mapstructure:"logs"` - Artifacts []*TaskArtifact - Vault *Vault - Templates []*Template - DispatchPayload *DispatchPayloadConfig - VolumeMounts []*VolumeMount - CSIPluginConfig *TaskCSIPluginConfig `mapstructure:"csi_plugin" json:",omitempty"` - Leader bool - ShutdownDelay time.Duration `mapstructure:"shutdown_delay"` - KillSignal string `mapstructure:"kill_signal"` - Kind string + Name string `hcl:"name,label"` + Driver string `hcl:"driver,optional"` + User string `hcl:"user,optional"` + Lifecycle *TaskLifecycle `hcl:"lifecycle,block"` + Config map[string]interface{} `hcl:"config,block"` + Constraints []*Constraint `hcl:"constraint,block"` + Affinities []*Affinity `hcl:"affinity,block"` + Env map[string]string `hcl:"env,block"` + Services []*Service `hcl:"service,block"` + Resources *Resources `hcl:"resources,block"` + RestartPolicy *RestartPolicy `hcl:"restart,block"` + Meta map[string]string `hcl:"meta,block"` + KillTimeout *time.Duration `mapstructure:"kill_timeout" hcl:"kill_timeout,optional"` + LogConfig *LogConfig `mapstructure:"logs" hcl:"logs,block"` + Artifacts []*TaskArtifact `hcl:"artifact,block"` + Vault *Vault `hcl:"vault,block"` + Templates []*Template `hcl:"template,block"` + DispatchPayload *DispatchPayloadConfig `hcl:"dispatch_payload,block"` + VolumeMounts []*VolumeMount `hcl:"volume_mount,block"` + CSIPluginConfig *TaskCSIPluginConfig `mapstructure:"csi_plugin" json:",omitempty" hcl:"csi_plugin,block"` + Leader bool `hcl:"leader,optional"` + ShutdownDelay time.Duration `mapstructure:"shutdown_delay" hcl:"shutdown_delay,optional"` + KillSignal string `mapstructure:"kill_signal" hcl:"kill_signal,optional"` + Kind string `hcl:"kind,optional"` } func (t *Task) Canonicalize(tg *TaskGroup, job *Job) { @@ -723,10 +723,10 @@ func (t *Task) Canonicalize(tg *TaskGroup, job *Job) { // TaskArtifact is used to download artifacts before running a task. 
type TaskArtifact struct { - GetterSource *string `mapstructure:"source"` - GetterOptions map[string]string `mapstructure:"options"` - GetterMode *string `mapstructure:"mode"` - RelativeDest *string `mapstructure:"destination"` + GetterSource *string `mapstructure:"source" hcl:"source,optional"` + GetterOptions map[string]string `mapstructure:"options" hcl:"options,block"` + GetterMode *string `mapstructure:"mode" hcl:"mode,optional"` + RelativeDest *string `mapstructure:"destination" hcl:"destination,optional"` } func (a *TaskArtifact) Canonicalize() { @@ -753,17 +753,17 @@ func (a *TaskArtifact) Canonicalize() { } type Template struct { - SourcePath *string `mapstructure:"source"` - DestPath *string `mapstructure:"destination"` - EmbeddedTmpl *string `mapstructure:"data"` - ChangeMode *string `mapstructure:"change_mode"` - ChangeSignal *string `mapstructure:"change_signal"` - Splay *time.Duration `mapstructure:"splay"` - Perms *string `mapstructure:"perms"` - LeftDelim *string `mapstructure:"left_delimiter"` - RightDelim *string `mapstructure:"right_delimiter"` - Envvars *bool `mapstructure:"env"` - VaultGrace *time.Duration `mapstructure:"vault_grace"` + SourcePath *string `mapstructure:"source" hcl:"source,optional"` + DestPath *string `mapstructure:"destination" hcl:"destination,optional"` + EmbeddedTmpl *string `mapstructure:"data" hcl:"data,optional"` + ChangeMode *string `mapstructure:"change_mode" hcl:"change_mode,optional"` + ChangeSignal *string `mapstructure:"change_signal" hcl:"change_signal,optional"` + Splay *time.Duration `mapstructure:"splay" hcl:"splay,optional"` + Perms *string `mapstructure:"perms" hcl:"perms,optional"` + LeftDelim *string `mapstructure:"left_delimiter" hcl:"left_delimiter,optional"` + RightDelim *string `mapstructure:"right_delimiter" hcl:"right_delimiter,optional"` + Envvars *bool `mapstructure:"env" hcl:"env,optional"` + VaultGrace *time.Duration `mapstructure:"vault_grace" hcl:"vault_grace,optional"` } func (tmpl *Template) Canonicalize() { @@ -812,11 +812,11 @@ func (tmpl *Template) Canonicalize() { } type Vault struct { - Policies []string - Namespace *string `mapstructure:"namespace"` - Env *bool - ChangeMode *string `mapstructure:"change_mode"` - ChangeSignal *string `mapstructure:"change_signal"` + Policies []string `hcl:"policies,optional"` + Namespace *string `mapstructure:"namespace" hcl:"namespace,optional"` + Env *bool `hcl:"env,optional"` + ChangeMode *string `mapstructure:"change_mode" hcl:"change_mode,optional"` + ChangeSignal *string `mapstructure:"change_signal" hcl:"change_signal,optional"` } func (v *Vault) Canonicalize() { @@ -975,10 +975,10 @@ const ( type TaskCSIPluginConfig struct { // ID is the identifier of the plugin. // Ideally this should be the FQDN of the plugin. - ID string `mapstructure:"id"` + ID string `mapstructure:"id" hcl:"id,optional"` // CSIPluginType instructs Nomad on how to handle processing a plugin - Type CSIPluginType `mapstructure:"type"` + Type CSIPluginType `mapstructure:"type" hcl:"type,optional"` // MountDir is the destination that nomad should mount in its CSI // directory for the plugin. It will then expect a file called CSISocketName @@ -986,7 +986,7 @@ type TaskCSIPluginConfig struct { // "MountDir/CSIIntermediaryDirname/VolumeName/AllocID for mounts. // // Default is /csi. 
- MountDir string `mapstructure:"mount_dir"` + MountDir string `mapstructure:"mount_dir" hcl:"mount_dir,optional"` } func (t *TaskCSIPluginConfig) Canonicalize() { diff --git a/vendor/github.com/zclconf/go-cty-yaml/.travis.yml b/vendor/github.com/zclconf/go-cty-yaml/.travis.yml new file mode 100644 index 000000000000..13ff9986686f --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/.travis.yml @@ -0,0 +1,5 @@ +language: go + +go: + - 1.12 + diff --git a/vendor/github.com/zclconf/go-cty-yaml/CHANGELOG.md b/vendor/github.com/zclconf/go-cty-yaml/CHANGELOG.md new file mode 100644 index 000000000000..b329bd05d9d6 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/CHANGELOG.md @@ -0,0 +1,16 @@ +# 1.0.2 (June 17, 2020) + +* The YAML decoder now follows the YAML specification more closely when parsing + numeric values. + ([#6](https://github.com/zclconf/go-cty-yaml/pull/6)) + +# 1.0.1 (July 30, 2019) + +* The YAML decoder is now correctly treating quoted scalars as verbatim literal + strings rather than using the fuzzy type selection rules for them. Fuzzy + type selection rules still apply to unquoted scalars. + ([#4](https://github.com/zclconf/go-cty-yaml/pull/4)) + +# 1.0.0 (May 26, 2019) + +Initial release. diff --git a/vendor/github.com/zclconf/go-cty-yaml/LICENSE b/vendor/github.com/zclconf/go-cty-yaml/LICENSE new file mode 100644 index 000000000000..8dada3edaf50 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/zclconf/go-cty-yaml/LICENSE.libyaml b/vendor/github.com/zclconf/go-cty-yaml/LICENSE.libyaml new file mode 100644 index 000000000000..8da58fbf6f84 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/zclconf/go-cty-yaml/NOTICE b/vendor/github.com/zclconf/go-cty-yaml/NOTICE new file mode 100644 index 000000000000..4e6c00ab3177 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/NOTICE @@ -0,0 +1,20 @@ +This package is derived from gopkg.in/yaml.v2, which is copyright +2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +Includes mechanical ports of code from libyaml, distributed under its original +license. See LICENSE.libyaml for more information. + +Modifications for cty interfacing copyright 2019 Martin Atkins, and +distributed under the same license terms. diff --git a/vendor/github.com/zclconf/go-cty-yaml/apic.go b/vendor/github.com/zclconf/go-cty-yaml/apic.go new file mode 100644 index 000000000000..1f7e87e67275 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/apic.go @@ -0,0 +1,739 @@ +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. 
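+ // If already-consumed tokens sit at the front of the queue
+ // (tokens_head > 0) and the slice has hit capacity, slide the live
+ // tokens down to index 0 and reset the head, so the append below can
+ // reuse the existing backing array instead of growing it.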
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. 
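+// Note: despite "file output" above, this accepts any io.Writer; it
+// mirrors yaml_emitter_set_output_string but routes emitted bytes
+// through yaml_writer_write_handler instead of an in-memory buffer.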
+func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. 
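+// Like the STREAM-START initializer above, this just overwrites *event
+// in place; a STREAM-END event carries no payload beyond its type.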
+func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. +func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. 
+// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. +// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. 
+// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/github.com/zclconf/go-cty-yaml/converter.go b/vendor/github.com/zclconf/go-cty-yaml/converter.go new file mode 100644 index 000000000000..a73b34a8b2ab --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/converter.go @@ -0,0 +1,69 @@ +package yaml + +import ( + "github.com/zclconf/go-cty/cty" +) + +// ConverterConfig is used to configure a new converter, using NewConverter. +type ConverterConfig struct { + // EncodeAsFlow, when set to true, causes Marshal to produce flow-style + // mapping and sequence serializations. + EncodeAsFlow bool +} + +// A Converter can marshal and unmarshal between cty values and YAML bytes. +// +// Because there are many different ways to map cty to YAML and vice-versa, +// a converter is configurable using the settings in ConverterConfig, which +// allow for a few different permutations of mapping to YAML. +// +// If you are just trying to work with generic, standard YAML, the predefined +// converter in Standard should be good enough. +type Converter struct { + encodeAsFlow bool +} + +// NewConverter creates a new Converter with the given configuration. 
+func NewConverter(config *ConverterConfig) *Converter { + return &Converter{ + encodeAsFlow: config.EncodeAsFlow, + } +} + +// Standard is a predefined Converter that produces and consumes generic YAML +// using only built-in constructs that any other YAML implementation ought to +// understand. +var Standard *Converter = NewConverter(&ConverterConfig{}) + +// ImpliedType analyzes the given source code and returns a suitable type that +// it could be decoded into. +// +// For a converter that is using standard YAML rather than cty-specific custom +// tags, only a subset of cty types can be produced: strings, numbers, bools, +// tuple types, and object types. +func (c *Converter) ImpliedType(src []byte) (cty.Type, error) { + return c.impliedType(src) +} + +// Marshal serializes the given value into a YAML document, using a fixed +// mapping from cty types to YAML constructs. +// +// Note that unlike the function of the same name in the cty JSON package, +// this does not take a type constraint and therefore the YAML serialization +// cannot preserve late-bound type information in the serialization to be +// recovered from Unmarshal. Instead, any cty.DynamicPseudoType in the type +// constraint given to Unmarshal will be decoded as if the corresponding portion +// of the input were processed with ImpliedType to find a target type. +func (c *Converter) Marshal(v cty.Value) ([]byte, error) { + return c.marshal(v) +} + +// Unmarshal reads the document found within the given source buffer +// and attempts to convert it into a value conforming to the given type +// constraint. +// +// An error is returned if the given source contains any YAML document +// delimiters. +func (c *Converter) Unmarshal(src []byte, ty cty.Type) (cty.Value, error) { + return c.unmarshal(src, ty) +} diff --git a/vendor/github.com/zclconf/go-cty-yaml/cty_funcs.go b/vendor/github.com/zclconf/go-cty-yaml/cty_funcs.go new file mode 100644 index 000000000000..b91141ccaa94 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/cty_funcs.go @@ -0,0 +1,57 @@ +package yaml + +import ( + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// YAMLDecodeFunc is a cty function for decoding arbitrary YAML source code +// into a cty Value, using the ImpliedType and Unmarshal methods of the +// Standard pre-defined converter. +var YAMLDecodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "src", + Type: cty.String, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + if !args[0].IsKnown() { + return cty.DynamicPseudoType, nil + } + if args[0].IsNull() { + return cty.NilType, function.NewArgErrorf(0, "YAML source code cannot be null") + } + return Standard.ImpliedType([]byte(args[0].AsString())) + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + if retType == cty.DynamicPseudoType { + return cty.DynamicVal, nil + } + return Standard.Unmarshal([]byte(args[0].AsString()), retType) + }, +}) + +// YAMLEncodeFunc is a cty function for encoding an arbitrary cty value +// into YAML. 
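A hedged usage sketch of this vendored package's public surface as introduced in converter.go above (Standard, ImpliedType, Unmarshal, Marshal); the printed output comments are illustrative, and the exact YAML formatting produced by Marshal may differ:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	yaml "github.com/zclconf/go-cty-yaml"
)

func main() {
	src := []byte("name: web\ncount: 3\n")

	// Infer a cty type for the document (an object type here), then
	// decode into a value conforming to it.
	ty, err := yaml.Standard.ImpliedType(src)
	if err != nil {
		panic(err)
	}
	v, err := yaml.Standard.Unmarshal(src, ty)
	if err != nil {
		panic(err)
	}
	fmt.Println(v.GetAttr("name")) // cty.StringVal("web")

	// Round-trip a value back to YAML bytes.
	out, err := yaml.Standard.Marshal(cty.ObjectVal(map[string]cty.Value{
		"enabled": cty.True,
	}))
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // enabled: true
}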
+var YAMLEncodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "value", + Type: cty.DynamicPseudoType, + AllowNull: true, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + if !args[0].IsWhollyKnown() { + return cty.UnknownVal(retType), nil + } + raw, err := Standard.Marshal(args[0]) + if err != nil { + return cty.NilVal, err + } + return cty.StringVal(string(raw)), nil + }, +}) diff --git a/vendor/github.com/zclconf/go-cty-yaml/decode.go b/vendor/github.com/zclconf/go-cty-yaml/decode.go new file mode 100644 index 000000000000..e369ff27c88e --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/decode.go @@ -0,0 +1,261 @@ +package yaml + +import ( + "errors" + "fmt" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +func (c *Converter) unmarshal(src []byte, ty cty.Type) (cty.Value, error) { + p := &yaml_parser_t{} + if !yaml_parser_initialize(p) { + return cty.NilVal, errors.New("failed to initialize YAML parser") + } + if len(src) == 0 { + src = []byte{'\n'} + } + + an := &valueAnalysis{ + anchorsPending: map[string]int{}, + anchorVals: map[string]cty.Value{}, + } + + yaml_parser_set_input_string(p, src) + + var evt yaml_event_t + if !yaml_parser_parse(p, &evt) { + return cty.NilVal, parserError(p) + } + if evt.typ != yaml_STREAM_START_EVENT { + return cty.NilVal, parseEventErrorf(&evt, "missing stream start token") + } + if !yaml_parser_parse(p, &evt) { + return cty.NilVal, parserError(p) + } + if evt.typ != yaml_DOCUMENT_START_EVENT { + return cty.NilVal, parseEventErrorf(&evt, "missing start of document") + } + + v, err := c.unmarshalParse(an, p) + if err != nil { + return cty.NilVal, err + } + + if !yaml_parser_parse(p, &evt) { + return cty.NilVal, parserError(p) + } + if evt.typ == yaml_DOCUMENT_START_EVENT { + return cty.NilVal, parseEventErrorf(&evt, "only a single document is allowed") + } + if evt.typ != yaml_DOCUMENT_END_EVENT { + return cty.NilVal, parseEventErrorf(&evt, "unexpected extra content (%s) after value", evt.typ.String()) + } + if !yaml_parser_parse(p, &evt) { + return cty.NilVal, parserError(p) + } + if evt.typ != yaml_STREAM_END_EVENT { + return cty.NilVal, parseEventErrorf(&evt, "unexpected extra content after value") + } + + return convert.Convert(v, ty) +} + +func (c *Converter) unmarshalParse(an *valueAnalysis, p *yaml_parser_t) (cty.Value, error) { + var evt yaml_event_t + if !yaml_parser_parse(p, &evt) { + return cty.NilVal, parserError(p) + } + return c.unmarshalParseRemainder(an, &evt, p) +} + +func (c *Converter) unmarshalParseRemainder(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { + switch evt.typ { + case yaml_SCALAR_EVENT: + return c.unmarshalScalar(an, evt, p) + case yaml_ALIAS_EVENT: + return c.unmarshalAlias(an, evt, p) + case yaml_MAPPING_START_EVENT: + return c.unmarshalMapping(an, evt, p) + case yaml_SEQUENCE_START_EVENT: + return c.unmarshalSequence(an, evt, p) + case yaml_DOCUMENT_START_EVENT: + return cty.NilVal, parseEventErrorf(evt, "only a single document is allowed") + case yaml_STREAM_END_EVENT: + // Decoding an empty buffer, probably + return cty.NilVal, parseEventErrorf(evt, "expecting value but found end of stream") + default: + // Should never happen; the above should be comprehensive + return cty.NilVal, parseEventErrorf(evt, "unexpected parser event %s", evt.typ.String()) + } +} + +func (c *Converter) unmarshalScalar(an 
*valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { + src := evt.value + tag := string(evt.tag) + anchor := string(evt.anchor) + + if len(anchor) > 0 { + an.beginAnchor(anchor) + } + + val, err := c.resolveScalar(tag, string(src), yaml_scalar_style_t(evt.style)) + if err != nil { + return cty.NilVal, parseEventErrorWrap(evt, err) + } + + if val.RawEquals(mergeMappingVal) { + // In any context other than a mapping key, this is just a plain string + val = cty.StringVal("<<") + } + + if len(anchor) > 0 { + an.completeAnchor(anchor, val) + } + return val, nil +} + +func (c *Converter) unmarshalMapping(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { + tag := string(evt.tag) + anchor := string(evt.anchor) + + if tag != "" && tag != yaml_MAP_TAG { + return cty.NilVal, parseEventErrorf(evt, "can't interpret mapping as %s", tag) + } + + if anchor != "" { + an.beginAnchor(anchor) + } + + vals := make(map[string]cty.Value) + for { + var nextEvt yaml_event_t + if !yaml_parser_parse(p, &nextEvt) { + return cty.NilVal, parserError(p) + } + if nextEvt.typ == yaml_MAPPING_END_EVENT { + v := cty.ObjectVal(vals) + if anchor != "" { + an.completeAnchor(anchor, v) + } + return v, nil + } + + if nextEvt.typ != yaml_SCALAR_EVENT { + return cty.NilVal, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys") + } + keyVal, err := c.resolveScalar(string(nextEvt.tag), string(nextEvt.value), yaml_scalar_style_t(nextEvt.style)) + if err != nil { + return cty.NilVal, err + } + if keyVal.RawEquals(mergeMappingVal) { + // Merging the value (which must be a mapping) into our mapping, + // then. + val, err := c.unmarshalParse(an, p) + if err != nil { + return cty.NilVal, err + } + ty := val.Type() + if !(ty.IsObjectType() || ty.IsMapType()) { + return cty.NilVal, parseEventErrorf(&nextEvt, "cannot merge %s into mapping", ty.FriendlyName()) + } + for it := val.ElementIterator(); it.Next(); { + k, v := it.Element() + vals[k.AsString()] = v + } + continue + } + if keyValStr, err := convert.Convert(keyVal, cty.String); err == nil { + keyVal = keyValStr + } else { + return cty.NilVal, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys") + } + if keyVal.IsNull() { + return cty.NilVal, parseEventErrorf(&nextEvt, "mapping key cannot be null") + } + if !keyVal.IsKnown() { + return cty.NilVal, parseEventErrorf(&nextEvt, "mapping key must be known") + } + val, err := c.unmarshalParse(an, p) + if err != nil { + return cty.NilVal, err + } + + vals[keyVal.AsString()] = val + } +} + +func (c *Converter) unmarshalSequence(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { + tag := string(evt.tag) + anchor := string(evt.anchor) + + if tag != "" && tag != yaml_SEQ_TAG { + return cty.NilVal, parseEventErrorf(evt, "can't interpret sequence as %s", tag) + } + + if anchor != "" { + an.beginAnchor(anchor) + } + + var vals []cty.Value + for { + var nextEvt yaml_event_t + if !yaml_parser_parse(p, &nextEvt) { + return cty.NilVal, parserError(p) + } + if nextEvt.typ == yaml_SEQUENCE_END_EVENT { + ty := cty.TupleVal(vals) + if anchor != "" { + an.completeAnchor(anchor, ty) + } + return ty, nil + } + + val, err := c.unmarshalParseRemainder(an, &nextEvt, p) + if err != nil { + return cty.NilVal, err + } + + vals = append(vals, val) + } +} + +func (c *Converter) unmarshalAlias(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { + v, err := an.anchorVal(string(evt.anchor)) + if err != nil { + err = 
parseEventErrorWrap(evt, err) + } + return v, err +} + +type valueAnalysis struct { + anchorsPending map[string]int + anchorVals map[string]cty.Value +} + +func (an *valueAnalysis) beginAnchor(name string) { + an.anchorsPending[name]++ +} + +func (an *valueAnalysis) completeAnchor(name string, v cty.Value) { + an.anchorsPending[name]-- + if an.anchorsPending[name] == 0 { + delete(an.anchorsPending, name) + } + an.anchorVals[name] = v +} + +func (an *valueAnalysis) anchorVal(name string) (cty.Value, error) { + if _, pending := an.anchorsPending[name]; pending { + // YAML normally allows self-referencing structures, but cty cannot + // represent them (it requires all structures to be finite) so we + // must fail here. + return cty.NilVal, fmt.Errorf("cannot refer to anchor %q from inside its own definition", name) + } + ty, ok := an.anchorVals[name] + if !ok { + return cty.NilVal, fmt.Errorf("reference to undefined anchor %q", name) + } + return ty, nil +} diff --git a/vendor/github.com/zclconf/go-cty-yaml/emitterc.go b/vendor/github.com/zclconf/go-cty-yaml/emitterc.go new file mode 100644 index 000000000000..a1c2cc52627f --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/emitterc.go @@ -0,0 +1,1685 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. 
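+// A '\n' in the input is normalized to the emitter's configured line break
+// (LF, CR, or CRLF), while any other break character is copied through
+// verbatim; in both cases the line counter is advanced and the column reset.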
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. 
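+// Each event is routed to the handler for the emitter's current state; the
+// "first"/"simple" flavors of a state share one handler, distinguished by
+// the extra boolean argument.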
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. +func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. 
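+// Any %YAML/%TAG directives are written first; the handler then decides
+// whether the document needs an explicit "---" marker or may begin
+// implicitly.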
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. 
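+ // An explicit (non-implicit) document end is rendered as the "..."
+ // end-of-document marker.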
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. +func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. 
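+// For a simple key the key text was already written inline, so only the ":"
+// indicator is needed; after a complex ("?") key, a line break and indent
+// may be required before the ":".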
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. +func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. 
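+// Records the surrounding context flags (consumed later by, e.g.,
+// yaml_emitter_select_scalar_style and the scalar writers) and dispatches
+// on the event type.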
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. 
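+// Like the empty-sequence check above, this peeks at the buffered lookahead
+// events that yaml_emitter_need_more_events guarantees are available.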
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. +func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. 
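+// A tag whose prefix matched a %TAG directive is written in shorthand form
+// (handle immediately followed by the suffix); otherwise the verbatim
+// "!<...>" form is used.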
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. +func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. 
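+// On success the anchor is cached in emitter.anchor_data for
+// yaml_emitter_process_anchor to write out later.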
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
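+// Clears and repopulates the per-event caches (anchor_data, tag_data and
+// scalar_data) that the subsequent write steps read.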
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } 
else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, 
rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if 
!yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/vendor/github.com/zclconf/go-cty-yaml/encode.go b/vendor/github.com/zclconf/go-cty-yaml/encode.go new file mode 100644 index 000000000000..daa1478a934c --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/encode.go @@ -0,0 +1,189 @@ +package yaml + +import ( + "bytes" + "fmt" + "strings" + + "github.com/zclconf/go-cty/cty" +) + +func (c *Converter) marshal(v cty.Value) ([]byte, error) { + var buf bytes.Buffer + + e := &yaml_emitter_t{} + yaml_emitter_initialize(e) + yaml_emitter_set_output_writer(e, &buf) + yaml_emitter_set_unicode(e, true) + + var evt yaml_event_t + yaml_stream_start_event_initialize(&evt, yaml_UTF8_ENCODING) + if !yaml_emitter_emit(e, &evt) { + return nil, emitterError(e) + } + yaml_document_start_event_initialize(&evt, nil, nil, true) + if !yaml_emitter_emit(e, &evt) { + return nil, emitterError(e) + } + + if err := c.marshalEmit(v, e); err != nil { + return nil, err + } + + yaml_document_end_event_initialize(&evt, true) + if !yaml_emitter_emit(e, &evt) { + return nil, emitterError(e) + } + yaml_stream_end_event_initialize(&evt) + if !yaml_emitter_emit(e, &evt) { + return nil, emitterError(e) + } + + return buf.Bytes(), nil +} + +func (c *Converter) marshalEmit(v cty.Value, e *yaml_emitter_t) error { + ty := v.Type() + switch { + case v.IsNull(): + return c.marshalPrimitive(v, e) + case !v.IsKnown(): + return fmt.Errorf("cannot serialize unknown value as YAML") + case ty.IsPrimitiveType(): + return c.marshalPrimitive(v, e) + case ty.IsTupleType(), ty.IsListType(), ty.IsSetType(): + return c.marshalSequence(v, e) + case ty.IsObjectType(), ty.IsMapType(): + return c.marshalMapping(v, e) + default: + return fmt.Errorf("can't marshal %s as YAML", ty.FriendlyName()) + } +} + +func (c *Converter) marshalPrimitive(v cty.Value, e *yaml_emitter_t) error { + var evt yaml_event_t + + if v.IsNull() { + yaml_scalar_event_initialize( + &evt, + nil, + nil, + []byte("null"), + true, + true, + yaml_PLAIN_SCALAR_STYLE, + ) + if !yaml_emitter_emit(e, &evt) { + return emitterError(e) + } + return nil + } + + switch v.Type() { + case cty.String: + str := v.AsString() + style := yaml_DOUBLE_QUOTED_SCALAR_STYLE + if strings.Contains(str, "\n") { + style = yaml_LITERAL_SCALAR_STYLE + } + yaml_scalar_event_initialize( + &evt, + nil, + nil, + []byte(str), + true, + true, + style, + ) + case cty.Number: + str := v.AsBigFloat().Text('f', -1) + 
yaml_scalar_event_initialize( + &evt, + nil, + nil, + []byte(str), + true, + true, + yaml_PLAIN_SCALAR_STYLE, + ) + case cty.Bool: + var str string + switch v { + case cty.True: + str = "true" + case cty.False: + str = "false" + } + yaml_scalar_event_initialize( + &evt, + nil, + nil, + []byte(str), + true, + true, + yaml_PLAIN_SCALAR_STYLE, + ) + } + if !yaml_emitter_emit(e, &evt) { + return emitterError(e) + } + return nil +} + +func (c *Converter) marshalSequence(v cty.Value, e *yaml_emitter_t) error { + style := yaml_BLOCK_SEQUENCE_STYLE + if c.encodeAsFlow { + style = yaml_FLOW_SEQUENCE_STYLE + } + + var evt yaml_event_t + yaml_sequence_start_event_initialize(&evt, nil, nil, true, style) + if !yaml_emitter_emit(e, &evt) { + return emitterError(e) + } + + for it := v.ElementIterator(); it.Next(); { + _, v := it.Element() + err := c.marshalEmit(v, e) + if err != nil { + return err + } + } + + yaml_sequence_end_event_initialize(&evt) + if !yaml_emitter_emit(e, &evt) { + return emitterError(e) + } + return nil +} + +func (c *Converter) marshalMapping(v cty.Value, e *yaml_emitter_t) error { + style := yaml_BLOCK_MAPPING_STYLE + if c.encodeAsFlow { + style = yaml_FLOW_MAPPING_STYLE + } + + var evt yaml_event_t + yaml_mapping_start_event_initialize(&evt, nil, nil, true, style) + if !yaml_emitter_emit(e, &evt) { + return emitterError(e) + } + + for it := v.ElementIterator(); it.Next(); { + k, v := it.Element() + err := c.marshalEmit(k, e) + if err != nil { + return err + } + err = c.marshalEmit(v, e) + if err != nil { + return err + } + } + + yaml_mapping_end_event_initialize(&evt) + if !yaml_emitter_emit(e, &evt) { + return emitterError(e) + } + return nil +} diff --git a/vendor/github.com/zclconf/go-cty-yaml/error.go b/vendor/github.com/zclconf/go-cty-yaml/error.go new file mode 100644 index 000000000000..ae41c488f874 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/error.go @@ -0,0 +1,97 @@ +package yaml + +import ( + "errors" + "fmt" +) + +// Error is an error implementation used to report errors that correspond to +// a particular position in an input buffer. +type Error struct { + cause error + Line, Column int +} + +func (e Error) Error() string { + return fmt.Sprintf("on line %d, column %d: %s", e.Line, e.Column, e.cause.Error()) +} + +// Cause is an implementation of the interface used by +// github.com/pkg/errors.Cause, returning the underlying error without the +// position information. +func (e Error) Cause() error { + return e.cause +} + +// WrappedErrors is an implementation of github.com/hashicorp/errwrap.Wrapper +// returning the underlying error without the position information. 
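+//
+// A hypothetical caller-side sketch (illustrative only, not part of the
+// vendored file) for recovering the position with the standard errors
+// package:
+//
+//	var posErr Error
+//	if errors.As(err, &posErr) {
+//	    fmt.Printf("problem at line %d, column %d\n", posErr.Line, posErr.Column)
+//	}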
+func (e Error) WrappedErrors() []error { + return []error{e.cause} +} + +func parserError(p *yaml_parser_t) error { + var cause error + if len(p.problem) > 0 { + cause = errors.New(p.problem) + } else { + cause = errors.New("invalid YAML syntax") // useless generic error, then + } + + return parserErrorWrap(p, cause) +} + +func parserErrorWrap(p *yaml_parser_t, cause error) error { + switch { + case p.problem_mark.line != 0: + line := p.problem_mark.line + column := p.problem_mark.column + // Scanner errors don't iterate line before returning error + if p.error == yaml_SCANNER_ERROR { + line++ + column = 0 + } + return Error{ + cause: cause, + Line: line, + Column: column + 1, + } + case p.context_mark.line != 0: + return Error{ + cause: cause, + Line: p.context_mark.line, + Column: p.context_mark.column + 1, + } + default: + return cause + } +} + +func parserErrorf(p *yaml_parser_t, f string, vals ...interface{}) error { + return parserErrorWrap(p, fmt.Errorf(f, vals...)) +} + +func parseEventErrorWrap(evt *yaml_event_t, cause error) error { + if evt.start_mark.line == 0 { + // Event does not have a start mark, so we won't wrap the error at all + return cause + } + return Error{ + cause: cause, + Line: evt.start_mark.line, + Column: evt.start_mark.column + 1, + } +} + +func parseEventErrorf(evt *yaml_event_t, f string, vals ...interface{}) error { + return parseEventErrorWrap(evt, fmt.Errorf(f, vals...)) +} + +func emitterError(e *yaml_emitter_t) error { + var cause error + if len(e.problem) > 0 { + cause = errors.New(e.problem) + } else { + cause = errors.New("failed to write YAML token") // useless generic error, then + } + return cause +} diff --git a/vendor/github.com/zclconf/go-cty-yaml/go.mod b/vendor/github.com/zclconf/go-cty-yaml/go.mod new file mode 100644 index 000000000000..3d522687196d --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/go.mod @@ -0,0 +1,3 @@ +module github.com/zclconf/go-cty-yaml + +require github.com/zclconf/go-cty v1.0.0 diff --git a/vendor/github.com/zclconf/go-cty-yaml/go.sum b/vendor/github.com/zclconf/go-cty-yaml/go.sum new file mode 100644 index 000000000000..841f7fcfe3c8 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/go.sum @@ -0,0 +1,18 @@ +github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= +github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/zclconf/go-cty v1.0.0 h1:EWtv3gKe2wPLIB9hQRQJa7k/059oIfAqcEkCNnaVckk= +github.com/zclconf/go-cty v1.0.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= +golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/zclconf/go-cty-yaml/implied_type.go b/vendor/github.com/zclconf/go-cty-yaml/implied_type.go new file mode 100644 index 000000000000..5b7b0686fabd --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/implied_type.go @@ -0,0 +1,268 @@ +package yaml + +import ( + "errors" + "fmt" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +func (c *Converter) impliedType(src []byte) (cty.Type, error) { + p := &yaml_parser_t{} + if !yaml_parser_initialize(p) { + return cty.NilType, errors.New("failed to initialize YAML parser") + } + if len(src) == 0 { + src = []byte{'\n'} + } + + an := &typeAnalysis{ + anchorsPending: map[string]int{}, + anchorTypes: map[string]cty.Type{}, + } + + yaml_parser_set_input_string(p, src) + + var evt yaml_event_t + if !yaml_parser_parse(p, &evt) { + return cty.NilType, parserError(p) + } + if evt.typ != yaml_STREAM_START_EVENT { + return cty.NilType, parseEventErrorf(&evt, "missing stream start token") + } + if !yaml_parser_parse(p, &evt) { + return cty.NilType, parserError(p) + } + if evt.typ != yaml_DOCUMENT_START_EVENT { + return cty.NilType, parseEventErrorf(&evt, "missing start of document") + } + + ty, err := c.impliedTypeParse(an, p) + if err != nil { + return cty.NilType, err + } + + if !yaml_parser_parse(p, &evt) { + return cty.NilType, parserError(p) + } + if evt.typ == yaml_DOCUMENT_START_EVENT { + return cty.NilType, parseEventErrorf(&evt, "only a single document is allowed") + } + if evt.typ != yaml_DOCUMENT_END_EVENT { + return cty.NilType, parseEventErrorf(&evt, "unexpected extra content (%s) after value", evt.typ.String()) + } + if !yaml_parser_parse(p, &evt) { + return cty.NilType, parserError(p) + } + if evt.typ != yaml_STREAM_END_EVENT { + return cty.NilType, parseEventErrorf(&evt, "unexpected extra content after value") + } + + return ty, err +} + +func (c *Converter) impliedTypeParse(an *typeAnalysis, p *yaml_parser_t) (cty.Type, error) { + var evt yaml_event_t + if !yaml_parser_parse(p, &evt) { + return cty.NilType, parserError(p) + } + return c.impliedTypeParseRemainder(an, &evt, p) +} + +func (c *Converter) impliedTypeParseRemainder(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { + switch evt.typ { + case yaml_SCALAR_EVENT: + return c.impliedTypeScalar(an, evt, p) + case yaml_ALIAS_EVENT: + return c.impliedTypeAlias(an, evt, p) + case yaml_MAPPING_START_EVENT: + return c.impliedTypeMapping(an, evt, p) + case yaml_SEQUENCE_START_EVENT: + return c.impliedTypeSequence(an, evt, p) + case yaml_DOCUMENT_START_EVENT: + return cty.NilType, parseEventErrorf(evt, "only a single document is allowed") + case yaml_STREAM_END_EVENT: + // Decoding an empty buffer, probably + return cty.NilType, parseEventErrorf(evt, "expecting value but found end of stream") + default: + // Should never happen; the above should be comprehensive + return cty.NilType, parseEventErrorf(evt, "unexpected parser event %s", evt.typ.String()) + } +} + +func (c *Converter) impliedTypeScalar(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { + src := evt.value + tag := string(evt.tag) + anchor := string(evt.anchor) + implicit := 
evt.implicit + + if len(anchor) > 0 { + an.beginAnchor(anchor) + } + + var ty cty.Type + switch { + case tag == "" && !implicit: + // Untagged explicit string + ty = cty.String + default: + v, err := c.resolveScalar(tag, string(src), yaml_scalar_style_t(evt.style)) + if err != nil { + return cty.NilType, parseEventErrorWrap(evt, err) + } + if v.RawEquals(mergeMappingVal) { + // In any context other than a mapping key, this is just a plain string + ty = cty.String + } else { + ty = v.Type() + } + } + + if len(anchor) > 0 { + an.completeAnchor(anchor, ty) + } + return ty, nil +} + +func (c *Converter) impliedTypeMapping(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { + tag := string(evt.tag) + anchor := string(evt.anchor) + + if tag != "" && tag != yaml_MAP_TAG { + return cty.NilType, parseEventErrorf(evt, "can't interpret mapping as %s", tag) + } + + if anchor != "" { + an.beginAnchor(anchor) + } + + atys := make(map[string]cty.Type) + for { + var nextEvt yaml_event_t + if !yaml_parser_parse(p, &nextEvt) { + return cty.NilType, parserError(p) + } + if nextEvt.typ == yaml_MAPPING_END_EVENT { + ty := cty.Object(atys) + if anchor != "" { + an.completeAnchor(anchor, ty) + } + return ty, nil + } + + if nextEvt.typ != yaml_SCALAR_EVENT { + return cty.NilType, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys") + } + keyVal, err := c.resolveScalar(string(nextEvt.tag), string(nextEvt.value), yaml_scalar_style_t(nextEvt.style)) + if err != nil { + return cty.NilType, err + } + if keyVal.RawEquals(mergeMappingVal) { + // Merging the value (which must be a mapping) into our mapping, + // then. + ty, err := c.impliedTypeParse(an, p) + if err != nil { + return cty.NilType, err + } + if !ty.IsObjectType() { + return cty.NilType, parseEventErrorf(&nextEvt, "cannot merge %s into mapping", ty.FriendlyName()) + } + for name, aty := range ty.AttributeTypes() { + atys[name] = aty + } + continue + } + if keyValStr, err := convert.Convert(keyVal, cty.String); err == nil { + keyVal = keyValStr + } else { + return cty.NilType, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys") + } + if keyVal.IsNull() { + return cty.NilType, parseEventErrorf(&nextEvt, "mapping key cannot be null") + } + if !keyVal.IsKnown() { + return cty.NilType, parseEventErrorf(&nextEvt, "mapping key must be known") + } + valTy, err := c.impliedTypeParse(an, p) + if err != nil { + return cty.NilType, err + } + + atys[keyVal.AsString()] = valTy + } +} + +func (c *Converter) impliedTypeSequence(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { + tag := string(evt.tag) + anchor := string(evt.anchor) + + if tag != "" && tag != yaml_SEQ_TAG { + return cty.NilType, parseEventErrorf(evt, "can't interpret sequence as %s", tag) + } + + if anchor != "" { + an.beginAnchor(anchor) + } + + var atys []cty.Type + for { + var nextEvt yaml_event_t + if !yaml_parser_parse(p, &nextEvt) { + return cty.NilType, parserError(p) + } + if nextEvt.typ == yaml_SEQUENCE_END_EVENT { + ty := cty.Tuple(atys) + if anchor != "" { + an.completeAnchor(anchor, ty) + } + return ty, nil + } + + valTy, err := c.impliedTypeParseRemainder(an, &nextEvt, p) + if err != nil { + return cty.NilType, err + } + + atys = append(atys, valTy) + } +} + +func (c *Converter) impliedTypeAlias(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { + ty, err := an.anchorType(string(evt.anchor)) + if err != nil { + err = parseEventErrorWrap(evt, err) + } + return ty, err +} + +type 
typeAnalysis struct { + anchorsPending map[string]int + anchorTypes map[string]cty.Type +} + +func (an *typeAnalysis) beginAnchor(name string) { + an.anchorsPending[name]++ +} + +func (an *typeAnalysis) completeAnchor(name string, ty cty.Type) { + an.anchorsPending[name]-- + if an.anchorsPending[name] == 0 { + delete(an.anchorsPending, name) + } + an.anchorTypes[name] = ty +} + +func (an *typeAnalysis) anchorType(name string) (cty.Type, error) { + if _, pending := an.anchorsPending[name]; pending { + // YAML normally allows self-referencing structures, but cty cannot + // represent them (it requires all structures to be finite) so we + // must fail here. + return cty.NilType, fmt.Errorf("cannot refer to anchor %q from inside its own definition", name) + } + ty, ok := an.anchorTypes[name] + if !ok { + return cty.NilType, fmt.Errorf("reference to undefined anchor %q", name) + } + return ty, nil +} diff --git a/vendor/github.com/zclconf/go-cty-yaml/parserc.go b/vendor/github.com/zclconf/go-cty-yaml/parserc.go new file mode 100644 index 000000000000..81d05dfe573f --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/parserc.go @@ -0,0 +1,1095 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. 
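+	// Callers treat this function as an event pump and stop once they have
+	// seen the stream-end event; a minimal sketch of such a loop (mirroring
+	// how the callers in this package drive it) is:
+	//
+	//	var evt yaml_event_t
+	//	for {
+	//		if !yaml_parser_parse(parser, &evt) {
+	//			return parserError(parser)
+	//		}
+	//		if evt.typ == yaml_STREAM_END_EVENT {
+	//			break
+	//		}
+	//		// ... handle evt ...
+	//	}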
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. +func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case 
yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+	default:
+		panic("invalid parser state")
+	}
+}
+
+// Parse the production:
+// stream   ::= STREAM-START implicit_document? explicit_document* STREAM-END
+//              ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_STREAM_START_TOKEN {
+		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+	}
+	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_STREAM_START_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+		encoding:   token.encoding,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                          *
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                          *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// Parse extra document end indicators.
+	if !implicit {
+		for token.typ == yaml_DOCUMENT_END_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+		token.typ != yaml_DOCUMENT_START_TOKEN &&
+		token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an implicit document.
+		if !yaml_parser_process_directives(parser, nil, nil) {
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+		*event = yaml_event_t{
+			typ:        yaml_DOCUMENT_START_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+
+	} else if token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an explicit document.
+		var version_directive *yaml_version_directive_t
+		var tag_directives []yaml_tag_directive_t
+		start_mark := token.start_mark
+		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+			return false
+		}
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_DOCUMENT_START_TOKEN {
+			yaml_parser_set_parser_error(parser,
+				"did not find expected <document start>", token.start_mark)
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+		end_mark := token.end_mark
+
+		*event = yaml_event_t{
+			typ:               yaml_DOCUMENT_START_EVENT,
+			start_mark:        start_mark,
+			end_mark:          end_mark,
+			version_directive: version_directive,
+			tag_directives:    tag_directives,
+			implicit:          false,
+		}
+		skip_token(parser)
+
+	} else {
+		// Parse the stream end.
+		parser.state = yaml_PARSE_END_STATE
+		*event = yaml_event_t{
+			typ:        yaml_STREAM_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+		skip_token(parser)
+	}
+
+	return true
+}
+
+// Parse the productions:
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node?
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
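+// It is used wherever the grammar allows a node to be omitted, e.g. the
+// missing value in "key:" or the missing entry in "[a, , b]", so that
+// consumers always receive a well-formed event stream.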
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/github.com/zclconf/go-cty-yaml/readerc.go b/vendor/github.com/zclconf/go-cty-yaml/readerc.go new file mode 100644 index 000000000000..7c1f5fac3dbd --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/readerc.go @@ -0,0 +1,412 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. 
+const (
+	bom_UTF8    = "\xef\xbb\xbf"
+	bom_UTF16LE = "\xff\xfe"
+	bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+	// Ensure that we have enough bytes in the raw buffer.
+	for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+		if !yaml_parser_update_raw_buffer(parser) {
+			return false
+		}
+	}
+
+	// Determine the encoding.
+	buf := parser.raw_buffer
+	pos := parser.raw_buffer_pos
+	avail := len(buf) - pos
+	if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+		parser.encoding = yaml_UTF16LE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+		parser.encoding = yaml_UTF16BE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+		parser.encoding = yaml_UTF8_ENCODING
+		parser.raw_buffer_pos += 3
+		parser.offset += 3
+	} else {
+		parser.encoding = yaml_UTF8_ENCODING
+	}
+	return true
+}
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+	size_read := 0
+
+	// Return if the raw buffer is full.
+	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+		return true
+	}
+
+	// Return on EOF.
+	if parser.eof {
+		return true
+	}
+
+	// Move the remaining bytes in the raw buffer to the beginning.
+	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+	}
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+	parser.raw_buffer_pos = 0
+
+	// Call the read handler to fill the buffer.
+	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+	if err == io.EOF {
+		parser.eof = true
+	} else if err != nil {
+		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+	}
+	return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+	if parser.read_handler == nil {
+		panic("read handler must be set")
+	}
+
+	// [Go] This function was changed to guarantee the requested length size at EOF.
+	// The fact we need to do this is pretty awful, but the description above implies
+	// that this must be the case, and there are tests that rely on it.
+
+	// If the EOF flag is set and the raw buffer is empty, do nothing.
+	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+		// [Go] ACTUALLY! Read the documentation of this function above.
+		// This is just broken. To return true, we need to have the
+		// given length in the buffer. Not doing that means every single
+		// check that calls this function to make sure the buffer has a
+		// given length ends up either panicking (in Go) or accessing
+		// invalid memory (as the original C would).
+		//return true
+	}
+
+	// Return if the buffer contains enough characters.
+	if parser.unread >= length {
+		return true
+	}
+
+	// Determine the input encoding if it is not known yet.
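+	// (This consumes any leading BOM and falls back to UTF-8 when none is
+	// present.)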
+ if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. 
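+				// A width greater than the value needs is an "overlong"
+				// encoding (e.g. 0xC0 0x80 for NUL); RFC 3629 forbids
+				// these so that each character has exactly one encoding.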
+ switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. 
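+			// The decoded rune is re-encoded as UTF-8 into parser.buffer
+			// below, so the scanner layers above this reader always work
+			// on UTF-8 regardless of the input encoding.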
+			parser.raw_buffer_pos += width
+			parser.offset += width
+
+			// Finally put the character into the buffer.
+			if value <= 0x7F {
+				// 0000 0000-0000 007F . 0xxxxxxx
+				parser.buffer[buffer_len+0] = byte(value)
+				buffer_len += 1
+			} else if value <= 0x7FF {
+				// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+				parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+				buffer_len += 2
+			} else if value <= 0xFFFF {
+				// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+				parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+				buffer_len += 3
+			} else {
+				// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+				parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+				parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+				buffer_len += 4
+			}
+
+			parser.unread++
+		}
+
+		// On EOF, put NUL into the buffer and return.
+		if parser.eof {
+			parser.buffer[buffer_len] = 0
+			buffer_len++
+			parser.unread++
+			break
+		}
+	}
+	// [Go] Read the documentation of this function above. To return true,
+	// we need to have the given length in the buffer. Not doing that means
+	// every single check that calls this function to make sure the buffer
+	// has a given length ends up either panicking (in Go) or accessing
+	// invalid memory (as the original C would). This happens here due to
+	// the EOF above breaking early.
+	for buffer_len < length {
+		parser.buffer[buffer_len] = 0
+		buffer_len++
+	}
+	parser.buffer = parser.buffer[:buffer_len]
+	return true
+}
diff --git a/vendor/github.com/zclconf/go-cty-yaml/resolve.go b/vendor/github.com/zclconf/go-cty-yaml/resolve.go
new file mode 100644
index 000000000000..138c7aaa98ff
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty-yaml/resolve.go
@@ -0,0 +1,293 @@
+package yaml
+
+import (
+	"encoding/base64"
+	"fmt"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/zclconf/go-cty/cty"
+)
+
+type resolveMapItem struct {
+	value cty.Value
+	tag   string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+// Numeric literal regular expressions from the YAML 1.2 spec:
+//
+// https://yaml.org/spec/1.2/spec.html#id2805071
+var integerLiteralRegexp = regexp.MustCompile(`` +
+	// start of string, optional sign, and one of:
+	`\A[-+]?(` +
+	// octal literal with 0o prefix and optional _ separators
+	`|0o[0-7_]+` +
+	// decimal literal and optional _ separators
+	`|[0-9_]+` +
+	// hexadecimal literal with 0x prefix and optional _ separators
+	`|0x[0-9a-fA-F_]+` +
+	// end of group, and end of string
+	`)\z`,
+)
+var floatLiteralRegexp = regexp.MustCompile(
+	`\A[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?\z`,
+)
+
+func init() {
+	t := resolveTable
+	t[int('+')] = 'S' // Sign
+	t[int('-')] = 'S'
+	for _, c := range "0123456789" {
+		t[int(c)] = 'D' // Digit
+	}
+	for _, c := range "yYnNtTfFoO~" {
+		t[int(c)] = 'M' // In map
+	}
+	t[int('.')] = '.'
// Float (potentially in map) + + var resolveMapList = []struct { + v cty.Value + tag string + l []string + }{ + {cty.True, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {cty.True, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {cty.True, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {cty.False, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {cty.False, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {cty.False, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {cty.NullVal(cty.DynamicPseudoType), yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {cty.PositiveInfinity, yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {cty.PositiveInfinity, yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {cty.NegativeInfinity, yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG, yaml_BINARY_TAG: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) + +func (c *Converter) resolveScalar(tag string, src string, style yaml_scalar_style_t) (cty.Value, error) { + if !resolvableTag(tag) { + return cty.NilVal, fmt.Errorf("unsupported tag %q", tag) + } + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if src != "" { + hint = resolveTable[src[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE || style == yaml_DOUBLE_QUOTED_SCALAR_STYLE { + return cty.StringVal(src), nil + } + + // Handle things we can lookup in a map. + if item, ok := resolveMap[src]; ok { + return item.value, nil + } + + if tag == "" { + for _, nan := range []string{".nan", ".NaN", ".NAN"} { + if src == nan { + // cty cannot represent NaN, so this is an error + return cty.NilVal, fmt.Errorf("floating point NaN is not supported") + } + } + } + + // Base 60 floats are intentionally not supported. + + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + if numberVal, err := cty.ParseNumberVal(src); err == nil { + return numberVal, nil + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. + if tag == "" || tag == yaml_TIMESTAMP_TAG { + t, ok := parseTimestamp(src) + if ok { + // cty has no timestamp type, but its functions stdlib + // conventionally uses strings in an RFC3339 encoding + // to represent time, so we'll follow that convention here. 
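+					// (For example, an assumed input scalar "2001-12-14 21:59:43"
+					// would resolve to the string "2001-12-14T21:59:43Z".)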
+ return cty.StringVal(t.Format(time.RFC3339)), nil + } + } + + if integerLiteralRegexp.MatchString(src) { + tag = yaml_INT_TAG // will handle parsing below in our tag switch + break + } + if floatLiteralRegexp.MatchString(src) { + tag = yaml_FLOAT_TAG // will handle parsing below in our tag switch + break + } + default: + panic(fmt.Sprintf("cannot resolve tag %q with source %q", tag, src)) + } + } + + if tag == "" && src == "<<" { + return mergeMappingVal, nil + } + + switch tag { + case yaml_STR_TAG, yaml_BINARY_TAG: + // If it's binary then we want to keep the base64 representation, because + // cty has no binary type, but we will check that it's actually base64. + if tag == yaml_BINARY_TAG { + _, err := base64.StdEncoding.DecodeString(src) + if err != nil { + return cty.NilVal, fmt.Errorf("cannot parse %q as %s: not valid base64", src, tag) + } + } + return cty.StringVal(src), nil + case yaml_BOOL_TAG: + item, ok := resolveMap[src] + if !ok || item.tag != yaml_BOOL_TAG { + return cty.NilVal, fmt.Errorf("cannot parse %q as %s", src, tag) + } + return item.value, nil + case yaml_FLOAT_TAG, yaml_INT_TAG: + // Note: We don't actually check that a value tagged INT is a whole + // number here. We could, but cty generally doesn't care about the + // int/float distinction, so we'll just be generous and accept it. + plain := strings.Replace(src, "_", "", -1) + if numberVal, err := cty.ParseNumberVal(plain); err == nil { // handles decimal integers and floats + return numberVal, nil + } + if intv, err := strconv.ParseInt(plain, 0, 64); err == nil { // handles 0x and 00 prefixes + return cty.NumberIntVal(intv), nil + } + if uintv, err := strconv.ParseUint(plain, 0, 64); err == nil { // handles 0x and 00 prefixes + return cty.NumberUIntVal(uintv), nil + } + return cty.NilVal, fmt.Errorf("cannot parse %q as %s", src, tag) + case yaml_TIMESTAMP_TAG: + t, ok := parseTimestamp(src) + if ok { + // cty has no timestamp type, but its functions stdlib + // conventionally uses strings in an RFC3339 encoding + // to represent time, so we'll follow that convention here. + return cty.StringVal(t.Format(time.RFC3339)), nil + } + return cty.NilVal, fmt.Errorf("cannot parse %q as %s", src, tag) + case yaml_NULL_TAG: + return cty.NullVal(cty.DynamicPseudoType), nil + case "": + return cty.StringVal(src), nil + default: + return cty.NilVal, fmt.Errorf("unsupported tag %q", tag) + } +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. 
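+	// (These layouts use Go's reference time, Mon Jan 2 15:04:05 2006; the
+	// unpadded fields such as "1-2" also accept zero-padded input, so
+	// "2001-12-14" matches the date-only layout.)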
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+	// TODO write code to check all the formats supported by
+	// http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+	// Quick check: all date formats start with YYYY-.
+	i := 0
+	for ; i < len(s); i++ {
+		if c := s[i]; c < '0' || c > '9' {
+			break
+		}
+	}
+	if i != 4 || i == len(s) || s[i] != '-' {
+		return time.Time{}, false
+	}
+	for _, format := range allowedTimestampFormats {
+		if t, err := time.Parse(format, s); err == nil {
+			return t, true
+		}
+	}
+	return time.Time{}, false
+}
+
+type mergeMapping struct{}
+
+var mergeMappingTy = cty.Capsule("merge mapping", reflect.TypeOf(mergeMapping{}))
+var mergeMappingVal = cty.CapsuleVal(mergeMappingTy, &mergeMapping{})
diff --git a/vendor/github.com/zclconf/go-cty-yaml/scannerc.go b/vendor/github.com/zclconf/go-cty-yaml/scannerc.go
new file mode 100644
index 000000000000..077fd1dd2d44
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty-yaml/scannerc.go
@@ -0,0 +1,2696 @@
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descent parser (or,
+// LL(1) parser, as it is usually called).
+//
+// Actually there are two aspects of Scanning that might be called "clever";
+// the rest is quite straightforward. These aspects are "block collection
+// start" and "simple keys". Both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+//      STREAM-START(encoding)          # The stream start.
+//      STREAM-END                      # The stream end.
+//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
+//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
+//      DOCUMENT-START                  # '---'
+//      DOCUMENT-END                    # '...'
+//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
+//      BLOCK-MAPPING-START             # sequence or a block mapping.
+//      BLOCK-END                       # Indentation decrease.
+//      FLOW-SEQUENCE-START             # '['
+//      FLOW-SEQUENCE-END               # ']'
+//      FLOW-MAPPING-START              # '{'
+//      FLOW-MAPPING-END                # '}'
+//      BLOCK-ENTRY                     # '-'
+//      FLOW-ENTRY                      # ','
+//      KEY                             # '?' or nothing (simple keys).
+//      VALUE                           # ':'
+//      ALIAS(anchor)                   # '*anchor'
+//      ANCHOR(anchor)                  # '&anchor'
+//      TAG(handle,suffix)              # '!handle!suffix'
+//      SCALAR(value,style)             # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+//      STREAM-START(encoding)
+//      STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
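+//
+// (The encoding itself is detected earlier, in the reader: a leading
+// byte-order mark selects UTF-8 or UTF-16, and UTF-8 is assumed when no BOM
+// is present.)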
+// +// The next two tokens are responsible for tags: +// +// VERSION-DIRECTIVE(major,minor) +// TAG-DIRECTIVE(handle,prefix) +// +// Example: +// +// %YAML 1.1 +// %TAG ! !foo +// %TAG !yaml! tag:yaml.org,2002: +// --- +// +// The correspoding sequence of tokens: +// +// STREAM-START(utf-8) +// VERSION-DIRECTIVE(1,1) +// TAG-DIRECTIVE("!","!foo") +// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") +// DOCUMENT-START +// STREAM-END +// +// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole +// line. +// +// The document start and end indicators are represented by: +// +// DOCUMENT-START +// DOCUMENT-END +// +// Note that if a YAML stream contains an implicit document (without '---' +// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. 
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+//      [item 1, item 2, item 3]
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      FLOW-SEQUENCE-START
+//      SCALAR("item 1",plain)
+//      FLOW-ENTRY
+//      SCALAR("item 2",plain)
+//      FLOW-ENTRY
+//      SCALAR("item 3",plain)
+//      FLOW-SEQUENCE-END
+//      STREAM-END
+//
+// 2. A flow mapping:
+//
+//      {
+//          a simple key: a value,  # Note that the KEY token is produced.
+//          ? a complex key: another value,
+//      }
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      FLOW-MAPPING-START
+//      KEY
+//      SCALAR("a simple key",plain)
+//      VALUE
+//      SCALAR("a value",plain)
+//      FLOW-ENTRY
+//      KEY
+//      SCALAR("a complex key",plain)
+//      VALUE
+//      SCALAR("another value",plain)
+//      FLOW-ENTRY
+//      FLOW-MAPPING-END
+//      STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple
+// key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+//      BLOCK-SEQUENCE-START
+//      BLOCK-MAPPING-START
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes indentation decrease that ends a block
+// collection (cf. the DEDENT token in Python). However, YAML has some syntax
+// peculiarities that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' correspondingly.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+//      - item 1
+//      - item 2
+//      -
+//        - item 3.1
+//        - item 3.2
+//      -
+//        key 1: value 1
+//        key 2: value 2
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      SCALAR("item 1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 2",plain)
+//      BLOCK-ENTRY
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      SCALAR("item 3.1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 3.2",plain)
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("key 1",plain)
+//      VALUE
+//      SCALAR("value 1",plain)
+//      KEY
+//      SCALAR("key 2",plain)
+//      VALUE
+//      SCALAR("value 2",plain)
+//      BLOCK-END
+//      BLOCK-END
+//      STREAM-END
+//
+// 2. Block mappings:
+//
+//      a simple key: a value   # The KEY token is produced here.
+//      ? a complex key
+//      : another value
+//      a mapping:
+//        key 1: value 1
+//        key 2: value 2
+//      a sequence:
+//        - item 1
+//        - item 2
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("a simple key",plain)
+//      VALUE
+//      SCALAR("a value",plain)
+//      KEY
+//      SCALAR("a complex key",plain)
+//      VALUE
+//      SCALAR("another value",plain)
+//      KEY
+//      SCALAR("a mapping",plain)
+//      VALUE
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("key 1",plain)
+//      VALUE
+//      SCALAR("value 1",plain)
+//      KEY
+//      SCALAR("key 2",plain)
+//      VALUE
+//      SCALAR("value 2",plain)
+//      BLOCK-END
+//      KEY
+//      SCALAR("a sequence",plain)
+//      VALUE
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      SCALAR("item 1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 2",plain)
+//      BLOCK-END
+//      BLOCK-END
+//      STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) 
+ parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // Check if we really need to fetch more tokens. + need_more_tokens := false + + if parser.tokens_head == len(parser.tokens) { + // Queue is empty. + need_more_tokens = true + } else { + // Check if any potential simple key may occupy the head position. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + if simple_key.possible && simple_key.token_number == parser.tokens_parsed { + need_more_tokens = true + break + } + } + } + + // We are finished. + if !need_more_tokens { + break + } + // Fetch the next token. 
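+		// (Each successful call is expected to append at least one token to
+		// parser.tokens, so this loop makes progress or fails.)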
+ if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Remove obsolete potential simple keys. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? 
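+	// ('&name' introduces an anchor; the '*name' alias handled just above
+	// refers back to it.)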
+ if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? + if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +// Check the list of potential simple keys and remove the positions that +// cannot contain simple keys anymore. +func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { + // Check for a potential simple key for each flow level. + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + + // The specification requires that a simple key + // + // - is limited to a single line, + // - is shorter than 1024 characters. 
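+		// (Below, the line comparison enforces the single-line rule and the
+		// index+1024 comparison enforces the length limit.)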
+ if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { + + // Check if the potential simple key to be removed is required. + if simple_key.required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + } + } + return true +} + +// Check if a simple key may start at the current position and add it if +// needed. +func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + } + simple_key.mark = parser.mark + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. + if parser.simple_keys[i].required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + } + // Remove the key from the stack. + parser.simple_keys[i].possible = false + return true +} + +// Increase the flow level and resize the simple key list if needed. +func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // Increase the flow level. + parser.flow_level++ + return true +} + +// Decrease the flow level. +func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { + if parser.flow_level > 0 { + parser.flow_level-- + parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] + } + return true +} + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. + parser.indents = append(parser.indents, parser.indent) + parser.indent = column + + // Create a token and insert it into the queue. + token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { + // In the flow context, do nothing. 
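+	// (Indentation, and hence BLOCK-END, is only meaningful in the block
+	// context.)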
+ if parser.flow_level > 0 { + return true + } + + // Loop through the indentation levels in the stack. + for parser.indent > column { + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // The indicators '[' and '{' may start a simple key. 
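+	// (For example, the flow sequence in '[a, b]: value' is itself a mapping
+	// key, so its start position must be recorded before the flow level
+	// changes.)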
+ if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. 
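+	// (An explicit '?' key is only legal where a new mapping entry may begin,
+	// and may first need to open a BLOCK-MAPPING-START.)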
+ if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if simple_key.possible { + // Create the KEY token and insert it into the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. 
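+	// (For example, the tagged key in '!!str foo: bar'.)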
+ if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. 
+ } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. 
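+	// (A well-formed directive reads e.g. '%YAML 1.1': the name must be
+	// followed by a blank or a line break.)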
+ if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. 
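+	// (e.g. 'tag:yaml.org,2002:' in '%TAG !yaml! tag:yaml.org,2002:'.)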
+ if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. 
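+			// (The swap below achieves this: handle becomes the empty slice
+			// and the lone '!' moves into the suffix.)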
+ if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' 
|| parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. 
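+			// The indicator is a single decimal digit in the range 1-9.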
+ increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. 
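+		// (up to two bytes are needed, since the break may be a CR LF pair)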
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. 
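+		// (is_z reports a NUL byte, which marks the end of the input)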
+	if is_z(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+			start_mark, "found unexpected end of stream")
+		return false
+	}
+
+	// Consume non-blank characters.
+	leading_blanks := false
+	for !is_blankz(parser.buffer, parser.buffer_pos) {
+		if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+			// It is an escaped single quote.
+			s = append(s, '\'')
+			skip(parser)
+			skip(parser)
+
+		} else if single && parser.buffer[parser.buffer_pos] == '\'' {
+			// It is a right single quote.
+			break
+		} else if !single && parser.buffer[parser.buffer_pos] == '"' {
+			// It is a right double quote.
+			break
+
+		} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+			// It is an escaped line break.
+			if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+				return false
+			}
+			skip(parser)
+			skip_line(parser)
+			leading_blanks = true
+			break
+
+		} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+			// It is an escape sequence.
+			code_length := 0
+
+			// Check the escape character.
+			switch parser.buffer[parser.buffer_pos+1] {
+			case '0':
+				s = append(s, 0)
+			case 'a':
+				s = append(s, '\x07')
+			case 'b':
+				s = append(s, '\x08')
+			case 't', '\t':
+				s = append(s, '\x09')
+			case 'n':
+				s = append(s, '\x0A')
+			case 'v':
+				s = append(s, '\x0B')
+			case 'f':
+				s = append(s, '\x0C')
+			case 'r':
+				s = append(s, '\x0D')
+			case 'e':
+				s = append(s, '\x1B')
+			case ' ':
+				s = append(s, '\x20')
+			case '"':
+				s = append(s, '"')
+			case '\'':
+				s = append(s, '\'')
+			case '\\':
+				s = append(s, '\\')
+			case 'N': // NEL (#x85)
+				s = append(s, '\xC2')
+				s = append(s, '\x85')
+			case '_': // #xA0
+				s = append(s, '\xC2')
+				s = append(s, '\xA0')
+			case 'L': // LS (#x2028)
+				s = append(s, '\xE2')
+				s = append(s, '\x80')
+				s = append(s, '\xA8')
+			case 'P': // PS (#x2029)
+				s = append(s, '\xE2')
+				s = append(s, '\x80')
+				s = append(s, '\xA9')
+			case 'x':
+				code_length = 2
+			case 'u':
+				code_length = 4
+			case 'U':
+				code_length = 8
+			default:
+				yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+					start_mark, "found unknown escape character")
+				return false
+			}
+
+			skip(parser)
+			skip(parser)
+
+			// Consume an arbitrary escape code.
+			if code_length > 0 {
+				var value int
+
+				// Scan the character value.
+				if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+					return false
+				}
+				for k := 0; k < code_length; k++ {
+					if !is_hex(parser.buffer, parser.buffer_pos+k) {
+						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+							start_mark, "did not find expected hexadecimal number")
+						return false
+					}
+					value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+				}
+
+				// Check the value and write the character.
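+				// Reject surrogate code points and values beyond U+10FFFF, then
+				// append the code point to s as one to four UTF-8 bytes.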
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' 
&& + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. + if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/vendor/github.com/zclconf/go-cty-yaml/writerc.go b/vendor/github.com/zclconf/go-cty-yaml/writerc.go new file mode 100644 index 000000000000..a2dde608cb7a --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/writerc.go @@ -0,0 +1,26 @@ +package yaml + +// Set the writer error and return false. 
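+// It records the problem on the emitter and is used by yaml_emitter_flush below.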
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true +} diff --git a/vendor/github.com/zclconf/go-cty-yaml/yaml.go b/vendor/github.com/zclconf/go-cty-yaml/yaml.go new file mode 100644 index 000000000000..2c314cc16469 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/yaml.go @@ -0,0 +1,215 @@ +// Package yaml can marshal and unmarshal cty values in YAML format. +package yaml + +import ( + "errors" + "fmt" + "reflect" + "strings" + "sync" + + "github.com/zclconf/go-cty/cty" +) + +// Unmarshal reads the document found within the given source buffer +// and attempts to convert it into a value conforming to the given type +// constraint. +// +// This is an alias for Unmarshal on the predefined Converter in "Standard". +// +// An error is returned if the given source contains any YAML document +// delimiters. +func Unmarshal(src []byte, ty cty.Type) (cty.Value, error) { + return Standard.Unmarshal(src, ty) +} + +// Marshal serializes the given value into a YAML document, using a fixed +// mapping from cty types to YAML constructs. +// +// This is an alias for Marshal on the predefined Converter in "Standard". +// +// Note that unlike the function of the same name in the cty JSON package, +// this does not take a type constraint and therefore the YAML serialization +// cannot preserve late-bound type information in the serialization to be +// recovered from Unmarshal. Instead, any cty.DynamicPseudoType in the type +// constraint given to Unmarshal will be decoded as if the corresponding portion +// of the input were processed with ImpliedType to find a target type. +func Marshal(v cty.Value) ([]byte, error) { + return Standard.Marshal(v) +} + +// ImpliedType analyzes the given source code and returns a suitable type that +// it could be decoded into. +// +// For a converter that is using standard YAML rather than cty-specific custom +// tags, only a subset of cty types can be produced: strings, numbers, bools, +// tuple types, and object types. +// +// This is an alias for ImpliedType on the predefined Converter in "Standard". +func ImpliedType(src []byte) (cty.Type, error) { + return Standard.ImpliedType(src) +} + +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(yamlError); ok { + *err = e.err + } else { + panic(v) + } + } +} + +type yamlError struct { + err error +} + +func fail(err error) { + panic(yamlError{err}) +} + +func failf(format string, args ...interface{}) { + panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. 
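+// It is computed once per struct type by getStructInfo and cached in structMap.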
+type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. + Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} diff --git a/vendor/github.com/zclconf/go-cty-yaml/yamlh.go b/vendor/github.com/zclconf/go-cty-yaml/yamlh.go new file mode 100644 index 000000000000..e25cee563be8 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/yamlh.go @@ -0,0 +1,738 @@ +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. 
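+// It corresponds to a %YAML directive such as "%YAML 1.1".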
+type yaml_version_directive_t struct {
+	major int8 // The major version number.
+	minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+	handle []byte // The tag handle.
+	prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+	// Let the parser choose the encoding.
+	yaml_ANY_ENCODING yaml_encoding_t = iota
+
+	yaml_UTF8_ENCODING    // The default UTF-8 encoding.
+	yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+	yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+	// Let the parser choose the break type.
+	yaml_ANY_BREAK yaml_break_t = iota
+
+	yaml_CR_BREAK   // Use CR for line breaks (Mac style).
+	yaml_LN_BREAK   // Use LN for line breaks (Unix style).
+	yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+	// No error is produced.
+	yaml_NO_ERROR yaml_error_type_t = iota
+
+	yaml_MEMORY_ERROR   // Cannot allocate or reallocate a block of memory.
+	yaml_READER_ERROR   // Cannot read or decode the input stream.
+	yaml_SCANNER_ERROR  // Cannot scan the input stream.
+	yaml_PARSER_ERROR   // Cannot parse the input stream.
+	yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+	yaml_WRITER_ERROR   // Cannot write to the output stream.
+	yaml_EMITTER_ERROR  // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+	index  int // The position index.
+	line   int // The position line.
+	column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+	yaml_PLAIN_SCALAR_STYLE         // The plain scalar style.
+	yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+	yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+	yaml_LITERAL_SCALAR_STYLE       // The literal scalar style.
+	yaml_FOLDED_SCALAR_STYLE        // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+	// An empty token.
+	yaml_NO_TOKEN yaml_token_type_t = iota
+
+	yaml_STREAM_START_TOKEN // A STREAM-START token.
+	yaml_STREAM_END_TOKEN   // A STREAM-END token.
+
+	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
+	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
+	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
+
+	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
+
+	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
+	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
+ yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. + + yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. + yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. + yaml_KEY_TOKEN // A KEY token. + yaml_VALUE_TOKEN // A VALUE token. + + yaml_ALIAS_TOKEN // An ALIAS token. + yaml_ANCHOR_TOKEN // An ANCHOR token. + yaml_TAG_TOKEN // A TAG token. + yaml_SCALAR_TOKEN // A SCALAR token. +) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. 
+) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. 
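+// The value is the id of the item's node within the document's nodes slice.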
+type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. +type yaml_parser_state_t int + +const ( + yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota + + yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. + yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. + yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. 
+	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
+	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
+	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
+	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
+	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
+	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
+	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
+	yaml_PARSE_END_STATE                               // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+	switch ps {
+	case yaml_PARSE_STREAM_START_STATE:
+		return "yaml_PARSE_STREAM_START_STATE"
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return "yaml_PARSE_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return "yaml_PARSE_DOCUMENT_END_STATE"
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_STATE"
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return "yaml_PARSE_FLOW_NODE_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return
"yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. 
+ yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. +type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_writer io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. + tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. + multiline bool // Does the scalar contain line breaks? 
+		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
+		block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
+		single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
+		block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
+		style                 yaml_scalar_style_t // The output style.
+	}
+
+	// Dumper stuff
+
+	opened bool // Has the stream already been opened?
+	closed bool // Has the stream already been closed?
+
+	// The information associated with the document nodes.
+	anchors *struct {
+		references int  // The number of references.
+		anchor     int  // The anchor id.
+		serialized bool // Has the node been emitted?
+	}
+
+	last_anchor_id int // The last assigned anchor id.
+
+	document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/github.com/zclconf/go-cty-yaml/yamlprivateh.go b/vendor/github.com/zclconf/go-cty-yaml/yamlprivateh.go
new file mode 100644
index 000000000000..8110ce3c37a6
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty-yaml/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+	// The size of the input raw buffer.
+	input_raw_buffer_size = 512
+
+	// The size of the input buffer.
+	// It should be possible to decode the whole raw buffer.
+	input_buffer_size = input_raw_buffer_size * 3
+
+	// The size of the output buffer.
+	output_buffer_size = 128
+
+	// The size of the output raw buffer.
+	// It should be possible to encode the whole output buffer.
+	output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+	// The size of other stacks and queues.
+	initial_stack_size  = 16
+	initial_queue_size  = 16
+	initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+	return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+	bi := b[i]
+	if bi >= 'A' && bi <= 'F' {
+		return int(bi) - 'A' + 10
+	}
+	if bi >= 'a' && bi <= 'f' {
+		return int(bi) - 'a' + 10
+	}
+	return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+	return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+	return ((b[i] == 0x0A) || // . == #x0A
+		(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+		(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
+		(b[i] > 0xC2 && b[i] < 0xED) ||
+		(b[i] == 0xED && b[i+1] < 0xA0) ||
+		(b[i] == 0xEE) ||
+		(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+			!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+			!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+	return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
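+// (the UTF-8 byte order mark is the byte sequence 0xEF 0xBB 0xBF)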
+func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. +func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/golang.org/x/crypto/bcrypt/base64.go b/vendor/golang.org/x/crypto/bcrypt/base64.go new file mode 100644 index 000000000000..fc3116090818 --- /dev/null +++ b/vendor/golang.org/x/crypto/bcrypt/base64.go @@ -0,0 +1,35 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package bcrypt + +import "encoding/base64" + +const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" + +var bcEncoding = base64.NewEncoding(alphabet) + +func base64Encode(src []byte) []byte { + n := bcEncoding.EncodedLen(len(src)) + dst := make([]byte, n) + bcEncoding.Encode(dst, src) + for dst[n-1] == '=' { + n-- + } + return dst[:n] +} + +func base64Decode(src []byte) ([]byte, error) { + numOfEquals := 4 - (len(src) % 4) + for i := 0; i < numOfEquals; i++ { + src = append(src, '=') + } + + dst := make([]byte, bcEncoding.DecodedLen(len(src))) + n, err := bcEncoding.Decode(dst, src) + if err != nil { + return nil, err + } + return dst[:n], nil +} diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go new file mode 100644 index 000000000000..aeb73f81a14c --- /dev/null +++ b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go @@ -0,0 +1,295 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing +// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf +package bcrypt // import "golang.org/x/crypto/bcrypt" + +// The code is a port of Provos and Mazières's C implementation. +import ( + "crypto/rand" + "crypto/subtle" + "errors" + "fmt" + "io" + "strconv" + + "golang.org/x/crypto/blowfish" +) + +const ( + MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword + MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword + DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword +) + +// The error returned from CompareHashAndPassword when a password and hash do +// not match. +var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password") + +// The error returned from CompareHashAndPassword when a hash is too short to +// be a bcrypt hash. +var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password") + +// The error returned from CompareHashAndPassword when a hash was created with +// a bcrypt algorithm newer than this implementation. +type HashVersionTooNewError byte + +func (hv HashVersionTooNewError) Error() string { + return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion) +} + +// The error returned from CompareHashAndPassword when a hash starts with something other than '$' +type InvalidHashPrefixError byte + +func (ih InvalidHashPrefixError) Error() string { + return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih)) +} + +type InvalidCostError int + +func (ic InvalidCostError) Error() string { + return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost)) +} + +const ( + majorVersion = '2' + minorVersion = 'a' + maxSaltSize = 16 + maxCryptedHashSize = 23 + encodedSaltSize = 22 + encodedHashSize = 31 + minHashSize = 59 +) + +// magicCipherData is an IV for the 64 Blowfish encryption calls in +// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes. 
+var magicCipherData = []byte{ + 0x4f, 0x72, 0x70, 0x68, + 0x65, 0x61, 0x6e, 0x42, + 0x65, 0x68, 0x6f, 0x6c, + 0x64, 0x65, 0x72, 0x53, + 0x63, 0x72, 0x79, 0x44, + 0x6f, 0x75, 0x62, 0x74, +} + +type hashed struct { + hash []byte + salt []byte + cost int // allowed range is MinCost to MaxCost + major byte + minor byte +} + +// GenerateFromPassword returns the bcrypt hash of the password at the given +// cost. If the cost given is less than MinCost, the cost will be set to +// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package, +// to compare the returned hashed password with its cleartext version. +func GenerateFromPassword(password []byte, cost int) ([]byte, error) { + p, err := newFromPassword(password, cost) + if err != nil { + return nil, err + } + return p.Hash(), nil +} + +// CompareHashAndPassword compares a bcrypt hashed password with its possible +// plaintext equivalent. Returns nil on success, or an error on failure. +func CompareHashAndPassword(hashedPassword, password []byte) error { + p, err := newFromHash(hashedPassword) + if err != nil { + return err + } + + otherHash, err := bcrypt(password, p.cost, p.salt) + if err != nil { + return err + } + + otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor} + if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 { + return nil + } + + return ErrMismatchedHashAndPassword +} + +// Cost returns the hashing cost used to create the given hashed +// password. When, in the future, the hashing cost of a password system needs +// to be increased in order to adjust for greater computational power, this +// function allows one to establish which passwords need to be updated. +func Cost(hashedPassword []byte) (int, error) { + p, err := newFromHash(hashedPassword) + if err != nil { + return 0, err + } + return p.cost, nil +} + +func newFromPassword(password []byte, cost int) (*hashed, error) { + if cost < MinCost { + cost = DefaultCost + } + p := new(hashed) + p.major = majorVersion + p.minor = minorVersion + + err := checkCost(cost) + if err != nil { + return nil, err + } + p.cost = cost + + unencodedSalt := make([]byte, maxSaltSize) + _, err = io.ReadFull(rand.Reader, unencodedSalt) + if err != nil { + return nil, err + } + + p.salt = base64Encode(unencodedSalt) + hash, err := bcrypt(password, p.cost, p.salt) + if err != nil { + return nil, err + } + p.hash = hash + return p, err +} + +func newFromHash(hashedSecret []byte) (*hashed, error) { + if len(hashedSecret) < minHashSize { + return nil, ErrHashTooShort + } + p := new(hashed) + n, err := p.decodeVersion(hashedSecret) + if err != nil { + return nil, err + } + hashedSecret = hashedSecret[n:] + n, err = p.decodeCost(hashedSecret) + if err != nil { + return nil, err + } + hashedSecret = hashedSecret[n:] + + // The "+2" is here because we'll have to append at most 2 '=' to the salt + // when base64 decoding it in expensiveBlowfishSetup(). 
+ p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2) + copy(p.salt, hashedSecret[:encodedSaltSize]) + + hashedSecret = hashedSecret[encodedSaltSize:] + p.hash = make([]byte, len(hashedSecret)) + copy(p.hash, hashedSecret) + + return p, nil +} + +func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) { + cipherData := make([]byte, len(magicCipherData)) + copy(cipherData, magicCipherData) + + c, err := expensiveBlowfishSetup(password, uint32(cost), salt) + if err != nil { + return nil, err + } + + for i := 0; i < 24; i += 8 { + for j := 0; j < 64; j++ { + c.Encrypt(cipherData[i:i+8], cipherData[i:i+8]) + } + } + + // Bug compatibility with C bcrypt implementations. We only encode 23 of + // the 24 bytes encrypted. + hsh := base64Encode(cipherData[:maxCryptedHashSize]) + return hsh, nil +} + +func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) { + csalt, err := base64Decode(salt) + if err != nil { + return nil, err + } + + // Bug compatibility with C bcrypt implementations. They use the trailing + // NULL in the key string during expansion. + // We copy the key to prevent changing the underlying array. + ckey := append(key[:len(key):len(key)], 0) + + c, err := blowfish.NewSaltedCipher(ckey, csalt) + if err != nil { + return nil, err + } + + var i, rounds uint64 + rounds = 1 << cost + for i = 0; i < rounds; i++ { + blowfish.ExpandKey(ckey, c) + blowfish.ExpandKey(csalt, c) + } + + return c, nil +} + +func (p *hashed) Hash() []byte { + arr := make([]byte, 60) + arr[0] = '$' + arr[1] = p.major + n := 2 + if p.minor != 0 { + arr[2] = p.minor + n = 3 + } + arr[n] = '$' + n++ + copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost))) + n += 2 + arr[n] = '$' + n++ + copy(arr[n:], p.salt) + n += encodedSaltSize + copy(arr[n:], p.hash) + n += encodedHashSize + return arr[:n] +} + +func (p *hashed) decodeVersion(sbytes []byte) (int, error) { + if sbytes[0] != '$' { + return -1, InvalidHashPrefixError(sbytes[0]) + } + if sbytes[1] > majorVersion { + return -1, HashVersionTooNewError(sbytes[1]) + } + p.major = sbytes[1] + n := 3 + if sbytes[2] != '$' { + p.minor = sbytes[2] + n++ + } + return n, nil +} + +// sbytes should begin where decodeVersion left off. 
+func (p *hashed) decodeCost(sbytes []byte) (int, error) { + cost, err := strconv.Atoi(string(sbytes[0:2])) + if err != nil { + return -1, err + } + err = checkCost(cost) + if err != nil { + return -1, err + } + p.cost = cost + return 3, nil +} + +func (p *hashed) String() string { + return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor) +} + +func checkCost(cost int) error { + if cost < MinCost || cost > MaxCost { + return InvalidCostError(cost) + } + return nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index a38aee4571fb..4cac3c2d3539 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -62,6 +62,8 @@ github.com/NYTimes/gziphandler github.com/StackExchange/wmi # github.com/agext/levenshtein v1.2.1 github.com/agext/levenshtein +# github.com/apparentlymart/go-cidr v1.0.1 +github.com/apparentlymart/go-cidr/cidr # github.com/apparentlymart/go-textseg/v12 v12.0.0 => github.com/apparentlymart/go-textseg/v12 v12.0.0 github.com/apparentlymart/go-textseg/v12/textseg # github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e @@ -129,6 +131,8 @@ github.com/beorn7/perks/quantile github.com/bgentry/go-netrc/netrc # github.com/bgentry/speakeasy v0.1.0 github.com/bgentry/speakeasy +# github.com/bmatcuk/doublestar v1.1.5 +github.com/bmatcuk/doublestar # github.com/boltdb/bolt v1.3.1 ## explicit github.com/boltdb/bolt @@ -317,6 +321,8 @@ github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value # github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135 github.com/google/go-querystring/query +# github.com/google/uuid v1.1.1 +github.com/google/uuid # github.com/googleapis/gax-go/v2 v2.0.5 github.com/googleapis/gax-go/v2 # github.com/gophercloud/gophercloud v0.1.0 @@ -381,6 +387,13 @@ github.com/hashicorp/go-cleanhttp # github.com/hashicorp/go-connlimit v0.2.0 ## explicit github.com/hashicorp/go-connlimit +# github.com/hashicorp/go-cty-funcs v0.0.0-20200930094925-2721b1e36840 +## explicit +github.com/hashicorp/go-cty-funcs/cidr +github.com/hashicorp/go-cty-funcs/crypto +github.com/hashicorp/go-cty-funcs/encoding +github.com/hashicorp/go-cty-funcs/filesystem +github.com/hashicorp/go-cty-funcs/uuid # github.com/hashicorp/go-discover v0.0.0-20200812215701-c4b85f6ed31f => github.com/hashicorp/go-discover v0.0.0-20200812215701-c4b85f6ed31f ## explicit github.com/hashicorp/go-discover @@ -447,7 +460,7 @@ github.com/hashicorp/go-version ## explicit github.com/hashicorp/golang-lru github.com/hashicorp/golang-lru/simplelru -# github.com/hashicorp/hcl v1.0.1-0.20191016231534-914dc3f8dd7c +# github.com/hashicorp/hcl v1.0.1-0.20201016140508-a07e7d50bbee => github.com/hashicorp/hcl v1.0.1-0.20201016140508-a07e7d50bbee ## explicit github.com/hashicorp/hcl github.com/hashicorp/hcl/hcl/ast @@ -458,10 +471,13 @@ github.com/hashicorp/hcl/hcl/token github.com/hashicorp/hcl/json/parser github.com/hashicorp/hcl/json/scanner github.com/hashicorp/hcl/json/token -# github.com/hashicorp/hcl/v2 v2.5.1 +# github.com/hashicorp/hcl/v2 v2.7.1-0.20201020204811-68a97f93bb48 ## explicit github.com/hashicorp/hcl/v2 github.com/hashicorp/hcl/v2/ext/customdecode +github.com/hashicorp/hcl/v2/ext/dynblock +github.com/hashicorp/hcl/v2/ext/tryfunc +github.com/hashicorp/hcl/v2/ext/typeexpr github.com/hashicorp/hcl/v2/gohcl github.com/hashicorp/hcl/v2/hcldec github.com/hashicorp/hcl/v2/hclsyntax @@ -578,6 +594,7 @@ github.com/mitchellh/hashstructure ## explicit github.com/mitchellh/mapstructure # 
github.com/mitchellh/reflectwalk v1.0.1 +## explicit github.com/mitchellh/reflectwalk # github.com/moby/sys/mountinfo v0.1.3 github.com/moby/sys/mountinfo @@ -762,6 +779,9 @@ github.com/zclconf/go-cty/cty/gocty github.com/zclconf/go-cty/cty/json github.com/zclconf/go-cty/cty/msgpack github.com/zclconf/go-cty/cty/set +# github.com/zclconf/go-cty-yaml v1.0.2 +## explicit +github.com/zclconf/go-cty-yaml # go.opencensus.io v0.22.1-0.20190713072201-b4a14686f0a9 ## explicit go.opencensus.io @@ -782,6 +802,7 @@ go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate # golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 ## explicit +golang.org/x/crypto/bcrypt golang.org/x/crypto/blake2b golang.org/x/crypto/blowfish golang.org/x/crypto/chacha20 @@ -1019,5 +1040,6 @@ honnef.co/go/tools/version # github.com/godbus/dbus => github.com/godbus/dbus v5.0.1+incompatible # github.com/golang/protobuf => github.com/golang/protobuf v1.3.4 # github.com/hashicorp/go-discover => github.com/hashicorp/go-discover v0.0.0-20200812215701-c4b85f6ed31f +# github.com/hashicorp/hcl => github.com/hashicorp/hcl v1.0.1-0.20201016140508-a07e7d50bbee # github.com/hashicorp/nomad/api => ./api # github.com/kr/pty => github.com/kr/pty v1.1.5
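
Reviewer note: golang.org/x/crypto/bcrypt is newly vendored here, presumably pulled in via the new github.com/hashicorp/go-cty-funcs/crypto dependency listed in modules.txt. The following is a minimal, illustrative sketch of the exported API added in the bcrypt diff above (GenerateFromPassword, CompareHashAndPassword, Cost, and the cost constants); the password literal and printed values are hypothetical, not taken from this change.

package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// Hash an illustrative password. Per the vendored source, a cost below
	// MinCost (4) is silently raised to DefaultCost (10), and the returned
	// hash embeds the version, cost, and salt, e.g. "$2a$10$...".
	hash, err := bcrypt.GenerateFromPassword([]byte("hunter2"), bcrypt.DefaultCost)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(hash))

	// CompareHashAndPassword re-runs bcrypt using the cost and salt embedded
	// in the stored hash, then compares the results in constant time.
	if err := bcrypt.CompareHashAndPassword(hash, []byte("hunter2")); err != nil {
		fmt.Println("mismatch:", err)
	} else {
		fmt.Println("password ok")
	}

	// Cost recovers the work factor from a stored hash, which is how callers
	// can decide when hashes should be regenerated at a higher cost.
	cost, _ := bcrypt.Cost(hash)
	fmt.Println("cost:", cost)
}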