diff --git a/backend/pkg/backend/backend.go b/backend/pkg/backend/backend.go index ea587d272..1b8aa0ec6 100644 --- a/backend/pkg/backend/backend.go +++ b/backend/pkg/backend/backend.go @@ -31,7 +31,6 @@ import ( databaseTypes "github.com/openclarity/vmclarity/backend/pkg/database/types" "github.com/openclarity/vmclarity/backend/pkg/rest" "github.com/openclarity/vmclarity/runtime_scan/pkg/orchestrator" - "github.com/openclarity/vmclarity/runtime_scan/pkg/provider/aws" "github.com/openclarity/vmclarity/shared/pkg/backendclient" "github.com/openclarity/vmclarity/shared/pkg/log" uibackend "github.com/openclarity/vmclarity/ui_backend/pkg/rest" @@ -129,15 +128,15 @@ func Run(ctx context.Context) { func startOrchestrator(ctx context.Context, config *_config.Config, client *backendclient.BackendClient) error { orchestratorConfig, err := orchestrator.LoadConfig(config.BackendRestHost, config.BackendRestPort, rest.BaseURL) if err != nil { - return fmt.Errorf("failed to load runtime scan orchestrator config: %w", err) + return fmt.Errorf("failed to load Orchestrator config: %w", err) } - p, err := aws.New(ctx, orchestratorConfig.AWSConfig) + o, err := orchestrator.New(ctx, orchestratorConfig, client) if err != nil { - return fmt.Errorf("failed to create provider client: %w", err) + return fmt.Errorf("failed to initialize Orchestrator: %w", err) } - orchestrator.New(orchestratorConfig, p, client).Start(ctx) + o.Start(ctx) return nil } diff --git a/docs/configuration.md b/docs/configuration.md new file mode 100644 index 000000000..3f6da429d --- /dev/null +++ b/docs/configuration.md @@ -0,0 +1,46 @@ +# Configuration + +## Orchestrator + +| Environment Variable | Required | Default | Description | +|-------------------------------------------|-----------|---------|----------------------------------------------| +| `DELETE_JOB_POLICY` | | | | +| `SCANNER_CONTAINER_IMAGE` | | | | +| `GITLEAKS_BINARY_PATH` | | | | +| `CLAM_BINARY_PATH` | | | | +| 
`FRESHCLAM_BINARY_PATH` | | | | +| `ALTERNATIVE_FRESHCLAM_MIRROR_URL` | | | | +| `LYNIS_INSTALL_PATH` | | | | +| `SCANNER_VMCLARITY_BACKEND_ADDRESS` | | | | +| `EXPLOIT_DB_ADDRESS` | | | | +| `TRIVY_SERVER_ADDRESS` | | | | +| `TRIVY_SERVER_TIMEOUT` | | | | +| `GRYPE_SERVER_ADDRESS` | | | | +| `GRYPE_SERVER_TIMEOUT` | | | | +| `CHKROOTKIT_BINARY_PATH` | | | | +| `SCAN_CONFIG_POLLING_INTERVAL` | | | | +| `SCAN_CONFIG_RECONCILE_TIMEOUT` | | | | +| `SCAN_POLLING_INTERVAL` | | | | +| `SCAN_RECONCILE_TIMEOUT` | | | | +| `SCAN_TIMEOUT` | | | | +| `SCAN_RESULT_POLLING_INTERVAL` | | | | +| `SCAN_RESULT_RECONCILE_TIMEOUT` | | | | +| `SCAN_RESULT_PROCESSOR_POLLING_INTERVAL` | | | | +| `SCAN_RESULT_PROCESSOR_RECONCILE_TIMEOUT` | | | | +| `DISCOVERY_INTERVAL` | | | | +| `CONTROLLER_STARTUP_DELAY` | | | | +| `PROVIDER` | **yes** | `aws` | Provider used for Target discovery and scans | + +## Provider + +### AWS + +| Environment Variable | Required | Default | Description | +|----------------------------------------|----------|--------------|-------------------------------------------------------------------------------| +| `VMCLARITY_AWS_SCANNER_REGION` | **yes** | | Region where the Scanner instance needs to be created | +| `VMCLARITY_AWS_SUBNET_ID` | **yes** | | SubnetID where the Scanner instance needs to be created | +| `VMCLARITY_AWS_SECURITY_GROUP_ID` | **yes** | | SecurityGroupId which needs to be attached to the Scanner instance | +| `VMCLARITY_AWS_KEYPAIR_NAME` | | | Name of the SSH KeyPair to use for Scanner instance launch | +| `VMCLARITY_AWS_SCANNER_AMI_ID` | **yes** | | The AMI image used for creating Scanner instance | +| `VMCLARITY_AWS_SCANNER_INSTANCE_TYPE` | | `t2.large` | The instance type used for Scanner instance | +| `VMCLARITY_AWS_BLOCK_DEVICE_NAME` | | `xvdh` | Block device name used for attaching Scanner volume to the Scanner instance | diff --git a/installation/aws/VmClarity.cfn b/installation/aws/VmClarity.cfn index ec249a629..4cbcaa286 100644 --- 
a/installation/aws/VmClarity.cfn +++ b/installation/aws/VmClarity.cfn @@ -229,21 +229,46 @@ Resources: content: Fn::Sub: - | - AWS_REGION=${AWS::Region} - SCANNER_AWS_REGION=${AWS::Region} - AWS_SUBNET_ID=${VmClarityScannerSubnet} - AWS_SECURITY_GROUP_ID=${VmClarityScannerSecurityGroup} - AWS_INSTANCE_TYPE=${ScannerInstanceType} - SCANNER_KEY_PAIR_NAME=${KeyName} - AWS_JOB_IMAGE_ID=${JobImageID} + ## + ## Orchestrator configuration + ## + # Host for the VMClarity backend server BACKEND_REST_HOST=__BACKEND_REST_HOST__ + # Port number for the VMClarity backend server BACKEND_REST_PORT=8888 + # Container image for Scanner instance SCANNER_CONTAINER_IMAGE=${ScannerContainerImage} + # Trivy server address TRIVY_SERVER_ADDRESS=http://__BACKEND_REST_HOST__:9992 + # Grype server address GRYPE_SERVER_ADDRESS=__BACKEND_REST_HOST__:9991 - DELETE_JOB_POLICY=${AssetScanDeletePolicy} + # FreshClam mirror URL ALTERNATIVE_FRESHCLAM_MIRROR_URL=http://__BACKEND_REST_HOST__:1000/clamav - - JobImageID: !FindInMap + # Resource cleanup policy + DELETE_JOB_POLICY=${AssetScanDeletePolicy} + # Provider to use + PROVIDER=aws + + ## + ## Provider configuration + ## + # The AWS region where the provider is deployed + AWS_REGION=${AWS::Region} + # Region where the Scanner instance needs to be created + VMCLARITY_AWS_SCANNER_REGION=${AWS::Region} + # SubnetID where the Scanner instance needs to be created + VMCLARITY_AWS_SUBNET_ID=${VmClarityScannerSubnet} + # SecurityGroupId which needs to be attached to the Scanner instance + VMCLARITY_AWS_SECURITY_GROUP_ID=${VmClarityScannerSecurityGroup} + # Name of the SSH KeyPair to use for Scanner instance launch + VMCLARITY_AWS_KEYPAIR_NAME=${KeyName} + # The AMI image used for creating Scanner instance + VMCLARITY_AWS_SCANNER_AMI_ID=${ScannerImageID} + # The instance type used for Scanner instance + VMCLARITY_AWS_SCANNER_INSTANCE_TYPE=${ScannerInstanceType} + # Block device name used for attaching Scanner volume to the Scanner instance + 
#VMCLARITY_AWS_BLOCK_DEVICE_NAME=xvdh + - ScannerImageID: !FindInMap - AWSRegionArch2AMI - !Ref "AWS::Region" - !FindInMap diff --git a/runtime_scan/pkg/orchestrator/config.go b/runtime_scan/pkg/orchestrator/config.go index 87d9dad1b..2ae4a1985 100644 --- a/runtime_scan/pkg/orchestrator/config.go +++ b/runtime_scan/pkg/orchestrator/config.go @@ -19,31 +19,28 @@ import ( "fmt" "net" "strconv" + "strings" "time" "github.com/spf13/viper" + "github.com/openclarity/vmclarity/api/models" "github.com/openclarity/vmclarity/runtime_scan/pkg/orchestrator/discovery" "github.com/openclarity/vmclarity/runtime_scan/pkg/orchestrator/scanconfigwatcher" "github.com/openclarity/vmclarity/runtime_scan/pkg/orchestrator/scanresultprocessor" "github.com/openclarity/vmclarity/runtime_scan/pkg/orchestrator/scanresultwatcher" "github.com/openclarity/vmclarity/runtime_scan/pkg/orchestrator/scanwatcher" - "github.com/openclarity/vmclarity/runtime_scan/pkg/provider/aws" ) const ( - ScannerAWSRegion = "SCANNER_AWS_REGION" DeleteJobPolicy = "DELETE_JOB_POLICY" ScannerContainerImage = "SCANNER_CONTAINER_IMAGE" - ScannerKeyPairName = "SCANNER_KEY_PAIR_NAME" GitleaksBinaryPath = "GITLEAKS_BINARY_PATH" ClamBinaryPath = "CLAM_BINARY_PATH" FreshclamBinaryPath = "FRESHCLAM_BINARY_PATH" AlternativeFreshclamMirrorURL = "ALTERNATIVE_FRESHCLAM_MIRROR_URL" LynisInstallPath = "LYNIS_INSTALL_PATH" - AttachedVolumeDeviceName = "ATTACHED_VOLUME_DEVICE_NAME" ScannerBackendAddress = "SCANNER_VMCLARITY_BACKEND_ADDRESS" - ScanConfigWatchInterval = "SCAN_CONFIG_WATCH_INTERVAL" ExploitDBAddress = "EXPLOIT_DB_ADDRESS" TrivyServerAddress = "TRIVY_SERVER_ADDRESS" TrivyServerTimeout = "TRIVY_SERVER_TIMEOUT" @@ -67,20 +64,21 @@ const ( DiscoveryInterval = "DISCOVERY_INTERVAL" ControllerStartupDelay = "CONTROLLER_STARTUP_DELAY" + + ProviderKind = "PROVIDER" ) const ( - DefaultScannerAWSRegion = "us-east-1" - DefaultAttachedVolumeDeviceName = "xvdh" - DefaultTrivyServerTimeout = 5 * time.Minute DefaultGrypeServerTimeout = 
2 * time.Minute DefaultControllerStartupDelay = 15 * time.Second + DefaultProviderKind = models.AWS ) type Config struct { - AWSConfig *aws.Config + ProviderKind models.CloudProvider + ScannerBackendAddress string // The Orchestrator starts the Controller(s) in a sequence and the ControllerStartupDelay is used for waiting @@ -96,8 +94,6 @@ type Config struct { } func setConfigDefaults(backendHost string, backendPort int, backendBaseURL string) { - viper.SetDefault(ScannerAWSRegion, DefaultScannerAWSRegion) - viper.SetDefault(ScanConfigWatchInterval, "30s") viper.SetDefault(DeleteJobPolicy, string(scanresultwatcher.DeleteJobPolicyAlways)) viper.SetDefault(ScannerBackendAddress, fmt.Sprintf("http://%s%s", net.JoinHostPort(backendHost, strconv.Itoa(backendPort)), backendBaseURL)) // https://github.com/openclarity/vmclarity-tools-base/blob/main/Dockerfile#L33 @@ -107,7 +103,6 @@ func setConfigDefaults(backendHost string, backendPort int, backendBaseURL strin // https://github.com/openclarity/vmclarity-tools-base/blob/main/Dockerfile viper.SetDefault(ChkrootkitBinaryPath, "/artifacts/chkrootkit") viper.SetDefault(ExploitDBAddress, fmt.Sprintf("http://%s", net.JoinHostPort(backendHost, "1326"))) - viper.SetDefault(AttachedVolumeDeviceName, DefaultAttachedVolumeDeviceName) viper.SetDefault(ClamBinaryPath, "clamscan") viper.SetDefault(FreshclamBinaryPath, "freshclam") viper.SetDefault(TrivyServerTimeout, DefaultTrivyServerTimeout) @@ -123,6 +118,7 @@ func setConfigDefaults(backendHost string, backendPort int, backendBaseURL strin viper.SetDefault(ScanResultProcessorReconcileTimeout, scanresultprocessor.DefaultReconcileTimeout.String()) viper.SetDefault(DiscoveryInterval, discovery.DefaultInterval.String()) viper.SetDefault(ControllerStartupDelay, DefaultControllerStartupDelay.String()) + viper.SetDefault(ProviderKind, DefaultProviderKind) viper.AutomaticEnv() } @@ -130,11 +126,17 @@ func setConfigDefaults(backendHost string, backendPort int, backendBaseURL strin func 
LoadConfig(backendHost string, backendPort int, baseURL string) (*Config, error) { setConfigDefaults(backendHost, backendPort, baseURL) - c := &Config{ - AWSConfig: aws.LoadConfig(), + var providerKind models.CloudProvider + switch strings.ToLower(viper.GetString(ProviderKind)) { + case strings.ToLower(string(models.AWS)): + fallthrough + default: + providerKind = models.AWS + } + c := &Config{ + ProviderKind: providerKind, ControllerStartupDelay: viper.GetDuration(ControllerStartupDelay), - DiscoveryConfig: discovery.Config{ DiscoveryInterval: viper.GetDuration(DiscoveryInterval), }, @@ -152,14 +154,11 @@ func LoadConfig(backendHost string, backendPort int, baseURL string) (*Config, e PollPeriod: viper.GetDuration(ScanResultPollingInterval), ReconcileTimeout: viper.GetDuration(ScanResultReconcileTimeout), ScannerConfig: scanresultwatcher.ScannerConfig{ - Region: viper.GetString(ScannerAWSRegion), DeleteJobPolicy: scanresultwatcher.GetDeleteJobPolicyType(viper.GetString(DeleteJobPolicy)), ScannerImage: viper.GetString(ScannerContainerImage), ScannerBackendAddress: viper.GetString(ScannerBackendAddress), - ScannerKeyPairName: viper.GetString(ScannerKeyPairName), GitleaksBinaryPath: viper.GetString(GitleaksBinaryPath), LynisInstallPath: viper.GetString(LynisInstallPath), - DeviceName: viper.GetString(AttachedVolumeDeviceName), ExploitsDBAddress: viper.GetString(ExploitDBAddress), ClamBinaryPath: viper.GetString(ClamBinaryPath), FreshclamBinaryPath: viper.GetString(FreshclamBinaryPath), diff --git a/runtime_scan/pkg/orchestrator/orchestrator.go b/runtime_scan/pkg/orchestrator/orchestrator.go index 77f326c4c..47ac46ea9 100644 --- a/runtime_scan/pkg/orchestrator/orchestrator.go +++ b/runtime_scan/pkg/orchestrator/orchestrator.go @@ -17,14 +17,17 @@ package orchestrator import ( "context" + "fmt" "time" + "github.com/openclarity/vmclarity/api/models" "github.com/openclarity/vmclarity/runtime_scan/pkg/orchestrator/discovery" 
"github.com/openclarity/vmclarity/runtime_scan/pkg/orchestrator/scanconfigwatcher" "github.com/openclarity/vmclarity/runtime_scan/pkg/orchestrator/scanresultprocessor" "github.com/openclarity/vmclarity/runtime_scan/pkg/orchestrator/scanresultwatcher" "github.com/openclarity/vmclarity/runtime_scan/pkg/orchestrator/scanwatcher" "github.com/openclarity/vmclarity/runtime_scan/pkg/provider" + "github.com/openclarity/vmclarity/runtime_scan/pkg/provider/aws" "github.com/openclarity/vmclarity/shared/pkg/backendclient" "github.com/openclarity/vmclarity/shared/pkg/log" ) @@ -36,7 +39,10 @@ type Orchestrator struct { controllerStartupDelay time.Duration } -func New(config *Config, p provider.Provider, b *backendclient.BackendClient) *Orchestrator { +// NewWithProvider returns an Orchestrator initialized using the p provider.Provider. +// Use this method when Orchestrator needs to rely on custom provider.Provider implementation. +// E.g. End-to-End testing. +func NewWithProvider(config *Config, p provider.Provider, b *backendclient.BackendClient) (*Orchestrator, error) { scanConfigWatcherConfig := config.ScanConfigWatcherConfig.WithBackendClient(b) discoveryConfig := config.DiscoveryConfig.WithBackendClient(b).WithProviderClient(p) scanWatcherConfig := config.ScanWatcherConfig.WithBackendClient(b).WithProviderClient(p) @@ -52,11 +58,22 @@ func New(config *Config, p provider.Provider, b *backendclient.BackendClient) *O scanresultwatcher.New(scanResultWatcherConfig), }, controllerStartupDelay: config.ControllerStartupDelay, + }, nil +} + +// New returns a new Orchestrator initialized using the provided configuration. +func New(ctx context.Context, config *Config, b *backendclient.BackendClient) (*Orchestrator, error) { + p, err := NewProvider(ctx, config.ProviderKind) + if err != nil { + return nil, fmt.Errorf("failed to initialize provider. 
Provider=%s: %w", config.ProviderKind, err) } + + return NewWithProvider(config, p, b) } +// Start makes the Orchestrator to start all Controller(s). func (o *Orchestrator) Start(ctx context.Context) { - log.GetLoggerFromContextOrDiscard(ctx).Infof("Starting Orchestrator server") + log.GetLoggerFromContextOrDiscard(ctx).Info("Starting Orchestrator server") ctx, cancel := context.WithCancel(ctx) o.cancelFunc = cancel @@ -67,10 +84,22 @@ func (o *Orchestrator) Start(ctx context.Context) { } } +// Stop makes the Orchestrator to stop all Controller(s). func (o *Orchestrator) Stop(ctx context.Context) { - log.GetLoggerFromContextOrDiscard(ctx).Infof("Stopping Orchestrator server") + log.GetLoggerFromContextOrDiscard(ctx).Info("Stopping Orchestrator server") if o.cancelFunc != nil { o.cancelFunc() } } + +// nolint:wrapcheck +// NewProvider returns an initialized provider.Provider based on the kind models.CloudProvider. +func NewProvider(ctx context.Context, kind models.CloudProvider) (provider.Provider, error) { + switch kind { + case models.AWS: + return aws.New(ctx) + default: + return nil, fmt.Errorf("unsupported provider: %s", kind) + } +} diff --git a/runtime_scan/pkg/orchestrator/scanresultwatcher/config.go b/runtime_scan/pkg/orchestrator/scanresultwatcher/config.go index 58496cab1..acae51e9f 100644 --- a/runtime_scan/pkg/orchestrator/scanresultwatcher/config.go +++ b/runtime_scan/pkg/orchestrator/scanresultwatcher/config.go @@ -61,12 +61,6 @@ func (c Config) WithScannerConfig(s ScannerConfig) Config { } type ScannerConfig struct { - // We need to know where the VMClarity scanner is running so that we - // can boot the scanner jobs in the same region, there isn't a - // mechanism to discover this right now so its passed in as a config 
- Region string - // Address that the Scanner should use to talk to the VMClarity backend // We use a configuration variable for this instead of discovering it // automatically in case VMClarity backend has multiple IPs (internal @@ -89,10 +83,6 @@ type ScannerConfig struct { // tools. ScannerImage string - // The key pair name that should be attached to the scanner VM instance. - // Mainly used for debugging. - ScannerKeyPairName string - // The gitleaks binary path in the scanner image container. GitleaksBinaryPath string @@ -110,7 +100,4 @@ type ScannerConfig struct { // The chkrootkit binary path in the scanner image container. ChkrootkitBinaryPath string - - // the name of the block device to attach to the scanner job - DeviceName string } diff --git a/runtime_scan/pkg/orchestrator/scanresultwatcher/helpers.go b/runtime_scan/pkg/orchestrator/scanresultwatcher/helpers.go index 969cb8db7..ddfe1e0ea 100644 --- a/runtime_scan/pkg/orchestrator/scanresultwatcher/helpers.go +++ b/runtime_scan/pkg/orchestrator/scanresultwatcher/helpers.go @@ -78,9 +78,6 @@ func newJobConfig(i *jobConfigInput) (*provider.ScanJobConfig, error) { ScannerImage: i.config.ScannerImage, ScannerCLIConfig: string(scannerConfigYAML), VMClarityAddress: i.config.ScannerBackendAddress, - KeyPairName: i.config.ScannerKeyPairName, - ScannerRegion: i.config.Region, - BlockDeviceName: i.config.DeviceName, ScanMetadata: provider.ScanMetadata{ ScanID: i.scanResult.Scan.Id, ScanResultID: *i.scanResult.Id, diff --git a/runtime_scan/pkg/provider/aws/client.go b/runtime_scan/pkg/provider/aws/client.go index b6aca7e06..74675d313 100644 --- a/runtime_scan/pkg/provider/aws/client.go +++ b/runtime_scan/pkg/provider/aws/client.go @@ -37,12 +37,21 @@ import ( type Client struct { ec2Client *ec2.Client - awsConfig *Config + config *Config } -func New(ctx context.Context, config *Config) (*Client, error) { +func New(ctx context.Context) (*Client, error) { + config, err := NewConfig() + if err != nil { + return nil, 
fmt.Errorf("invalid configuration. Provider=AWS: %w", err) + } + + if err = config.Validate(); err != nil { + return nil, fmt.Errorf("failed to validate provider configuration. Provider=AWS: %w", err) + } + awsClient := Client{ - awsConfig: config, + config: config, } cfg, err := awsconfig.LoadDefaultConfig(ctx) @@ -226,8 +235,8 @@ func (c *Client) createInstance(ctx context.Context, region string, config *prov runParams := &ec2.RunInstancesInput{ MaxCount: utils.PointerTo[int32](1), MinCount: utils.PointerTo[int32](1), - ImageId: &c.awsConfig.AmiID, - InstanceType: ec2types.InstanceType(c.awsConfig.InstanceType), + ImageId: utils.PointerTo(c.config.ScannerImage), + InstanceType: ec2types.InstanceType(c.config.ScannerInstanceType), TagSpecifications: []ec2types.TagSpecification{ { ResourceType: ec2types.ResourceTypeInstance, @@ -251,8 +260,8 @@ func (c *Client) createInstance(ctx context.Context, region string, config *prov AssociatePublicIpAddress: utils.PointerTo(false), DeleteOnTermination: utils.PointerTo(true), DeviceIndex: utils.PointerTo[int32](0), - Groups: []string{c.awsConfig.SecurityGroupID}, - SubnetId: &c.awsConfig.SubnetID, + Groups: []string{c.config.SecurityGroupID}, + SubnetId: &c.config.SubnetID, }, } @@ -274,9 +283,9 @@ func (c *Client) createInstance(ctx context.Context, region string, config *prov retryMaxAttempts = *config.ScannerInstanceCreationConfig.RetryMaxAttempts } - if config.KeyPairName != "" { + if c.config.KeyPairName != "" { // Set a key-pair to the instance. 
- runParams.KeyName = &config.KeyPairName + runParams.KeyName = &c.config.KeyPairName } // if retryMaxAttempts value is 0 it will be ignored @@ -304,7 +313,7 @@ func (c *Client) RunTargetScan(ctx context.Context, config *provider.ScanJobConf logger := log.GetLoggerFromContextOrDefault(ctx).WithFields(logrus.Fields{ "TargetInstanceID": vmInfo.InstanceID, "TargetLocation": vmInfo.Location, - "ScannerLocation": config.ScannerRegion, + "ScannerLocation": c.config.ScannerRegion, "Provider": string(c.Kind()), }) @@ -323,7 +332,7 @@ func (c *Client) RunTargetScan(ctx context.Context, config *provider.ScanJobConf logger.Trace("Creating scanner VM instance") var err error - scannnerInstance, err = c.createInstance(ctx, config.ScannerRegion, config) + scannnerInstance, err = c.createInstance(ctx, c.config.ScannerRegion, config) if err != nil { errs <- WrapError(fmt.Errorf("failed to create scanner VM instance: %w", err)) return @@ -376,6 +385,7 @@ func (c *Client) RunTargetScan(ctx context.Context, config *provider.ScanJobConf } return } + srcInstance := instanceFromEC2Instance(SrcEC2Instance, c.ec2Client, targetVMLocation.Region, config) logger.WithField("TargetInstanceID", srcInstance.ID).Trace("Found target VM instance") @@ -420,10 +430,10 @@ func (c *Client) RunTargetScan(ctx context.Context, config *provider.ScanJobConf "TargetVolumeID": srcVol.ID, "TargetVolumeSnapshotID": srcVolSnapshot.ID, }).Debug("Copying target volume snapshot to scanner location") - destVolSnapshot, err = srcVolSnapshot.Copy(ctx, config.ScannerRegion) + destVolSnapshot, err = srcVolSnapshot.Copy(ctx, c.config.ScannerRegion) if err != nil { err = fmt.Errorf("failed to copy target volume snapshot to location. 
TargetVolumeSnapshotID=%s Location=%s: %w", - srcVolSnapshot.ID, config.ScannerRegion, err) + srcVolSnapshot.ID, c.config.ScannerRegion, err) errs <- WrapError(err) return } @@ -500,7 +510,7 @@ func (c *Client) RunTargetScan(ctx context.Context, config *provider.ScanJobConf "ScannerVolumeID": scannerVol.ID, "ScannerIntanceID": scannnerInstance.ID, }).Debug("Attaching scanner volume to scanner VM instance") - err = scannnerInstance.AttachVolume(ctx, scannerVol, config.BlockDeviceName) + err = scannnerInstance.AttachVolume(ctx, scannerVol, c.config.BlockDeviceName) if err != nil { err = fmt.Errorf("failed to attach volume to scanner instance. ScannerVolumeID=%s ScannerInstanceID=%s: %w", scannerVol.ID, scannnerInstance.ID, err) @@ -662,7 +672,7 @@ func (c *Client) RemoveTargetScan(ctx context.Context, config *provider.ScanJobC } logger := log.GetLoggerFromContextOrDefault(ctx).WithFields(logrus.Fields{ - "ScannerLocation": config.ScannerRegion, + "ScannerLocation": c.config.ScannerRegion, "Provider": string(c.Kind()), }) @@ -679,7 +689,7 @@ func (c *Client) RemoveTargetScan(ctx context.Context, config *provider.ScanJobC // Delete scanner instance logger.Debug("Deleting scanner VM Instance.") - done, err := c.deleteInstances(ctx, ec2Filters, config.ScannerRegion) + done, err := c.deleteInstances(ctx, ec2Filters, c.config.ScannerRegion) if err != nil { errs <- WrapError(fmt.Errorf("failed to delete scanner VM instance: %w", err)) return @@ -695,7 +705,8 @@ func (c *Client) RemoveTargetScan(ctx context.Context, config *provider.ScanJobC // Delete scanner volume logger.Debug("Deleting scanner volume.") - done, err = c.deleteVolumes(ctx, ec2Filters, config.ScannerRegion) + done, err = c.deleteVolumes(ctx, ec2Filters, c.config.ScannerRegion) + if err != nil { errs <- WrapError(fmt.Errorf("failed to delete scanner volume: %w", err)) return @@ -716,7 +727,7 @@ func (c *Client) RemoveTargetScan(ctx context.Context, config *provider.ScanJobC defer wg.Done() 
logger.Debug("Deleting scanner volume snapshot.") - done, err := c.deleteVolumeSnapshots(ctx, ec2Filters, config.ScannerRegion) + done, err := c.deleteVolumeSnapshots(ctx, ec2Filters, c.config.ScannerRegion) if err != nil { errs <- WrapError(fmt.Errorf("failed to delete scanner volume snapshot: %w", err)) return @@ -743,7 +754,7 @@ func (c *Client) RemoveTargetScan(ctx context.Context, config *provider.ScanJobC return } - if location.Region == config.ScannerRegion { + if location.Region == c.config.ScannerRegion { return } diff --git a/runtime_scan/pkg/provider/aws/config.go b/runtime_scan/pkg/provider/aws/config.go index db1df7c2a..f7c472cdd 100644 --- a/runtime_scan/pkg/provider/aws/config.go +++ b/runtime_scan/pkg/provider/aws/config.go @@ -15,40 +15,83 @@ package aws -import "github.com/spf13/viper" +import ( + "fmt" + + "github.com/spf13/viper" +) const ( - AWSSubnetID = "AWS_SUBNET_ID" - AWSJobImageID = "AWS_JOB_IMAGE_ID" - AWSSecurityGroupID = "AWS_SECURITY_GROUP_ID" - AWSInstanceType = "AWS_INSTANCE_TYPE" - defaultAWSJobImageID = "ami-0568773882d492fc8" // ubuntu server 22.04 LTS (HVM), SSD volume type - defaultAWSInstanceType = "t2.large" + DefaultEnvPrefix = "VMCLARITY_AWS" + DefaultScannerInstanceType = "t2.large" + DefaultBlockDeviceName = "xvdh" ) type Config struct { - AmiID string // image id of a scanner job - SubnetID string // the scanner's subnet ID - SecurityGroupID string // the scanner's security group - InstanceType string // the scanner's instance type + // Region where the Scanner instance needs to be created + ScannerRegion string `mapstructure:"scanner_region"` + // SubnetID where the Scanner instance needs to be created + SubnetID string `mapstructure:"subnet_id"` + // SecurityGroupID which needs to be attached to the Scanner instance + SecurityGroupID string `mapstructure:"security_group_id"` + // KeyPairName is the name of the SSH KeyPair to use for Scanner instance launch + KeyPairName string `mapstructure:"keypair_name"` + // 
ScannerImage is the AMI image used for creating Scanner instance + ScannerImage string `mapstructure:"scanner_ami_id"` + // ScannerInstanceType is the instance type used for Scanner instance + ScannerInstanceType string `mapstructure:"scanner_instance_type"` + // BlockDeviceName contains the block device name used for attaching Scanner volume to the Scanner instance + BlockDeviceName string `mapstructure:"block_device_name"` } -func setConfigDefaults() { - viper.SetDefault(AWSJobImageID, defaultAWSJobImageID) - viper.SetDefault(AWSInstanceType, defaultAWSInstanceType) +func (c *Config) Validate() error { + if c.ScannerRegion == "" { + return fmt.Errorf("parameter Region must be provided") + } + + if c.SubnetID == "" { + return fmt.Errorf("parameter SubnetID must be provided") + } + + if c.SecurityGroupID == "" { + return fmt.Errorf("parameter SecurityGroupID must be provided") + } - viper.AutomaticEnv() + if c.ScannerImage == "" { + return fmt.Errorf("parameter ScannerImage must be provided") + } + + if c.ScannerInstanceType == "" { + return fmt.Errorf("parameter ScannerInstanceType must be provided") + } + + return nil } -func LoadConfig() *Config { - setConfigDefaults() +func NewConfig() (*Config, error) { + // Avoid modifying the global instance + v := viper.New() + + v.SetEnvPrefix(DefaultEnvPrefix) + v.AllowEmptyEnv(true) + v.AutomaticEnv() + + _ = v.BindEnv("scanner_region") + _ = v.BindEnv("subnet_id") + _ = v.BindEnv("security_group_id") + _ = v.BindEnv("keypair_name") + _ = v.BindEnv("scanner_ami_id") + + _ = v.BindEnv("scanner_instance_type") + v.SetDefault("scanner_instance_type", DefaultScannerInstanceType) + + _ = v.BindEnv("block_device_name") + v.SetDefault("block_device_name", DefaultBlockDeviceName) - config := &Config{ - AmiID: viper.GetString(AWSJobImageID), - SubnetID: viper.GetString(AWSSubnetID), - SecurityGroupID: viper.GetString(AWSSecurityGroupID), - InstanceType: viper.GetString(AWSInstanceType), + config := &Config{} + if err := 
v.Unmarshal(config); err != nil { + return nil, fmt.Errorf("failed to parse provider configuration. Provider=AWS: %w", err) } - return config + return config, nil } diff --git a/runtime_scan/pkg/provider/aws/config_test.go b/runtime_scan/pkg/provider/aws/config_test.go new file mode 100644 index 000000000..ceeff2157 --- /dev/null +++ b/runtime_scan/pkg/provider/aws/config_test.go @@ -0,0 +1,79 @@ +// Copyright © 2023 Cisco Systems, Inc. and its affiliates. +// All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aws + +import ( + "os" + "testing" + + . 
"github.com/onsi/gomega" + "github.com/onsi/gomega/types" +) + +func TestConfig(t *testing.T) { + tests := []struct { + Name string + EnvVars map[string]string + + ExpectedNewErrorMatcher types.GomegaMatcher + ExpectedConfig *Config + ExpectedValidateErrorMatcher types.GomegaMatcher + }{ + { + Name: "Valid config", + EnvVars: map[string]string{ + "VMCLARITY_AWS_SCANNER_REGION": "eu-west-1", + "VMCLARITY_AWS_SUBNET_ID": "subnet-038f85dc621fd5b5d", + "VMCLARITY_AWS_SECURITY_GROUP_ID": "sg-02cfdc854e18664d4", + "VMCLARITY_AWS_KEYPAIR_NAME": "vmclarity-ssh-key", + "VMCLARITY_AWS_SCANNER_AMI_ID": "ami-0568773882d492fc8", + "VMCLARITY_AWS_SCANNER_INSTANCE_TYPE": "t3.large", + "VMCLARITY_AWS_BLOCK_DEVICE_NAME": "xvdh", + }, + ExpectedNewErrorMatcher: Not(HaveOccurred()), + ExpectedConfig: &Config{ + ScannerRegion: "eu-west-1", + SubnetID: "subnet-038f85dc621fd5b5d", + SecurityGroupID: "sg-02cfdc854e18664d4", + KeyPairName: "vmclarity-ssh-key", + ScannerImage: "ami-0568773882d492fc8", + ScannerInstanceType: "t3.large", + BlockDeviceName: "xvdh", + }, + ExpectedValidateErrorMatcher: Not(HaveOccurred()), + }, + } + + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + g := NewGomegaWithT(t) + + os.Clearenv() + for k, v := range test.EnvVars { + err := os.Setenv(k, v) + g.Expect(err).Should(Not(HaveOccurred())) + } + + config, err := NewConfig() + + g.Expect(err).Should(test.ExpectedNewErrorMatcher) + g.Expect(config).Should(BeEquivalentTo(test.ExpectedConfig)) + + err = config.Validate() + g.Expect(err).Should(test.ExpectedValidateErrorMatcher) + }) + } +} diff --git a/runtime_scan/pkg/provider/types.go b/runtime_scan/pkg/provider/types.go index a2690a827..1ba26be3b 100644 --- a/runtime_scan/pkg/provider/types.go +++ b/runtime_scan/pkg/provider/types.go @@ -61,9 +61,6 @@ type ScanJobConfig struct { ScannerImage string // Scanner Container Image to use containing the vmclarity-cli and tools ScannerCLIConfig string // Scanner CLI config yaml (families config 
yaml) VMClarityAddress string // The backend address for the scanner CLI to export too - KeyPairName string // The name of the key pair to set on the instance, ignored if not set, used mainly for debugging. - ScannerRegion string // The region where the VMClarity Scanner instance needs to be deployed - BlockDeviceName string // The block device name used for attaching target volume to the scanner vm ScanMetadata models.ScannerInstanceCreationConfig