
integ: fix clippy warnings
jpculp committed Oct 4, 2022
1 parent 810d6f2 commit 043bc41
Showing 4 changed files with 28 additions and 37 deletions.
26 changes: 11 additions & 15 deletions integ/src/eks_provider.rs
@@ -165,29 +165,25 @@ async fn dns_ip(
             let ipv4_cidr = kubernetes_network_config.service_ipv4_cidr;

             match ipv4_cidr {
-                Some(dns_ip) => {
-                    return Ok((
-                        IpFamily::Ipv4,
-                        Some(transform_dns_ip(dns_ip, IPV4_DIVIDER, IPV4_OCTET)),
-                    ))
-                }
-                None => return Ok((IpFamily::Ipv4, None)),
+                Some(dns_ip) => Ok((
+                    IpFamily::Ipv4,
+                    Some(transform_dns_ip(dns_ip, IPV4_DIVIDER, IPV4_OCTET)),
+                )),
+                None => Ok((IpFamily::Ipv4, None)),
             }
         }
         IpFamily::Ipv6 => {
             let ipv6_cidr = kubernetes_network_config.service_ipv6_cidr;

             match ipv6_cidr {
-                Some(dns_ip) => {
-                    return Ok((
-                        IpFamily::Ipv6,
-                        Some(transform_dns_ip(dns_ip, IPV6_DIVIDER, IPV6_HEXTET)),
-                    ))
-                }
-                None => return Ok((IpFamily::Ipv6, None)),
+                Some(dns_ip) => Ok((
+                    IpFamily::Ipv6,
+                    Some(transform_dns_ip(dns_ip, IPV6_DIVIDER, IPV6_HEXTET)),
+                )),
+                None => Ok((IpFamily::Ipv6, None)),
             }
         }
-        _ => return Err(ProviderError::new_with_context("Invalid dns ip")),
+        _ => Err(ProviderError::new_with_context("Invalid dns ip")),
     }
 }

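Note: the edits above drop `return` from match arms that already sit in tail position, the pattern clippy's needless_return lint reports. A minimal, hypothetical sketch of the same before/after shape (the classify function is illustrative, not part of the repository):

// Hypothetical example of the needless_return pattern fixed above.
fn classify(n: Option<i32>) -> Result<i32, String> {
    match n {
        // Before: `Some(v) => { return Ok(v * 2) }` -- the `return` is redundant
        // because the match is already the last expression of the function.
        Some(v) => Ok(v * 2),
        None => Err("no value".to_string()),
    }
}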
12 changes: 5 additions & 7 deletions integ/src/main.rs
@@ -88,7 +88,7 @@ pub struct IntegrationTestArgs {

 async fn generate_kubeconfig(arguments: &Arguments) -> Result<String> {
     // default kube config path is /temp/{CLUSTER_NAME}-{REGION}/kubeconfig.yaml
-    let kube_config_path = generate_kubeconfig_file_path(&arguments).await?;
+    let kube_config_path = generate_kubeconfig_file_path(arguments).await?;

     // decode and write kubeconfig
     info!("decoding and writing kubeconfig ...");
@@ -108,7 +108,7 @@ async fn generate_kubeconfig(arguments: &Arguments) -> Result<String> {
 }

 async fn generate_kubeconfig_file_path(arguments: &Arguments) -> Result<String> {
-    let unique_kube_config_temp_dir = get_kube_config_temp_dir_path(&arguments)?;
+    let unique_kube_config_temp_dir = get_kube_config_temp_dir_path(arguments)?;

     fs::create_dir_all(&unique_kube_config_temp_dir).context(error::CreateDirSnafu)?;

@@ -135,9 +135,7 @@ fn args_validation(args: &Arguments) -> Result<()> {
     match &args.subcommand {
         SubCommand::IntegrationTest(integ_test_args) => {
             ensure!(
-                ARCHES.contains(&ArchitectureValues::from(
-                    integ_test_args.ami_arch.as_str().clone()
-                )),
+                ARCHES.contains(&ArchitectureValues::from(integ_test_args.ami_arch.as_str())),
                 error::InvalidArchInputSnafu {
                     input: integ_test_args.ami_arch.clone()
                 }
@@ -266,15 +264,15 @@ async fn run() -> Result<()> {
                     .context(error::DeletePodSnafu)?;

                 // Delete tmp directory and kubeconfig.yaml if no input value for argument `kube_config_path`
-                if &args.kube_config_path == DEFAULT_KUBECONFIG_FILE_NAME {
+                if args.kube_config_path == DEFAULT_KUBECONFIG_FILE_NAME {
                     info!("Deleting tmp directory and kubeconfig.yaml ...");
                     fs::remove_dir_all(get_kube_config_temp_dir_path(&args)?)
                         .context(error::DeleteTmpDirSnafu)?;
                 }
             }
         }
     }
-    Ok({})
+    Ok(())
 }

 mod error {
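Note: these main.rs edits mostly drop borrows of values that are already references (clippy's needless_borrow lint) and replace the unusual `Ok({})` with the conventional `Ok(())`. A small illustrative sketch under those assumptions, not code from the repository:

fn kubeconfig_path(arguments: &str) -> String {
    format!("/tmp/{arguments}/kubeconfig.yaml")
}

fn run(arguments: &str) -> Result<(), String> {
    // Before: `kubeconfig_path(&arguments)` -- `arguments` is already a `&str`,
    // so the extra `&` is what needless_borrow flags.
    let _path = kubeconfig_path(arguments);
    // Before: `Ok({})` -- `{}` is an empty block that evaluates to `()`,
    // so `Ok(())` states the unit value directly.
    Ok(())
}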
12 changes: 5 additions & 7 deletions integ/src/monitor.rs
@@ -91,16 +91,16 @@ impl<T: BrupopClient> BrupopMonitor<T> {
     // verify if Brupop pods (agent, api-server, controller) are in `running` status.
     fn check_pods_health(&self, pods: &ObjectList<Pod>) -> bool {
         if pods.items.is_empty() {
-            return false;
+            false
         } else {
-            return pods.iter().all(|pod| is_pod_running(pod));
+            return pods.iter().all(is_pod_running);
         }
     }

     // verify if brs has been created properly and initialized `status`.
     fn check_shadows_health(&self, bottlerocketshadows: &ObjectList<BottlerocketShadow>) -> bool {
         if bottlerocketshadows.items.is_empty() {
-            return false;
+            false
         } else {
             return bottlerocketshadows
                 .iter()
@@ -124,7 +124,7 @@ impl<T: BrupopClient> BrupopMonitor<T> {
                     != bottlerocket_shadow_status.target_version().to_string()
                     || bottlerocket_shadow_status.current_state != BottlerocketShadowState::Idle
                 {
-                    update_success = update_success & false;
+                    update_success &= false;
                 }
                 println!(
                     "brs: {:?} current_version: {:?} current_state: {:?}",
@@ -225,9 +225,7 @@ pub mod mock {
 // compute the estimated update time to trigger monitor exit
 // formula: number_of_node*300 secs + 300 secs
 fn estimate_expire_time(number_of_brs: i32) -> i32 {
-    let expire_time = number_of_brs * ESTIMATED_UPDATE_TIME_EACH_NODE + EXTRA_TIME;
-
-    expire_time
+    number_of_brs * ESTIMATED_UPDATE_TIME_EACH_NODE + EXTRA_TIME
 }

 fn is_pod_running(pod: &Pod) -> bool {
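Note: the monitor.rs edits cover three further clippy patterns: a closure that only forwards its argument (redundant_closure), `x = x & y` rewritten as `x &= y` (assign_op_pattern), and a let binding returned immediately (let_and_return). A hypothetical sketch of each, not code from the repository:

fn is_even(n: &i32) -> bool {
    n % 2 == 0
}

fn all_even(values: &[i32]) -> bool {
    // Before: `values.iter().all(|v| is_even(v))` -- the closure only forwards
    // its argument, so the function can be passed directly.
    values.iter().all(is_even)
}

fn estimate(count: i32) -> i32 {
    // Before: `let total = count * 300 + 300; total` -- return the expression directly.
    count * 300 + 300
}

fn main() {
    let mut ok = true;
    // Before: `ok = ok & ...;` -- the compound assignment form is preferred.
    ok &= all_even(&[2, 4]) && estimate(1) == 600;
    println!("{ok}");
}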
15 changes: 7 additions & 8 deletions integ/src/nodegroup_provider.rs
@@ -74,14 +74,13 @@ pub async fn create_nodegroup(
     // Prepare ami id
     //default eks_version to the version that matches cluster
     let eks_version = &cluster.version;
-    let node_ami = find_ami_id(&ssm_client, ami_arch, bottlerocket_version, &eks_version).await?;
+    let node_ami = find_ami_id(&ssm_client, ami_arch, bottlerocket_version, eks_version).await?;

     // Prepare instance type
     let instance_type = instance_type(&ec2_client, &node_ami).await?;

     // create one time iam instance profile for nodegroup
-    let iam_instance_profile_arn =
-        create_iam_instance_profile(&iam_client, &nodegroup_name).await?;
+    let iam_instance_profile_arn = create_iam_instance_profile(&iam_client, nodegroup_name).await?;

     // Mapping one time iam identity to eks cluster
     cluster_iam_identity_mapping(&cluster.name, &cluster.region, &iam_instance_profile_arn).await?;
@@ -92,7 +91,7 @@
         &node_ami,
         &instance_type,
         &cluster.clone(),
-        &nodegroup_name,
+        nodegroup_name,
     )
     .await?;

@@ -106,7 +105,7 @@
                 .build(),
         )
         .labels(LABEL_BRUPOP_INTERFACE_NAME, BRUPOP_INTERFACE_VERSION)
-        .nodegroup_name(nodegroup_name.clone())
+        .nodegroup_name(nodegroup_name)
         .cluster_name(&cluster.name)
         .subnets(first_subnet_id(&cluster.private_subnet_ids)?)
         .node_role(&iam_instance_profile_arn)
@@ -141,7 +140,7 @@ pub async fn terminate_nodegroup(cluster: ClusterInfo, nodegroup_name: &str) ->
     // Delete nodegroup from cluster
     eks_client
         .delete_nodegroup()
-        .nodegroup_name(nodegroup_name.clone())
+        .nodegroup_name(nodegroup_name)
         .cluster_name(&cluster.name)
         .send()
         .await
@@ -156,7 +155,7 @@ pub async fn terminate_nodegroup(cluster: ClusterInfo, nodegroup_name: &str) ->
     .context("Timed-out waiting for instances to be fully deleted")??;

     // Delete one time iam instance profile for nodegroup which created by integration test.
-    delete_iam_instance_profile(&iam_client, &nodegroup_name).await?;
+    delete_iam_instance_profile(&iam_client, nodegroup_name).await?;

     // Delete nodegroup launch template which created by integration test.
     delete_launch_template(&ec2_client, nodegroup_name).await?;
@@ -528,7 +527,7 @@ async fn non_conforming_nodegroup(
             }
         }
         "delete" => confirm_nodegroup_deleted(eks_client, cluster_name, nodegroup_name).await,
-        _ => return Err(ProviderError::new_with_context("Invalid action input")),
+        _ => Err(ProviderError::new_with_context("Invalid action input")),
     }
 }

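Note: as in main.rs, the nodegroup_provider.rs edits remove extra `&` and `.clone()` calls on arguments that are already references; cloning a `&str` only copies the reference, so the call behaves identically without it. A small hypothetical sketch (find_ami_id here is a stand-in, not the repository's function signature):

fn find_ami_id(arch: &str, eks_version: &str) -> String {
    format!("ami-for-{arch}-{eks_version}")
}

fn create(ami_arch: &str) -> String {
    let version = String::from("1.23");
    let eks_version = &version;
    // Before: `find_ami_id(ami_arch, &eks_version)` -- `eks_version` is already a
    // `&String`, so `&eks_version` adds a needless layer of borrowing; deref
    // coercion makes both compile, but clippy flags the extra `&`.
    find_ami_id(ami_arch, eks_version)
}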
