Skip to content

Commit

Permalink
Merge pull request #304 from SakulK/v2-horizontal-pod-autoscaler
Browse files Browse the repository at this point in the history
Add support for autoscaling/v2 HorizontalPodAutoscaler
  • Loading branch information
hagay3 authored Jun 28, 2023
2 parents 12b61c1 + 754f407 commit e08bba0
Show file tree
Hide file tree
Showing 7 changed files with 557 additions and 11 deletions.
12 changes: 6 additions & 6 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -153,7 +153,7 @@ jobs:
github token: ${{ secrets.GITHUB_TOKEN }}
start args: '--extra-config=apiserver.disable-admission-plugins=ServiceAccount --extra-config=apiserver.enable-admission-plugins=NamespaceLifecycle'

- run: 'sbt "it:testOnly "'
- run: 'sbt ''it:testOnly * -- -l HorizontalPodAutoscalerV2Tag'''

integration-kubernetes-v1-20:
name: integration-kubernetes-v1-20
Expand All @@ -176,7 +176,7 @@ jobs:
github token: ${{ secrets.GITHUB_TOKEN }}
start args: '--extra-config=apiserver.disable-admission-plugins=ServiceAccount --extra-config=apiserver.enable-admission-plugins=NamespaceLifecycle'

- run: 'sbt "it:testOnly "'
- run: 'sbt ''it:testOnly * -- -l HorizontalPodAutoscalerV2Tag'''

integration-kubernetes-v1-21:
name: integration-kubernetes-v1-21
Expand All @@ -199,7 +199,7 @@ jobs:
github token: ${{ secrets.GITHUB_TOKEN }}
start args: '--extra-config=apiserver.disable-admission-plugins=ServiceAccount --extra-config=apiserver.enable-admission-plugins=NamespaceLifecycle'

- run: 'sbt "it:testOnly "'
- run: 'sbt ''it:testOnly * -- -l HorizontalPodAutoscalerV2Tag'''

integration-kubernetes-v1-22:
name: integration-kubernetes-v1-22
Expand All @@ -222,7 +222,7 @@ jobs:
github token: ${{ secrets.GITHUB_TOKEN }}
start args: '--extra-config=apiserver.disable-admission-plugins=ServiceAccount --extra-config=apiserver.enable-admission-plugins=NamespaceLifecycle'

- run: 'sbt "it:testOnly * -- -l CustomResourceTag"'
- run: 'sbt ''it:testOnly * -- -l CustomResourceTag -l HorizontalPodAutoscalerV2Tag'''

integration-kubernetes-v1-23:
name: integration-kubernetes-v1-23
Expand All @@ -245,7 +245,7 @@ jobs:
github token: ${{ secrets.GITHUB_TOKEN }}
start args: '--extra-config=apiserver.disable-admission-plugins=ServiceAccount --extra-config=apiserver.enable-admission-plugins=NamespaceLifecycle'

- run: 'sbt "it:testOnly * -- -l CustomResourceTag"'
- run: 'sbt ''it:testOnly * -- -l CustomResourceTag'''

integration-kubernetes-v1-24:
name: integration-kubernetes-v1-24
Expand All @@ -268,4 +268,4 @@ jobs:
github token: ${{ secrets.GITHUB_TOKEN }}
start args: '--extra-config=apiserver.disable-admission-plugins=ServiceAccount --extra-config=apiserver.enable-admission-plugins=NamespaceLifecycle'

- run: 'sbt "it:testOnly * -- -l CustomResourceTag"'
- run: 'sbt ''it:testOnly * -- -l CustomResourceTag'''
10 changes: 5 additions & 5 deletions build.sbt
Original file line number Diff line number Diff line change
Expand Up @@ -96,7 +96,7 @@ def workflowJobMinikube(jobName: String, k8sServerVersion: String, excludedTests
val finalSbtCommand: String = {
val additionalFlags: String = {
if (excludedTestsTags.nonEmpty) {
s"* -- -l ${excludedTestsTags.mkString(" ")}"
s"* -- ${excludedTestsTags.map(tag => s"-l $tag").mkString(" ")}"
} else {
""
}
Expand Down Expand Up @@ -134,10 +134,10 @@ inThisBuild(List(
githubWorkflowTargetTags ++= Seq("v*"),
githubWorkflowBuild := Seq(WorkflowStep.Sbt(List("test", "It/compile"))),
githubWorkflowAddedJobs := Seq(
workflowJobMinikube(jobName = "integration-kubernetes-v1-19", k8sServerVersion = "v1.19.6"),
workflowJobMinikube(jobName = "integration-kubernetes-v1-20", k8sServerVersion = "v1.20.11"),
workflowJobMinikube(jobName = "integration-kubernetes-v1-21", k8sServerVersion = "v1.21.5"),
workflowJobMinikube(jobName = "integration-kubernetes-v1-22", k8sServerVersion = "v1.22.9", List("CustomResourceTag")),
workflowJobMinikube(jobName = "integration-kubernetes-v1-19", k8sServerVersion = "v1.19.6", List("HorizontalPodAutoscalerV2Tag")),
workflowJobMinikube(jobName = "integration-kubernetes-v1-20", k8sServerVersion = "v1.20.11", List("HorizontalPodAutoscalerV2Tag")),
workflowJobMinikube(jobName = "integration-kubernetes-v1-21", k8sServerVersion = "v1.21.5", List("HorizontalPodAutoscalerV2Tag")),
workflowJobMinikube(jobName = "integration-kubernetes-v1-22", k8sServerVersion = "v1.22.9", List("CustomResourceTag", "HorizontalPodAutoscalerV2Tag")),
workflowJobMinikube(jobName = "integration-kubernetes-v1-23", k8sServerVersion = "v1.23.6", List("CustomResourceTag")),
workflowJobMinikube(jobName = "integration-kubernetes-v1-24", k8sServerVersion = "v1.24.1", List("CustomResourceTag"))
),
Expand Down
139 changes: 139 additions & 0 deletions client/src/it/scala/skuber/HorizontalPodAutoscalerV2Spec.scala
Original file line number Diff line number Diff line change
@@ -0,0 +1,139 @@
package skuber

import org.scalatest.{BeforeAndAfterAll, Tag}
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.matchers.should.Matchers
import skuber.FutureUtil.FutureOps
import skuber.apps.v1.Deployment
import skuber.autoscaling.v2.HorizontalPodAutoscaler
import skuber.autoscaling.v2.HorizontalPodAutoscaler._

import java.util.UUID.randomUUID
import scala.concurrent.Future
import scala.concurrent.duration._

/**
 * Integration tests for the `autoscaling/v2` HorizontalPodAutoscaler API
 * (create / update / delete round-trips against a live cluster provided by
 * [[K8SFixture]]). Each test is tagged with `HorizontalPodAutoscalerV2Tag`
 * so CI jobs running Kubernetes < 1.23 (which lack autoscaling/v2) can
 * exclude them via `it:testOnly * -- -l HorizontalPodAutoscalerV2Tag`.
 */
class HorizontalPodAutoscalerV2Spec extends K8SFixture with Eventually with Matchers with BeforeAndAfterAll with ScalaFutures {
// Tagging the tests in order to exclude them in earlier CI k8s versions (before 1.23)
object HorizontalPodAutoscalerV2Tag extends Tag("HorizontalPodAutoscalerV2Tag")

// Random names so concurrent / repeated CI runs don't collide on resources.
val horizontalPodAutoscaler1: String = randomUUID().toString
val horizontalPodAutoscaler2: String = randomUUID().toString
val horizontalPodAutoscaler3: String = randomUUID().toString

val deployment1: String = randomUUID().toString
val deployment2: String = randomUUID().toString
val deployment3: String = randomUUID().toString

// Default ScalaFutures patience for `.futureValue` / `whenReady` in this spec.
override implicit val patienceConfig: PatienceConfig = PatienceConfig(10.second)

// Best-effort cleanup of every HPA and Deployment this spec may have created.
// Deletion failures are swallowed (`recover { case _ => () }`) so that a test
// which never created its resource doesn't fail the teardown.
override def afterAll(): Unit = {
val k8s = k8sInit(config)

val results1 = Future.sequence(List(horizontalPodAutoscaler1, horizontalPodAutoscaler2, horizontalPodAutoscaler3).map { name =>
k8s.delete[HorizontalPodAutoscaler](name).withTimeout().recover { case _ => () }
}).withTimeout()

val results2 = {
val futures = Future.sequence(List(deployment1, deployment2, deployment3).map(name => k8s.delete[Deployment](name).withTimeout())).withTimeout()
futures.recover { case _ =>
()
}
}

// Block until both deletion batches complete (within patienceConfig).
results1.futureValue
results2.futureValue

// NOTE(review): this Future is discarded, so `k8s.close` /
// `system.terminate()` are not guaranteed to have run before afterAll
// returns. results1/results2 are already complete here (futureValue above),
// so the yield fires promptly, but awaiting this Future explicitly would be
// safer — confirm intent.
for {
_ <- results1
_ <- results2
} yield {
k8s.close
system.terminate().recover { case _ => () }.valueT
}

}


behavior of "HorizontalPodAutoscalerV2"

it should "create a HorizontalPodAutoscaler" taggedAs HorizontalPodAutoscalerV2Tag in { k8s =>

println(horizontalPodAutoscaler1)
// Create a backing Deployment, then an HPA with min/max replicas, a CPU
// utilization metric, and explicit scale-up/scale-down behavior policies.
// NOTE(review): the HPA target is Spec("v1", "Deployment", "nginx") — the
// name "nginx" does not match the UUID-named deployment created above, and
// Deployments live under apiVersion "apps/v1", not "v1". The test only
// asserts the spec round-trips through the API server, so this passes, but
// the target reference looks inconsistent — confirm.
k8s.create(getNginxDeployment(deployment1, "1.7.9")).valueT
val result = k8s.create(HorizontalPodAutoscaler(horizontalPodAutoscaler1).withSpec(HorizontalPodAutoscaler.Spec("v1", "Deployment", "nginx")
.withMinReplicas(1)
.withMaxReplicas(2)
.addResourceMetric(ResourceMetricSource(Resource.cpu, MetricTarget("Utilization", Some(80))))
.withBehavior(HorizontalPodAutoscalerBehavior(
scaleDown = Some(HPAScalingRules(List(HPAScalingPolicy(60, "Pods", 2)), Some("Max"), Some(100))),
scaleUp = Some(HPAScalingRules(List(HPAScalingPolicy(120, "Pods", 1)), Some("Max"), Some(5)))
)))).valueT

// The server must echo back exactly the spec we submitted.
assert(result.name == horizontalPodAutoscaler1)
assert(result.spec.contains(HorizontalPodAutoscaler.Spec("v1", "Deployment", "nginx")
.withMinReplicas(1)
.withMaxReplicas(2)
.addResourceMetric(ResourceMetricSource(Resource.cpu, MetricTarget("Utilization", Some(80))))
.withBehavior(HorizontalPodAutoscalerBehavior(
scaleDown = Some(HPAScalingRules(List(HPAScalingPolicy(60, "Pods", 2)), Some("Max"), Some(100))),
scaleUp = Some(HPAScalingRules(List(HPAScalingPolicy(120, "Pods", 1)), Some("Max"), Some(5)))
))))
}

it should "update a HorizontalPodAutoscaler" taggedAs HorizontalPodAutoscalerV2Tag in { k8s =>

k8s.create(getNginxDeployment(deployment2, "1.7.9")).valueT
val created = k8s.create(HorizontalPodAutoscaler(horizontalPodAutoscaler2).withSpec(HorizontalPodAutoscaler.Spec("v1", "Deployment", "nginx")
.withMinReplicas(1)
.withMaxReplicas(2)
.addResourceMetric(ResourceMetricSource(Resource.cpu, MetricTarget("Utilization", Some(80)))))).valueT

// Fixed delay to let the API server settle before re-reading the object —
// presumably to avoid a stale resourceVersion on update; a poll would be
// faster and less flaky, but this mirrors the other skuber IT specs.
Thread.sleep(5000)

// Re-fetch (to get the current resourceVersion) and bump maxReplicas 2 -> 3.
val existing = k8s.get[HorizontalPodAutoscaler](created.name).valueT
val updated = existing.withSpec(HorizontalPodAutoscaler.Spec("v1", "Deployment", "nginx")
.withMinReplicas(1)
.withMaxReplicas(3)
.addResourceMetric(ResourceMetricSource(Resource.cpu, MetricTarget("Utilization", Some(80)))))
k8s.update(updated).valueT

Thread.sleep(5000)
// Poll until the updated spec (maxReplicas = 3) is observable.
eventually(timeout(30.seconds), interval(3.seconds)) {
val result = k8s.get[HorizontalPodAutoscaler](created.name).valueT

assert(result.name == horizontalPodAutoscaler2)
assert(result.spec.contains(HorizontalPodAutoscaler.Spec("v1", "Deployment", "nginx")
.withMinReplicas(1)
.withMaxReplicas(3)
.addResourceMetric(ResourceMetricSource(Resource.cpu, MetricTarget("Utilization", Some(80))))))
}

}

it should "delete a HorizontalPodAutoscaler" taggedAs HorizontalPodAutoscalerV2Tag in { k8s =>

k8s.create(getNginxDeployment(deployment3, "1.7.9")).valueT
val created = k8s.create(HorizontalPodAutoscaler(horizontalPodAutoscaler3).withSpec(HorizontalPodAutoscaler.Spec("v1", "Deployment", "nginx")
.withMinReplicas(1)
.withMaxReplicas(2)
.addResourceMetric(ResourceMetricSource(Resource.cpu, MetricTarget("Utilization", Some(80)))))).valueT

Thread.sleep(5000)

k8s.delete[HorizontalPodAutoscaler](created.name).valueT

// Poll until a GET for the deleted HPA fails with HTTP 404.
eventually(timeout(30.seconds), interval(3.seconds)) {
whenReady(k8s.get[HorizontalPodAutoscaler](created.name).withTimeout().failed) { result =>
result shouldBe a[K8SException]
result match {
case ex: K8SException => ex.status.code shouldBe Some(404)
case _ => assert(false)
}
}
}


}


}
Loading

0 comments on commit e08bba0

Please sign in to comment.