fix(ds): fix destroy query (#1067)
MasterPtato committed Aug 16, 2024
1 parent 1ef72e6 commit f67150f
Showing 6 changed files with 33 additions and 32 deletions.
18 changes: 6 additions & 12 deletions lib/util/core/src/lib.rs
@@ -173,21 +173,19 @@ impl Default for Backoff {
 /// Used in workflow activity inputs/outputs. Using this over BTreeMap is preferred because this does not
 /// reorder keys, providing faster insert and lookup.
 #[derive(Serialize, Deserialize)]
-pub struct HashableMap<K: Eq + Hash, V: Hash> {
-	map: IndexMap<K, V>,
-}
+pub struct HashableMap<K: Eq + Hash, V: Hash>(IndexMap<K, V>);

 impl<K: Eq + Hash, V: Hash> Deref for HashableMap<K, V> {
 	type Target = IndexMap<K, V>;

 	fn deref(&self) -> &Self::Target {
-		&self.map
+		&self.0
 	}
 }

 impl<K: Eq + Ord + Hash, V: Hash> Hash for HashableMap<K, V> {
 	fn hash<H: Hasher>(&self, state: &mut H) {
-		let mut kv = Vec::from_iter(&self.map);
+		let mut kv = Vec::from_iter(&self.0);
 		kv.sort_unstable_by(|a, b| a.0.cmp(b.0));
 		kv.hash(state);
 	}
@@ -201,13 +199,11 @@ impl<K: Eq + Hash + fmt::Debug, V: Hash + fmt::Debug> fmt::Debug for HashableMap<K, V> {

 impl<K: Eq + Hash + Clone, V: Hash + Clone> Clone for HashableMap<K, V> {
 	fn clone(&self) -> Self {
-		HashableMap {
-			map: self.map.clone(),
-		}
+		HashableMap(self.0.clone())
 	}

 	fn clone_from(&mut self, other: &Self) {
-		self.map.clone_from(&other.map);
+		self.0.clone_from(&other.0);
 	}
 }

@@ -218,9 +214,7 @@ pub trait AsHashableExt<K: Eq + Hash, V: Hash> {

 impl<K: Eq + Clone + Hash, V: Clone + Hash> AsHashableExt<K, V> for HashMap<K, V> {
 	fn as_hashable(&self) -> HashableMap<K, V> {
-		HashableMap {
-			map: self.iter().map(|(k, v)| (k.clone(), v.clone())).collect(),
-		}
+		HashableMap(self.iter().map(|(k, v)| (k.clone(), v.clone())).collect())
 	}
 }

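The lib.rs change above converts HashableMap from a struct with a named map field into a newtype over IndexMap, so the field accesses become .0. The property it preserves is the order-independent Hash impl: entries are copied into a Vec, sorted by key, and the sorted list is hashed, so two maps with the same contents hash identically regardless of insertion order. A standalone sketch of that sort-then-hash idea (illustrative names, not the crate's actual code):

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

use indexmap::IndexMap;

// Order-independent hash for an insertion-ordered map: collect the entries,
// sort them by key, then hash the sorted list. Mirrors the `Hash` impl above.
fn stable_map_hash<K: Ord + Hash, V: Hash>(map: &IndexMap<K, V>) -> u64 {
	let mut kv: Vec<(&K, &V)> = map.iter().collect();
	kv.sort_unstable_by(|a, b| a.0.cmp(b.0));

	let mut hasher = DefaultHasher::new();
	kv.hash(&mut hasher);
	hasher.finish()
}

fn main() {
	// Same entries, inserted in different orders...
	let mut a = IndexMap::new();
	a.insert("x", 1);
	a.insert("y", 2);

	let mut b = IndexMap::new();
	b.insert("y", 2);
	b.insert("x", 1);

	// ...produce the same hash, even though iteration order differs.
	assert_eq!(stable_map_hash(&a), stable_map_hash(&b));
}
```

Sorting a temporary Vec of references keeps the map's insertion order untouched while still yielding a deterministic hash.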
2 changes: 1 addition & 1 deletion svc/api/servers/src/route/dc.rs
@@ -22,7 +22,7 @@ pub async fn list(
 		.await?;
 	let cluster_id = unwrap!(cluster_res.games.first()).cluster_id;

-	let mut cluster_dcs_res = ctx
+	let cluster_dcs_res = ctx
 		.op(cluster::ops::datacenter::list::Input {
 			cluster_ids: vec![cluster_id],
 		})
7 changes: 5 additions & 2 deletions svc/pkg/cluster/src/util/mod.rs
@@ -90,7 +90,8 @@ pub(crate) async fn create_dns_record(
 		cf::dns::DnsContent::TXT { .. } => "TXT",
 		cf::dns::DnsContent::SRV { .. } => "SRV",
 	};
-	let list_records_res = get_dns_record(cf_token, zone_id, record_name, dns_type).await?;
+	let list_records_res =
+		get_dns_record(cf_token, zone_id, record_name, dns_type).await?;

 	if let Some(record) = list_records_res {
 		delete_dns_record(client, zone_id, &record.id).await?;
@@ -147,7 +148,9 @@ async fn get_dns_record(
 	dns_type: &str,
 ) -> GlobalResult<Option<cf::dns::DnsRecord>> {
 	let list_records_res = reqwest::Client::new()
-		.get(format!("https://api.cloudflare.com/client/v4/zones/{zone_id}/dns_records"))
+		.get(format!(
+			"https://api.cloudflare.com/client/v4/zones/{zone_id}/dns_records"
+		))
 		.bearer_auth(cf_token)
 		.query(&[("name", record_name), ("type", dns_type)])
 		.send()
33 changes: 17 additions & 16 deletions svc/pkg/ds/src/workflows/server/destroy.rs
@@ -41,10 +41,10 @@ struct UpdateDbInput {

 #[derive(Debug, Serialize, Deserialize, Hash, sqlx::FromRow)]
 struct UpdateDbOutput {
-	ds_server_id: Uuid,
-	ds_datacenter_id: Uuid,
-	alloc_id: String,
+	server_id: Uuid,
+	datacenter_id: Uuid,
 	dispatched_job_id: String,
+	alloc_id: String,
 }

 #[activity(UpdateDb)]
@@ -58,19 +58,20 @@ async fn update_db(ctx: &ActivityCtx, input: &UpdateDbInput) -> GlobalResult<UpdateDbOutput> {
 	sql_fetch_one!(
 		[ctx, UpdateDbOutput, @tx tx]
 		"
-		UPDATE db_ds.servers
-		SET delete_ts = $2
+		UPDATE db_ds.servers AS s1
+		SET destroy_ts = $2
+		FROM db_ds.servers AS s2
+		JOIN db_ds.server_nomad AS sn
+		ON s2.server_id = sn.server_id
 		WHERE
-			server_id = $1 AND
-			delete_ts IS NULL
+			s1.server_id = $1 AND
+			s1.server_id = s2.server_id AND
+			s2.destroy_ts IS NULL
 		RETURNING
-			server_id,
-			datacenter_id
-			server_nomad.nomad_dispatched_job_id,
-			server_nomad.nomad_alloc_id,
-		FROM db_ds.servers AS s
-		JOIN db_ds.server_nomad AS sn
-		ON s.server_id = sn.server_id
+			s1.server_id,
+			s1.datacenter_id,
+			sn.nomad_dispatched_job_id AS dispatched_job_id,
+			sn.nomad_alloc_id AS alloc_id
 		",
 		server_id,
 		ctx.ts(),
@@ -112,8 +113,8 @@ async fn delete_job(ctx: &ActivityCtx, input: &DeleteJobInput) -> GlobalResult<()> {
 		Ok(_) => {
 			tracing::info!("job stopped");

 			// TODO: Manually kill the allocation after util_job::JOB_STOP_TIMEOUT
-			// task::spawn(async move {
+			// tokio::task::spawn(async move {

 			// });
 		}
 		Err(err) => {
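The update_db change is the substance of this commit. The old query set delete_ts (the schema evidently uses destroy_ts) and placed a FROM ... JOIN after RETURNING, which Postgres-family databases reject. The rewrite uses the UPDATE ... FROM form: the target table is self-joined against db_ds.server_nomad so the Nomad columns are in scope for RETURNING, and they are aliased (AS dispatched_job_id, AS alloc_id) to line up with the renamed UpdateDbOutput fields, since sqlx::FromRow maps columns to fields by name. A rough sketch of the same pattern using plain sqlx instead of the repo's sql_fetch_one! macro (abbreviated table names; assumes sqlx's postgres and uuid features):

```rust
use sqlx::PgPool;
use uuid::Uuid;

// Row type whose field names match the aliased RETURNING columns; sqlx's
// FromRow derive maps columns to fields by name.
#[derive(sqlx::FromRow)]
struct DestroyedRow {
	server_id: Uuid,
	datacenter_id: Uuid,
	dispatched_job_id: String,
	alloc_id: String,
}

// Same UPDATE ... FROM pattern as the fixed query, written against plain
// sqlx rather than the repo's sql_fetch_one! macro.
async fn mark_destroyed(
	pool: &PgPool,
	server_id: Uuid,
	destroy_ts: i64,
) -> sqlx::Result<DestroyedRow> {
	sqlx::query_as::<_, DestroyedRow>(
		"
		UPDATE servers AS s1
		SET destroy_ts = $2
		FROM servers AS s2
		JOIN server_nomad AS sn
		ON s2.server_id = sn.server_id
		WHERE
			s1.server_id = $1 AND
			s1.server_id = s2.server_id AND -- tie the joined row to the update target
			s2.destroy_ts IS NULL           -- only match servers not yet destroyed
		RETURNING
			s1.server_id,
			s1.datacenter_id,
			sn.nomad_dispatched_job_id AS dispatched_job_id,
			sn.nomad_alloc_id AS alloc_id
		",
	)
	.bind(server_id)
	.bind(destroy_ts)
	.fetch_one(pool)
	.await
}
```

Because fetch_one errors when no row matches, the s2.destroy_ts IS NULL guard doubles as an idempotency check: a second destroy attempt finds no matching row.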
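The delete_job hunk only edits a commented-out TODO (task::spawn becomes tokio::task::spawn). If that TODO were filled in, killing the allocation once util_job::JOB_STOP_TIMEOUT elapses, it might take roughly this shape; the timeout value and kill_allocation helper below are hypothetical, not part of this commit:

```rust
use std::time::Duration;

// Hypothetical constant; the real timeout lives in util_job::JOB_STOP_TIMEOUT.
const JOB_STOP_TIMEOUT: Duration = Duration::from_secs(30);

// Hypothetical helper; a real version would call the Nomad API.
async fn kill_allocation(alloc_id: String) {
	tracing::warn!(%alloc_id, "force-killing allocation");
}

// Spawn a background task that waits out the grace period, then force-kills.
fn spawn_delayed_kill(alloc_id: String) {
	tokio::task::spawn(async move {
		tokio::time::sleep(JOB_STOP_TIMEOUT).await;
		kill_allocation(alloc_id).await;
	});
}
```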
4 changes: 3 additions & 1 deletion svc/pkg/ds/tests/print_test_data.rs
@@ -129,7 +129,9 @@ async fn print_test_data(ctx: TestCtx) {
 		invalid_token = ?invalid_token.token.clone().unwrap().token,
 		build_id = ?build_res.build_id.unwrap(),
 		env_id = ?env_id,
-		"test data");
+		"test data"
+	);

 	//
 	// let runtime = Some(
 	// 	proto::backend::pkg::dynamic_servers::server_create::request::Runtime::DockerRuntime(
1 change: 1 addition & 0 deletions svc/pkg/ds/tests/server_get.rs
@@ -39,6 +39,7 @@ async fn server_get(ctx: TestCtx) {
"some_other_envkey_test".to_string(),
"4325234356".to_string(),
),
("HTTP_PORT".to_string(), "28234".to_string()),
]
.into_iter()
.collect();
