S3: Add GCS support
alpire authored and jaemk committed Nov 23, 2020
1 parent d33eeb4 commit 5890765
Showing 4 changed files with 35 additions and 30 deletions.
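
This commit teaches the `S3` backend to talk to Google Cloud Storage. A minimal usage sketch follows; the builder methods (`end_point`, `bucket_name`, `asset_prefix`) are assumed from the struct fields visible in this diff and from the crate's other backends, not verified against the published API:

```rust
// Hypothetical usage sketch, assuming builder methods named after the
// fields shown in this diff (`end_point`, `bucket_name`, `asset_prefix`).
use self_update::backends::s3::{EndPoint, ReleaseList};

fn list_gcs_releases() -> Result<(), Box<dyn std::error::Error>> {
    let releases = ReleaseList::configure()
        .end_point(EndPoint::GCS) // new variant introduced by this commit
        .bucket_name("my-release-bucket")
        .asset_prefix("releases/")
        // No `.region(...)` call: GCS URLs carry no region, which is why
        // this commit relaxes `region` from String to Option<String>.
        .build()?
        .fetch()?;
    println!("releases: {:#?}", releases);
    Ok(())
}
```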
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -2,6 +2,7 @@

 ## [unreleased]
 ### Added
+- Add GCS support to S3 backend
 ### Changed
 - Fixed docs referring to github in s3 backend
 ### Removed
2 changes: 1 addition & 1 deletion README.md
@@ -54,7 +54,7 @@ fn update() -> Result<(), Box<::std::error::Error>> {
 Run the above example to see `self_update` in action: `cargo run --example github --features "archive-tar compression-flate2"`.
 There's also an equivalent example for gitlab: `cargo run --example gitlab --features "archive-tar compression-flate2"`.
 
-Amazon S3 and DigitalOcean Spaces are also supported through the `S3` backend to check for new releases. Provided a `bucket_name`
+Amazon S3, Google GCS, and DigitalOcean Spaces are also supported through the `S3` backend to check for new releases. Provided a `bucket_name`
 and `asset_prefix` string, `self_update` will look up all matching files using the following format
 as a convention for the filenames: `[directory/]<asset name>-<semver>-<platform/target>.<extension>`.
 Leading directories will be stripped from the file name allowing the use of subdirectories in the S3 bucket,
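
As an illustration of that convention, a bucket for a tool named `myapp` (hypothetical names) could hold keys such as:

```
releases/myapp-1.2.3-x86_64-unknown-linux-gnu.tar.gz
releases/myapp-1.2.3-x86_64-pc-windows-msvc.zip
```

with the leading `releases/` stripped before the `<asset name>-<semver>-<platform/target>` pattern is matched.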
60 changes: 32 additions & 28 deletions src/backends/s3.rs
@@ -20,10 +20,11 @@ const MAX_KEYS: u8 = 100;

 /// The service end point.
 ///
-/// Currently S3 and DigitalOcean Spaces are supported.
+/// Currently S3, GCS, and DigitalOcean Spaces are supported.
 #[derive(Clone, Copy, Debug)]
 pub enum EndPoint {
     S3,
+    GCS,
     DigitalOceanSpaces,
 }

@@ -83,11 +84,7 @@ impl ReleaseListBuilder {
             } else {
                 bail!(Error::Config, "`bucket_name` required")
             },
-            region: if let Some(ref region) = self.region {
-                region.to_owned()
-            } else {
-                bail!(Error::Config, "`region` required")
-            },
+            region: self.region.clone(),
             asset_prefix: self.asset_prefix.clone(),
             target: self.target.clone(),
         })
@@ -102,7 +99,7 @@ pub struct ReleaseList {
     bucket_name: String,
     asset_prefix: Option<String>,
     target: Option<String>,
-    region: String,
+    region: Option<String>,
 }
 
 impl ReleaseList {
@@ -334,11 +331,7 @@ impl UpdateBuilder {
             } else {
                 bail!(Error::Config, "`bucket_name` required")
             },
-            region: if let Some(ref region) = self.region {
-                region.to_owned()
-            } else {
-                bail!(Error::Config, "`region` required")
-            },
+            region: self.region.clone(),
             asset_prefix: self.asset_prefix.clone(),
             target: self
                 .target
@@ -378,7 +371,7 @@ pub struct Update {
     bucket_name: String,
     asset_prefix: Option<String>,
     target: String,
-    region: String,
+    region: Option<String>,
     current_version: String,
     target_version: Option<String>,
     bin_name: String,
@@ -498,33 +491,44 @@ impl ReleaseUpdate for Update {
 fn fetch_releases_from_s3(
     end_point: EndPoint,
     bucket_name: &str,
-    region: &str,
+    region: &Option<String>,
     asset_prefix: &Option<String>,
 ) -> Result<Vec<Release>> {
     let prefix = match asset_prefix {
         Some(prefix) => format!("&prefix={}", prefix),
         None => "".to_string(),
     };
-    let api_url = match end_point {
-        EndPoint::S3 => format!(
-            "https://{}.s3.amazonaws.com/?list-type=2&max-keys={}{}",
-            bucket_name, MAX_KEYS, prefix
-        ),
-        EndPoint::DigitalOceanSpaces => format!(
-            "https://{}.{}.digitaloceanspaces.com/?list-type=2&max-keys={}{}",
-            bucket_name, region, MAX_KEYS, prefix
-        ),
-    };
-
-    debug!("using api url: {:?}", api_url);
-
     let download_base_url = match end_point {
-        EndPoint::S3 => format!("https://{}.s3.{}.amazonaws.com/", bucket_name, region),
+        EndPoint::S3 => {
+            let region = if let Some(region) = region {
+                region
+            } else {
+                bail!(Error::Config, "`region` required")
+            };
+            format!("https://{}.s3.{}.amazonaws.com/", bucket_name, region)
+        }
         EndPoint::DigitalOceanSpaces => {
-            format!("https://{}.{}.digitaloceanspaces.com/", bucket_name, region,)
+            let region = if let Some(region) = region {
+                region
+            } else {
+                bail!(Error::Config, "`region` required")
+            };
+            format!("https://{}.{}.digitaloceanspaces.com/", bucket_name, region)
        }
+        EndPoint::GCS => format!("https://storage.googleapis.com/{}/", bucket_name),
     };
 
+    let api_url = match end_point {
+        EndPoint::S3 | EndPoint::DigitalOceanSpaces => format!(
+            "{}?list-type=2&max-keys={}{}",
+            download_base_url, MAX_KEYS, prefix
+        ),
+        EndPoint::GCS => format!("{}?max-keys={}{}", download_base_url, MAX_KEYS, prefix),
+    };
+
+    debug!("using api url: {:?}", api_url);
+
     let resp = reqwest::blocking::Client::new().get(&api_url).send()?;
     if !resp.status().is_success() {
         bail!(
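
To make the rewritten URL construction concrete, here is a small self-contained sketch; the bucket, region, and prefix values are made up for the example, while the URL shapes and `MAX_KEYS = 100` come from the diff above:

```rust
// Reproduces the listing-URL shapes built by the new fetch_releases_from_s3,
// using example values. Note the prefix string already carries its `&`.
fn main() {
    let (bucket, region, max_keys) = ("my-bucket", "us-east-1", 100);
    let prefix = "&prefix=releases/";

    // S3: regional virtual-hosted endpoint, ListObjectsV2 (`list-type=2`).
    let s3 = format!(
        "https://{}.s3.{}.amazonaws.com/?list-type=2&max-keys={}{}",
        bucket, region, max_keys, prefix
    );
    assert_eq!(
        s3,
        "https://my-bucket.s3.us-east-1.amazonaws.com/?list-type=2&max-keys=100&prefix=releases/"
    );

    // DigitalOcean Spaces: same query string on the Spaces regional endpoint.
    let spaces = format!(
        "https://{}.{}.digitaloceanspaces.com/?list-type=2&max-keys={}{}",
        bucket, region, max_keys, prefix
    );
    assert_eq!(
        spaces,
        "https://my-bucket.us-east-1.digitaloceanspaces.com/?list-type=2&max-keys=100&prefix=releases/"
    );

    // GCS: path-style URL on storage.googleapis.com with no region, and no
    // `list-type=2`, which GCS's XML listing API does not use.
    let gcs = format!("https://storage.googleapis.com/{}/?max-keys={}{}", bucket, max_keys, prefix);
    assert_eq!(gcs, "https://storage.googleapis.com/my-bucket/?max-keys=100&prefix=releases/");
}
```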
2 changes: 1 addition & 1 deletion src/lib.rs
@@ -54,7 +54,7 @@ fn update() -> Result<(), Box<::std::error::Error>> {
 Run the above example to see `self_update` in action: `cargo run --example github --features "archive-tar compression-flate2"`.
 There's also an equivalent example for gitlab: `cargo run --example gitlab --features "archive-tar compression-flate2"`.
-Amazon S3 and DigitalOcean Spaces are also supported through the `S3` backend to check for new releases. Provided a `bucket_name`
+Amazon S3, Google GCS, and DigitalOcean Spaces are also supported through the `S3` backend to check for new releases. Provided a `bucket_name`
 and `asset_prefix` string, `self_update` will look up all matching files using the following format
 as a convention for the filenames: `[directory/]<asset name>-<semver>-<platform/target>.<extension>`.
 Leading directories will be stripped from the file name allowing the use of subdirectories in the S3 bucket,
