From 050f3d9792789758b74848545202625fa9337a08 Mon Sep 17 00:00:00 2001 From: Jordi Carrillo Bosch Date: Sun, 13 Oct 2024 09:41:14 -0700 Subject: [PATCH 01/28] Wrap link and ratelimit headers --- src/backoff.rs | 2 +- src/http.rs | 25 +++++--- src/io.rs | 141 +++++++++++++++++++++++++++++--------------- src/remote/query.rs | 24 ++++---- 4 files changed, 125 insertions(+), 67 deletions(-) diff --git a/src/backoff.rs b/src/backoff.rs index 2c58809..fe99034 100644 --- a/src/backoff.rs +++ b/src/backoff.rs @@ -58,7 +58,7 @@ impl<'a, R: HttpRunner> Backoff<'a, R> { // https://docs.github.com/en/rest/using-the-rest-api/rate-limits-for-the-rest-api?apiVersion=2022-11-28#exceeding-the-rate-limit match err.downcast_ref::() { Some(error::GRError::RateLimitExceeded(headers)) => { - self.rate_limit_header = headers.clone(); + self.rate_limit_header = *headers; self.num_retries += 1; if self.num_retries <= self.max_retries { let now = (self.now)(); diff --git a/src/http.rs b/src/http.rs index 3cf7e1c..4bb14b3 100644 --- a/src/http.rs +++ b/src/http.rs @@ -3,13 +3,18 @@ use crate::backoff::{Backoff, Exponential}; use crate::cache::{Cache, CacheState}; use crate::config::ConfigProperties; use crate::error::GRError; -use crate::io::{HttpRunner, RateLimitHeader, Response, ResponseField}; +use crate::io::{ + parse_link_headers, parse_page_headers, parse_ratelimit_headers, FlowControlHeaders, + HttpRunner, RateLimitHeader, Response, ResponseField, +}; use crate::time::{self, now_epoch_seconds, Milliseconds, Seconds}; use crate::{api_defaults, error, log_debug, log_error}; use crate::{log_info, Result}; use serde::{Deserialize, Serialize}; +use std::borrow::Borrow; use std::collections::{hash_map, HashMap}; use std::iter::Iterator; +use std::rc::Rc; use std::sync::{Arc, Mutex}; use ureq::Error; @@ -68,6 +73,9 @@ impl Client { ); headers }); + let rate_limit_header = Rc::new(parse_ratelimit_headers(Some(&headers))); + let page_header = Rc::new(parse_page_headers(Some(&headers))); + let flow_control_headers = FlowControlHeaders::new(page_header, rate_limit_header); // log debug response headers log_debug!("Response headers: {:?}", headers); let body = response.into_string().unwrap_or_default(); @@ -75,6 +83,7 @@ impl Client { .status(status) .body(body) .headers(headers) + .flow_control_headers(flow_control_headers) .build() .unwrap(); self.handle_rate_limit(&response)?; @@ -87,10 +96,10 @@ impl Client { impl Client { fn handle_rate_limit(&self, response: &Response) -> Result<()> { - if let Some(headers) = response.get_ratelimit_headers() { + if let Some(headers) = response.get_ratelimit_headers().borrow() { if headers.remaining <= self.config.rate_limit_remaining_threshold() { log_error!("Rate limit threshold reached"); - return Err(error::GRError::RateLimitExceeded(headers).into()); + return Err(error::GRError::RateLimitExceeded(*headers).into()); } Ok(()) } else { @@ -405,6 +414,7 @@ impl<'a, T: Serialize, R: HttpRunner> Iterator for Paginato if self.iter >= 1 { self.request.set_url(page_url); } + // TODO throttle after first page log_info!("Requesting page: {}", self.iter + 1); log_info!("URL: {}", self.request.url()); if let Some(throttle_time) = self.throttle_time { @@ -416,11 +426,9 @@ impl<'a, T: Serialize, R: HttpRunner> Iterator for Paginato } match self.backoff.retry_on_error(&mut self.request) { Ok(response) => { - if let Some(page_headers) = response.get_page_headers() { - let next_page = page_headers.next; - let last_page = page_headers.last; - match (next_page, last_page) { - (Some(next), _) => 
self.page_url = Some(next.url), + if let Some(page_headers) = response.get_page_headers().borrow() { + match (page_headers.next_page(), page_headers.last_page()) { + (Some(next), _) => self.page_url = Some(next.url().to_string()), (None, _) => self.page_url = None, } self.iter += 1; @@ -434,6 +442,7 @@ impl<'a, T: Serialize, R: HttpRunner> Iterator for Paginato return Some(Err(err)); } } + // Get response and throttle. } None } diff --git a/src/io.rs b/src/io.rs index eb541b0..10040ac 100644 --- a/src/io.rs +++ b/src/io.rs @@ -13,8 +13,10 @@ use crate::{ use regex::Regex; use serde::Serialize; use std::{ + borrow::Borrow, ffi::OsStr, fmt::{self, Display, Formatter}, + rc::Rc, thread, }; @@ -81,6 +83,8 @@ pub struct Response { /// Optional headers. Mostly used by HTTP downstream HTTP responses #[builder(setter(into, strip_option), default)] pub headers: Option, + #[builder(default)] + pub flow_control_headers: FlowControlHeaders, #[builder(default = "parse_link_headers")] link_header_processor: fn(&str) -> PageHeader, } @@ -106,51 +110,12 @@ impl Response { .map(|s| s.as_str()) } - pub fn get_page_headers(&self) -> Option { - if let Some(headers) = &self.headers { - match headers.get(LINK_HEADER) { - Some(link) => return Some((self.link_header_processor)(link)), - None => return None, - } - } - None + pub fn get_page_headers(&self) -> Rc> { + self.flow_control_headers.get_page_header() } - // Defaults: - // https://docs.gitlab.com/ee/user/gitlab_com/index.html#gitlabcom-specific-rate-limits - // https://docs.github.com/en/rest/using-the-rest-api/rate-limits-for-the-rest-api?apiVersion=2022-11-28#primary-rate-limit-for-authenticated-users - - // Github 5000 requests per hour for authenticated users - // Gitlab 2000 requests per minute for authenticated users - // Most limiting Github 5000/60 = 83.33 requests per minute - - pub fn get_ratelimit_headers(&self) -> Option { - let mut ratelimit_header = RateLimitHeader::default(); - - // process remote headers and patch the defaults accordingly - if let Some(headers) = &self.headers { - if let Some(retry_after) = headers.get(RETRY_AFTER) { - ratelimit_header.retry_after = - Seconds::new(retry_after.parse::().unwrap_or(0)); - } - if let Some(github_remaining) = headers.get(GITHUB_RATELIMIT_REMAINING) { - ratelimit_header.remaining = github_remaining.parse::().unwrap_or(0); - if let Some(github_reset) = headers.get(GITHUB_RATELIMIT_RESET) { - ratelimit_header.reset = Seconds::new(github_reset.parse::().unwrap_or(0)); - } - log_info!("Header {}", ratelimit_header); - return Some(ratelimit_header); - } - if let Some(gitlab_remaining) = headers.get(GITLAB_RATELIMIT_REMAINING) { - ratelimit_header.remaining = gitlab_remaining.parse::().unwrap_or(0); - if let Some(gitlab_reset) = headers.get(GITLAB_RATELIMIT_RESET) { - ratelimit_header.reset = Seconds::new(gitlab_reset.parse::().unwrap_or(0)); - } - log_info!("Header {}", ratelimit_header); - return Some(ratelimit_header); - } - } - None + pub fn get_ratelimit_headers(&self) -> Rc> { + self.flow_control_headers.get_rate_limit_header() } pub fn get_etag(&self) -> Option<&str> { @@ -223,7 +188,7 @@ pub fn parse_link_headers(link: &str) -> PageHeader { page_header } -#[derive(Default)] +#[derive(Clone, Debug, Default)] pub struct PageHeader { pub next: Option, pub last: Option, @@ -241,9 +206,27 @@ impl PageHeader { pub fn set_last_page(&mut self, page: Page) { self.last = Some(page); } + + pub fn next_page(&self) -> Option<&Page> { + self.next.as_ref() + } + + pub fn last_page(&self) -> Option<&Page> { + 
self.last.as_ref() + } +} + +pub fn parse_page_headers(headers: Option<&Headers>) -> Option { + if let Some(headers) = headers { + match headers.get(LINK_HEADER) { + Some(link) => return Some(parse_link_headers(link)), + None => return None, + } + } + None } -#[derive(Debug, PartialEq)] +#[derive(Clone, Debug, PartialEq)] pub struct Page { pub url: String, pub number: u32, @@ -256,6 +239,10 @@ impl Page { number, } } + + pub fn url(&self) -> &str { + &self.url + } } // https://docs.github.com/en/rest/using-the-rest-api/rate-limits-for-the-rest-api?apiVersion=2022-11-28#exceeding-the-rate-limit @@ -282,7 +269,7 @@ pub const GITLAB_RATELIMIT_RESET: &str = "ratelimit-reset"; /// Gitlab API ratelimit headers: /// remaining: RateLimit-Remaining /// reset: RateLimit-Reset -#[derive(Clone, Debug, Default)] +#[derive(Clone, Copy, Debug, Default)] pub struct RateLimitHeader { // The number of requests remaining in the current rate limit window. pub remaining: u32, @@ -302,6 +289,42 @@ impl RateLimitHeader { } } +// Defaults: +// https://docs.gitlab.com/ee/user/gitlab_com/index.html#gitlabcom-specific-rate-limits +// https://docs.github.com/en/rest/using-the-rest-api/rate-limits-for-the-rest-api?apiVersion=2022-11-28#primary-rate-limit-for-authenticated-users + +// Github 5000 requests per hour for authenticated users +// Gitlab 2000 requests per minute for authenticated users +// Most limiting Github 5000/60 = 83.33 requests per minute + +pub fn parse_ratelimit_headers(headers: Option<&Headers>) -> Option { + let mut ratelimit_header = RateLimitHeader::default(); + + // process remote headers and patch the defaults accordingly + if let Some(headers) = headers { + if let Some(retry_after) = headers.get(RETRY_AFTER) { + ratelimit_header.retry_after = Seconds::new(retry_after.parse::().unwrap_or(0)); + } + if let Some(github_remaining) = headers.get(GITHUB_RATELIMIT_REMAINING) { + ratelimit_header.remaining = github_remaining.parse::().unwrap_or(0); + if let Some(github_reset) = headers.get(GITHUB_RATELIMIT_RESET) { + ratelimit_header.reset = Seconds::new(github_reset.parse::().unwrap_or(0)); + } + log_info!("Header {}", ratelimit_header); + return Some(ratelimit_header); + } + if let Some(gitlab_remaining) = headers.get(GITLAB_RATELIMIT_REMAINING) { + ratelimit_header.remaining = gitlab_remaining.parse::().unwrap_or(0); + if let Some(gitlab_reset) = headers.get(GITLAB_RATELIMIT_RESET) { + ratelimit_header.reset = Seconds::new(gitlab_reset.parse::().unwrap_or(0)); + } + log_info!("Header {}", ratelimit_header); + return Some(ratelimit_header); + } + } + None +} + impl Display for RateLimitHeader { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { let reset = time::epoch_to_minutes_relative(self.reset); @@ -313,6 +336,32 @@ impl Display for RateLimitHeader { } } +#[derive(Clone, Debug, Default)] +pub struct FlowControlHeaders { + page_header: Rc>, + rate_limit_header: Rc>, +} + +impl FlowControlHeaders { + pub fn new( + page_header: Rc>, + rate_limit_header: Rc>, + ) -> Self { + FlowControlHeaders { + page_header, + rate_limit_header, + } + } + + pub fn get_page_header(&self) -> Rc> { + self.page_header.clone() + } + + pub fn get_rate_limit_header(&self) -> Rc> { + self.rate_limit_header.clone() + } +} + #[cfg(test)] mod test { use super::*; diff --git a/src/remote/query.rs b/src/remote/query.rs index dd0eb29..0cfb6bc 100644 --- a/src/remote/query.rs +++ b/src/remote/query.rs @@ -1,3 +1,4 @@ +use std::borrow::Borrow; use std::iter::Iterator; use std::sync::Arc; @@ -10,28 +11,27 @@ use crate::{ 
    api_traits::{ApiOperation, NumberDeltaErr},
    display, error,
    http::{self, Body, Headers, Paginator, Request, Resource},
-    io::{HttpRunner, PageHeader, Response},
+    io::{HttpRunner, Response},
    json_load_page, json_loads,
    remote::ListBodyArgs,
    time::sort_filter_by_date,
    Result,
 };
 
-fn get_page_header>(
+fn get_remote_resource_headers<'a, R: HttpRunner>(
    runner: &Arc,
    url: &str,
    request_headers: Headers,
    api_operation: ApiOperation,
-) -> Result> {
-    let response = send_request::<_, String>(
+) -> Result {
+    send_request::<_, String>(
        runner,
        url,
        None,
        request_headers,
        http::Method::HEAD,
        api_operation,
-    )?;
-    Ok(response.get_page_headers())
+    )
 }
 
 pub fn num_pages>(
@@ -40,10 +40,10 @@
    request_headers: Headers,
    api_operation: ApiOperation,
 ) -> Result> {
-    let page_header = get_page_header(runner, url, request_headers, api_operation)?;
-    match page_header {
+    let response = get_remote_resource_headers(runner, url, request_headers, api_operation)?;
+    match response.get_page_headers().borrow() {
        Some(page_header) => {
-            if let Some(last_page) = page_header.last {
+            if let Some(last_page) = page_header.last_page() {
                return Ok(Some(last_page.number));
            }
            Ok(None)
@@ -60,11 +60,11 @@
    request_headers: Headers,
    api_operation: ApiOperation,
 ) -> Result> {
-    let page_header = get_page_header(runner, url, request_headers, api_operation)?;
-    match page_header {
+    let response = get_remote_resource_headers(runner, url, request_headers, api_operation)?;
+    match response.get_page_headers().borrow() {
        Some(page_header) => {
            // total resources per_page * total_pages
-            if let Some(last_page) = page_header.last {
+            if let Some(last_page) = page_header.last_page() {
                let count = last_page.number * page_header.per_page;
                return Ok(Some(NumberDeltaErr {
                    num: count,

From a531e1f5bdfa0def95f65495b3c3d43df013229b Mon Sep 17 00:00:00 2001
From: Jordi Carrillo Bosch
Date: Sun, 13 Oct 2024 17:49:15 -0700
Subject: [PATCH 02/28] Split Response into HttpResponse and ShellResponse

Both have different characteristics and should be treated differently.
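For illustration, a minimal sketch of the shape each type ends up with after
the split (simplified: the real definitions in src/io.rs derive Builder and use
the crate's Headers and FlowControlHeaders types; the tuple-vector header
stand-in below exists only for this example):

    // Simplified sketch, not the crate's actual definitions.
    // A shell command only ever yields an exit status and captured output.
    #[derive(Clone, Debug, Default)]
    pub struct ShellResponse {
        pub status: i32,
        pub body: String,
    }

    // An HTTP call additionally carries headers plus pagination/rate-limit
    // (flow control) data that a shell command can never produce.
    #[derive(Clone, Debug, Default)]
    pub struct HttpResponse {
        pub status: i32,
        pub body: String,
        pub headers: Option<Vec<(String, String)>>, // stand-in for http::Headers
    }

    fn main() {
        let sh = ShellResponse { status: 0, body: "On branch main".into() };
        let http = HttpResponse { status: 200, body: "{}".into(), headers: None };
        println!("shell exit {} / http status {}", sh.status, http.status);
    }

With the split, TaskRunner implementations return ShellResponse while
HttpRunner implementations return HttpResponse, so each consumer only sees the
fields that can actually exist for its transport.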
--- src/backoff.rs | 37 +++++++++----- src/cache.rs | 10 ++-- src/cache/filesystem.rs | 17 ++++--- src/cache/inmemory.rs | 12 ++--- src/cache/nocache.rs | 6 +-- src/cmds/amps.rs | 17 ++++--- src/cmds/merge_request.rs | 27 ++++++----- src/git.rs | 80 +++++++++++++++---------------- src/github/cicd.rs | 8 ++-- src/github/container_registry.rs | 4 +- src/github/gist.rs | 4 +- src/github/merge_request.rs | 6 +-- src/github/project.rs | 8 ++-- src/github/release.rs | 6 +-- src/github/trending.rs | 6 +-- src/github/user.rs | 4 +- src/gitlab/cicd.rs | 8 ++-- src/gitlab/container_registry.rs | 4 +- src/gitlab/gist.rs | 4 +- src/gitlab/merge_request.rs | 6 +-- src/gitlab/project.rs | 8 ++-- src/gitlab/release.rs | 8 ++-- src/gitlab/trending.rs | 4 +- src/gitlab/user.rs | 4 +- src/http.rs | 82 ++++++++++++++++++-------------- src/io.rs | 57 +++++++++++++++------- src/remote.rs | 27 +++++++---- src/remote/query.rs | 71 ++++++++++++++------------- src/shell.rs | 15 +++--- src/test.rs | 28 +++++++---- tests/fs_cache_test.rs | 14 +++--- tests/http_test.rs | 6 +-- 32 files changed, 339 insertions(+), 259 deletions(-) diff --git a/src/backoff.rs b/src/backoff.rs index fe99034..56f3a0c 100644 --- a/src/backoff.rs +++ b/src/backoff.rs @@ -6,7 +6,7 @@ use crate::error::{AddContext, GRError}; use crate::io::{HttpRunner, RateLimitHeader}; use crate::log_error; use crate::{error, log_info, Result}; -use crate::{http::Request, io::Response, time::Seconds}; +use crate::{http::Request, io::HttpResponse, time::Seconds}; /// ExponentialBackoff wraps an HttpRunner and retries requests with an /// exponential backoff retry mechanism. @@ -40,8 +40,11 @@ impl<'a, R> Backoff<'a, R> { } } -impl<'a, R: HttpRunner> Backoff<'a, R> { - pub fn retry_on_error(&mut self, request: &mut Request) -> Result { +impl<'a, R: HttpRunner> Backoff<'a, R> { + pub fn retry_on_error( + &mut self, + request: &mut Request, + ) -> Result { loop { match self.runner.run(request) { Ok(response) => return Ok(response), @@ -124,44 +127,52 @@ impl BackOffStrategy for Exponential { #[cfg(test)] mod tests { + use std::rc::Rc; + use crate::{ http::{self, Headers, Resource}, + io::FlowControlHeaders, test::utils::MockRunner, time::Milliseconds, }; use super::*; - fn ratelimited_with_headers(remaining: u32, reset: u64, retry_after: u64) -> Response { + fn ratelimited_with_headers(remaining: u32, reset: u64, retry_after: u64) -> HttpResponse { let mut headers = Headers::new(); headers.set("x-ratelimit-remaining".to_string(), remaining.to_string()); headers.set("x-ratelimit-reset".to_string(), reset.to_string()); headers.set("retry-after".to_string(), retry_after.to_string()); - Response::builder() + let rate_limit_header = + RateLimitHeader::new(remaining, Seconds::new(reset), Seconds::new(retry_after)); + let flow_control_headers = + FlowControlHeaders::new(Rc::new(None), Rc::new(Some(rate_limit_header))); + HttpResponse::builder() .status(429) .headers(headers) + .flow_control_headers(flow_control_headers) .build() .unwrap() } - fn ratelimited_with_no_headers() -> Response { - Response::builder().status(429).build().unwrap() + fn ratelimited_with_no_headers() -> HttpResponse { + HttpResponse::builder().status(429).build().unwrap() } - fn response_ok() -> Response { - Response::builder().status(200).build().unwrap() + fn response_ok() -> HttpResponse { + HttpResponse::builder().status(200).build().unwrap() } - fn response_server_error() -> Response { - Response::builder().status(500).build().unwrap() + fn response_server_error() -> HttpResponse { + 
HttpResponse::builder().status(500).build().unwrap() } - fn response_transport_error() -> Response { + fn response_transport_error() -> HttpResponse { // Could be a timeout, connection error, etc. Status code // For testing purposes, the status code of -1 simulates a transport // error from the mock http runner. // TODO: Should move to enums instead at some point. - Response::builder().status(-1).build().unwrap() + HttpResponse::builder().status(-1).build().unwrap() } fn now_mock() -> Seconds { diff --git a/src/cache.rs b/src/cache.rs index 26ed62e..b12965b 100644 --- a/src/cache.rs +++ b/src/cache.rs @@ -1,4 +1,4 @@ -use crate::io::{Response, ResponseField}; +use crate::io::{HttpResponse, ResponseField}; pub mod filesystem; pub mod inmemory; @@ -10,12 +10,12 @@ pub use nocache::NoCache; pub trait Cache { fn get(&self, key: &K) -> Result; - fn set(&self, key: &K, value: &Response) -> Result<()>; - fn update(&self, key: &K, value: &Response, field: &ResponseField) -> Result<()>; + fn set(&self, key: &K, value: &HttpResponse) -> Result<()>; + fn update(&self, key: &K, value: &HttpResponse, field: &ResponseField) -> Result<()>; } pub enum CacheState { - Stale(Response), - Fresh(Response), + Stale(HttpResponse), + Fresh(HttpResponse), None, } diff --git a/src/cache/filesystem.rs b/src/cache/filesystem.rs index 5da6c51..a61a6ec 100644 --- a/src/cache/filesystem.rs +++ b/src/cache/filesystem.rs @@ -8,7 +8,7 @@ use sha2::{Digest, Sha256}; use crate::cache::Cache; use crate::http::{Headers, Resource}; -use crate::io::{self, Response}; +use crate::io::{self, HttpResponse}; use crate::time::Seconds; use super::CacheState; @@ -88,7 +88,7 @@ impl FileCache { format!("{}/{:x}", location, hash) } - fn get_cache_data(&self, mut reader: impl BufRead) -> Result { + fn get_cache_data(&self, mut reader: impl BufRead) -> Result { let decompressed_data = GzDecoder::new(&mut reader); let mut reader = BufReader::new(decompressed_data); let mut headers = String::new(); @@ -113,7 +113,7 @@ impl FileCache { reader.read_to_end(&mut body)?; let body = String::from_utf8(body)?.trim().to_string(); let headers_map = serde_json::from_str::(&headers)?; - let response = Response::builder() + let response = HttpResponse::builder() .status(status_code) .body(body) .headers(headers_map) @@ -121,7 +121,7 @@ impl FileCache { Ok(response) } - fn persist_cache_data(&self, value: &Response, f: BufWriter) -> Result<()> { + fn persist_cache_data(&self, value: &HttpResponse, f: BufWriter) -> Result<()> { let headers_map = value.headers.as_ref().unwrap(); let headers = serde_json::to_string(headers_map).unwrap(); let status = value.status.to_string(); @@ -173,7 +173,7 @@ impl Cache for FileCache { } } - fn set(&self, key: &Resource, value: &Response) -> Result<()> { + fn set(&self, key: &Resource, value: &HttpResponse) -> Result<()> { let path = self.get_cache_file(&key.url); let f = File::create(path)?; let f = BufWriter::new(f); @@ -181,7 +181,12 @@ impl Cache for FileCache { Ok(()) } - fn update(&self, key: &Resource, value: &Response, field: &io::ResponseField) -> Result<()> { + fn update( + &self, + key: &Resource, + value: &HttpResponse, + field: &io::ResponseField, + ) -> Result<()> { let path = self.get_cache_file(&key.url); if let Ok(f) = File::open(path) { let mut f = BufReader::new(f); diff --git a/src/cache/inmemory.rs b/src/cache/inmemory.rs index 1d48ee1..c13abf0 100644 --- a/src/cache/inmemory.rs +++ b/src/cache/inmemory.rs @@ -3,13 +3,13 @@ use std::{cell::RefCell, collections::HashMap}; use crate::{ cache::{Cache, 
CacheState}, http::Resource, - io::{Response, ResponseField}, + io::{HttpResponse, ResponseField}, }; use crate::Result; pub struct InMemoryCache { - cache: RefCell>, + cache: RefCell>, expired: bool, pub updated: RefCell, pub updated_field: RefCell, @@ -47,7 +47,7 @@ impl Cache for &InMemoryCache { Ok(CacheState::None) } - fn set(&self, key: &Resource, value: &Response) -> Result<()> { + fn set(&self, key: &Resource, value: &HttpResponse) -> Result<()> { self.cache .borrow_mut() .insert(key.url.to_string(), value.clone()); @@ -57,7 +57,7 @@ impl Cache for &InMemoryCache { fn update( &self, key: &Resource, - value: &Response, + value: &HttpResponse, field: &crate::io::ResponseField, ) -> Result<()> { *self.updated.borrow_mut() = true; @@ -77,7 +77,7 @@ impl Cache for InMemoryCache { Ok(CacheState::None) } - fn set(&self, key: &String, value: &Response) -> Result<()> { + fn set(&self, key: &String, value: &HttpResponse) -> Result<()> { self.cache .borrow_mut() .insert(key.to_string(), value.clone()); @@ -87,7 +87,7 @@ impl Cache for InMemoryCache { fn update( &self, key: &String, - value: &Response, + value: &HttpResponse, _field: &crate::io::ResponseField, ) -> Result<()> { self.set(key, value) diff --git a/src/cache/nocache.rs b/src/cache/nocache.rs index 11f1e54..033b438 100644 --- a/src/cache/nocache.rs +++ b/src/cache/nocache.rs @@ -1,5 +1,5 @@ use crate::cache::{Cache, CacheState}; -use crate::io::{Response, ResponseField}; +use crate::io::{HttpResponse, ResponseField}; use crate::Result; @@ -9,11 +9,11 @@ impl Cache for NoCache { fn get(&self, _key: &K) -> Result { Ok(CacheState::None) } - fn set(&self, _key: &K, _value: &Response) -> Result<()> { + fn set(&self, _key: &K, _value: &HttpResponse) -> Result<()> { Ok(()) } - fn update(&self, _key: &K, _value: &Response, _field: &ResponseField) -> Result<()> { + fn update(&self, _key: &K, _value: &HttpResponse, _field: &ResponseField) -> Result<()> { Ok(()) } } diff --git a/src/cmds/amps.rs b/src/cmds/amps.rs index cabe02d..c3a1962 100644 --- a/src/cmds/amps.rs +++ b/src/cmds/amps.rs @@ -4,7 +4,7 @@ use crate::{ cli::amps::AmpsOptions::{self, Exec}, dialog, error::GRError, - io::{Response, TaskRunner}, + io::{ShellResponse, TaskRunner}, remote::ConfigFilePath, shell, Result, }; @@ -44,7 +44,10 @@ pub fn execute(options: AmpsOptions, config_file: ConfigFilePath) -> Result<()> } } -fn list_amps(runner: impl TaskRunner, amps_path: &str) -> Result> { +fn list_amps( + runner: impl TaskRunner, + amps_path: &str, +) -> Result> { let cmd = vec!["ls", amps_path]; let response = runner.run(cmd)?; if response.body.is_empty() { @@ -89,7 +92,7 @@ impl<'a, A, R> Amp<'a, A, R> { } } -impl<'a, A: Fn() -> String, R: TaskRunner> Amp<'a, A, R> { +impl<'a, A: Fn() -> String, R: TaskRunner> Amp<'a, A, R> { fn exec_amps(&self, amp: String, base_path: &Path) -> Result<()> { let mut args = (self.args_prompter_fn)(); loop { @@ -126,12 +129,12 @@ mod tests { #[test] fn test_exec_amp_with_help_and_run() { - let response_help = Response::builder() + let response_help = ShellResponse::builder() .status(0) .body("this is the help".to_string()) .build() .unwrap(); - let response = Response::builder() + let response = ShellResponse::builder() .status(0) .body("response output".to_string()) .build() @@ -151,7 +154,7 @@ mod tests { #[test] fn test_exec_amp_with_help_and_exit() { - let response_help = Response::builder() + let response_help = ShellResponse::builder() .status(0) .body("this is the help".to_string()) .build() @@ -168,7 +171,7 @@ mod tests { #[test] fn 
test_list_amps_error_if_none_available() { - let response = Response::builder() + let response = ShellResponse::builder() .status(0) .body("".to_string()) .build() diff --git a/src/cmds/merge_request.rs b/src/cmds/merge_request.rs index dae9158..049a452 100644 --- a/src/cmds/merge_request.rs +++ b/src/cmds/merge_request.rs @@ -4,7 +4,7 @@ use crate::config::ConfigProperties; use crate::display::{Column, DisplayBody}; use crate::error::{AddContext, GRError}; use crate::git::Repo; -use crate::io::{CmdInfo, Response, TaskRunner}; +use crate::io::{CmdInfo, ShellResponse, TaskRunner}; use crate::remote::{CacheCliArgs, CacheType, GetRemoteCliArgs, ListBodyArgs, ListRemoteCliArgs}; use crate::shell::BlockingCommand; use crate::{dialog, display, exec, git, remote, Cmd, Result}; @@ -627,7 +627,7 @@ fn open( fn cmds( remote: Arc, cli_args: &MergeRequestCliArgs, - task_runner: Arc + Send + Sync + 'static>, + task_runner: Arc + Send + Sync + 'static>, reader: Option, ) -> Vec> { let remote_cl = remote.clone(); @@ -1270,11 +1270,11 @@ mod tests { } struct MockShellRunner { - responses: Mutex>, + responses: Mutex>, } impl MockShellRunner { - pub fn new(response: Vec) -> MockShellRunner { + pub fn new(response: Vec) -> MockShellRunner { MockShellRunner { responses: Mutex::new(response), } @@ -1282,7 +1282,7 @@ mod tests { } impl TaskRunner for MockShellRunner { - type Response = Response; + type Response = ShellResponse; fn run(&self, _cmd: T) -> Result where @@ -1290,29 +1290,32 @@ mod tests { T::Item: AsRef, { let response = self.responses.lock().unwrap().pop().unwrap(); - Ok(Response::builder().body(response.body).build().unwrap()) + Ok(ShellResponse::builder() + .body(response.body) + .build() + .unwrap()) } } - fn gen_cmd_responses() -> Vec { + fn gen_cmd_responses() -> Vec { let responses = vec![ - Response::builder() + ShellResponse::builder() .body("fetch cmd".to_string()) .build() .unwrap(), - Response::builder() + ShellResponse::builder() .body("last commit message cmd".to_string()) .build() .unwrap(), - Response::builder() + ShellResponse::builder() .body("current branch cmd".to_string()) .build() .unwrap(), - Response::builder() + ShellResponse::builder() .body("title git cmd".to_string()) .build() .unwrap(), - Response::builder() + ShellResponse::builder() .body("status cmd".to_string()) .build() .unwrap(), diff --git a/src/git.rs b/src/git.rs index dddd16e..ba42325 100644 --- a/src/git.rs +++ b/src/git.rs @@ -15,7 +15,7 @@ use std::sync::Arc; use crate::error; use crate::error::AddContext; use crate::io::CmdInfo; -use crate::io::Response; +use crate::io::ShellResponse; use crate::io::TaskRunner; use crate::remote::RemoteURL; use crate::Result; @@ -24,13 +24,13 @@ use crate::Result; /// /// Takes a [`Runner`] as a parameter and returns a result encapsulating a /// [`CmdInfo::StatusModified`]. Untracked files are not being considered. -pub fn status(exec: Arc>) -> Result { +pub fn status(exec: Arc>) -> Result { let cmd_params = ["git", "status", "--short"]; let response = exec.run(cmd_params)?; handle_git_status(&response) } -fn handle_git_status(response: &Response) -> Result { +fn handle_git_status(response: &ShellResponse) -> Result { let modified = response .body .split('\n') @@ -49,7 +49,7 @@ fn handle_git_status(response: &Response) -> Result { } /// Gather the current branch name in the local git repository. 
-pub fn current_branch(runner: Arc>) -> Result { +pub fn current_branch(runner: Arc>) -> Result { // Does not work for git version at least 2.18 // let cmd_params = ["git", "branch", "--show-current"]; // Use rev-parse for older versions of git that don't support --show-current. @@ -94,13 +94,13 @@ pub fn commit(exec: &impl TaskRunner, message: &str) -> Result { } /// Get the origin remote url from the local git repository. -pub fn remote_url(exec: &impl TaskRunner) -> Result { +pub fn remote_url(exec: &impl TaskRunner) -> Result { let cmd_params = ["git", "remote", "get-url", "--all", "origin"]; let response = exec.run(cmd_params)?; handle_git_remote_url(&response) } -fn handle_git_remote_url(response: &Response) -> Result { +fn handle_git_remote_url(response: &ShellResponse) -> Result { let fields = response.body.split(':').collect::>(); match fields.len() { // git@github.com:jordilin/gitar.git @@ -150,7 +150,7 @@ fn handle_git_remote_url(response: &Response) -> Result { /// [`Runner`] as a parameter and the encapsulated result is a /// [`CmdInfo::LastCommitSummary`]. pub fn commit_summary( - runner: Arc>, + runner: Arc>, commit: &Option, ) -> Result { let mut cmd_params = vec!["git", "log", "--format=%s", "-n1"]; @@ -162,7 +162,7 @@ pub fn commit_summary( } pub fn outgoing_commits( - runner: &impl TaskRunner, + runner: &impl TaskRunner, remote: &str, default_branch: &str, ) -> Result { @@ -193,7 +193,7 @@ pub fn rebase(runner: &impl TaskRunner, remote_alias: &str) -> Result { } pub fn commit_message( - runner: Arc>, + runner: Arc>, commit: &Option, ) -> Result { let mut cmd_params = vec!["git", "log", "--pretty=format:%b", "-n1"]; @@ -204,7 +204,7 @@ pub fn commit_message( Ok(CmdInfo::CommitMessage(response.body)) } -pub fn checkout(runner: &impl TaskRunner, branch: &str) -> Result<()> { +pub fn checkout(runner: &impl TaskRunner, branch: &str) -> Result<()> { let git_cmd = format!("git checkout origin/{} -b {}", branch, branch); let cmd_params = ["/bin/sh", "-c", &git_cmd]; runner.run(cmd_params).err_context(format!( @@ -273,7 +273,7 @@ mod tests { #[test] fn test_git_repo_has_modified_files() { - let response = Response::builder() + let response = ShellResponse::builder() .body(get_contract( ContractType::Git, "git_status_modified_files.txt", @@ -291,7 +291,7 @@ mod tests { #[test] fn test_git_repo_has_untracked_and_modified_files_is_modified() { - let response = Response::builder() + let response = ShellResponse::builder() .body(get_contract( ContractType::Git, "git_status_untracked_and_modified_files.txt", @@ -309,7 +309,7 @@ mod tests { #[test] fn test_git_status_command_is_correct() { - let response = Response::builder().build().unwrap(); + let response = ShellResponse::builder().build().unwrap(); let runner = Arc::new(MockRunner::new(vec![response])); status(runner.clone()).unwrap(); // assert_eq!("git status --short", runner.cmd.borrow().as_str()); @@ -318,7 +318,7 @@ mod tests { #[test] fn test_git_repo_is_clean() { - let response = Response::builder() + let response = ShellResponse::builder() .body(get_contract(ContractType::Git, "git_status_clean_repo.txt")) .build() .unwrap(); @@ -333,7 +333,7 @@ mod tests { #[test] fn test_git_repo_has_untracked_files_treats_repo_as_no_local_modifications() { - let response = Response::builder() + let response = ShellResponse::builder() .body(get_contract( ContractType::Git, "git_status_untracked_files.txt", @@ -351,7 +351,7 @@ mod tests { #[test] fn test_git_remote_url_cmd_is_correct() { - let response = Response::builder() + let response = 
ShellResponse::builder() .body("git@github.com:jordilin/mr.git".to_string()) .build() .unwrap(); @@ -362,7 +362,7 @@ mod tests { #[test] fn test_get_remote_git_url() { - let response = Response::builder() + let response = ShellResponse::builder() .body("git@github.com:jordilin/mr.git".to_string()) .build() .unwrap(); @@ -380,7 +380,7 @@ mod tests { #[test] fn test_get_remote_https_url() { - let response = Response::builder() + let response = ShellResponse::builder() .body("https://github.com/jordilin/gitar.git".to_string()) .build() .unwrap(); @@ -398,7 +398,7 @@ mod tests { #[test] fn test_get_remote_ssh_url() { - let response = Response::builder() + let response = ShellResponse::builder() .body("ssh://git@gitlab-web:2222/testgroup/testsubproject.git".to_string()) .build() .unwrap(); @@ -419,7 +419,7 @@ mod tests { #[test] fn test_remote_url_no_remote() { - let response = Response::builder() + let response = ShellResponse::builder() .status(1) .body("error: No such remote 'origin'".to_string()) .build() @@ -430,14 +430,14 @@ mod tests { #[test] fn test_empty_remote_url() { - let response = Response::builder().build().unwrap(); + let response = ShellResponse::builder().build().unwrap(); let runner = MockRunner::new(vec![response]); assert!(remote_url(&runner).is_err()) } #[test] fn test_git_fetch_cmd_is_correct() { - let response = Response::builder().build().unwrap(); + let response = ShellResponse::builder().build().unwrap(); let runner = Arc::new(MockRunner::new(vec![response])); fetch(runner.clone(), "origin".to_string()).unwrap(); assert_eq!("git fetch origin", *runner.cmd()); @@ -445,7 +445,7 @@ mod tests { #[test] fn test_gather_current_branch_cmd_is_correct() { - let response = Response::builder().build().unwrap(); + let response = ShellResponse::builder().build().unwrap(); let runner = Arc::new(MockRunner::new(vec![response])); current_branch(runner.clone()).unwrap(); assert_eq!("git rev-parse --abbrev-ref HEAD", *runner.cmd()); @@ -453,7 +453,7 @@ mod tests { #[test] fn test_gather_current_branch_ok() { - let response = Response::builder() + let response = ShellResponse::builder() .body(get_contract(ContractType::Git, "git_current_branch.txt")) .build() .unwrap(); @@ -468,7 +468,7 @@ mod tests { #[test] fn test_last_commit_summary_cmd_is_correct() { - let response = Response::builder() + let response = ShellResponse::builder() .body("Add README".to_string()) .build() .unwrap(); @@ -479,7 +479,7 @@ mod tests { #[test] fn test_last_commit_summary_get_last_commit() { - let response = Response::builder() + let response = ShellResponse::builder() .body("Add README".to_string()) .build() .unwrap(); @@ -494,7 +494,7 @@ mod tests { #[test] fn test_last_commit_summary_errors() { - let response = Response::builder() + let response = ShellResponse::builder() .status(1) .body("Could not retrieve last commit".to_string()) .build() @@ -505,7 +505,7 @@ mod tests { #[test] fn test_commit_summary_specific_sha_cmd_is_correct() { - let response = Response::builder() + let response = ShellResponse::builder() .body("Add README".to_string()) .build() .unwrap(); @@ -516,7 +516,7 @@ mod tests { #[test] fn test_git_push_cmd_is_correct() { - let response = Response::builder().build().unwrap(); + let response = ShellResponse::builder().build().unwrap(); let runner = MockRunner::new(vec![response]); let mut repo = Repo::new(); repo.with_current_branch("new_feature"); @@ -526,7 +526,7 @@ mod tests { #[test] fn test_git_push_cmd_fails() { - let response = Response::builder() + let response = 
ShellResponse::builder() .status(1) .body(get_contract(ContractType::Git, "git_push_failure.txt")) .build() @@ -539,7 +539,7 @@ mod tests { #[test] fn test_git_force_push_cmd_is_correct() { - let response = Response::builder().build().unwrap(); + let response = ShellResponse::builder().build().unwrap(); let runner = MockRunner::new(vec![response]); let mut repo = Repo::new(); repo.with_current_branch("new_feature"); @@ -571,7 +571,7 @@ mod tests { #[test] fn test_git_rebase_cmd_is_correct() { - let response = Response::builder().build().unwrap(); + let response = ShellResponse::builder().build().unwrap(); let runner = MockRunner::new(vec![response]); rebase(&runner, "origin/main").unwrap(); assert_eq!("git rebase origin/main", *runner.cmd()); @@ -579,7 +579,7 @@ mod tests { #[test] fn test_git_rebase_fails_throws_error() { - let response = Response::builder() + let response = ShellResponse::builder() .status(1) .body(get_contract( ContractType::Git, @@ -593,7 +593,7 @@ mod tests { #[test] fn test_outgoing_commits_cmd_is_ok() { - let response = Response::builder().build().unwrap(); + let response = ShellResponse::builder().build().unwrap(); let runner = MockRunner::new(vec![response]); outgoing_commits(&runner, "origin", "main").unwrap(); let expected_cmd = "git log origin/main.. --reverse --pretty=format:%s - %h %d".to_string(); @@ -602,7 +602,7 @@ mod tests { #[test] fn test_last_commit_message_cmd_is_ok() { - let response = Response::builder().build().unwrap(); + let response = ShellResponse::builder().build().unwrap(); let runner = Arc::new(MockRunner::new(vec![response])); commit_message(runner.clone(), &None).unwrap(); let expected_cmd = "git log --pretty=format:%b -n1".to_string(); @@ -611,7 +611,7 @@ mod tests { #[test] fn test_commit_message_from_specific_commit_cmd_is_ok() { - let response = Response::builder().build().unwrap(); + let response = ShellResponse::builder().build().unwrap(); let runner = Arc::new(MockRunner::new(vec![response])); commit_message(runner.clone(), &Some("123456".to_string())).unwrap(); let expected_cmd = "git log --pretty=format:%b -n1 123456".to_string(); @@ -620,7 +620,7 @@ mod tests { #[test] fn test_git_add_changes_cmd_is_ok() { - let response = Response::builder().build().unwrap(); + let response = ShellResponse::builder().build().unwrap(); let runner = MockRunner::new(vec![response]); add(&runner).unwrap(); let expected_cmd = "git add -u".to_string(); @@ -629,7 +629,7 @@ mod tests { #[test] fn test_git_add_changes_cmd_is_err() { - let response = Response::builder() + let response = ShellResponse::builder() .status(1) .body("error: could not add changes".to_string()) .build() @@ -640,7 +640,7 @@ mod tests { #[test] fn test_git_commit_message_is_ok() { - let response = Response::builder() + let response = ShellResponse::builder() .body("Add README".to_string()) .build() .unwrap(); @@ -652,7 +652,7 @@ mod tests { #[test] fn test_git_commit_message_is_err() { - let response = Response::builder() + let response = ShellResponse::builder() .status(1) .body("error: could not commit changes".to_string()) .build() diff --git a/src/github/cicd.rs b/src/github/cicd.rs index 9cc27f4..5d443a7 100644 --- a/src/github/cicd.rs +++ b/src/github/cicd.rs @@ -7,11 +7,11 @@ use crate::cmds::cicd::{ use crate::remote::query; use crate::{ api_traits::Cicd, - io::{HttpRunner, Response}, + io::{HttpRunner, HttpResponse}, }; use crate::{http, time, Result}; -impl> Cicd for Github { +impl> Cicd for Github { fn list(&self, args: PipelineBodyArgs) -> Result> { // Doc: // 
https://docs.github.com/en/rest/actions/workflow-runs?apiVersion=2022-11-28#list-workflow-runs-for-a-repository @@ -60,7 +60,7 @@ impl Github { } } -impl> CicdRunner for Github { +impl> CicdRunner for Github { fn list(&self, _args: RunnerListBodyArgs) -> Result> { todo!(); } @@ -82,7 +82,7 @@ impl> CicdRunner for Github { } } -impl> CicdJob for Github { +impl> CicdJob for Github { fn list(&self, _args: JobListBodyArgs) -> Result> { todo!(); } diff --git a/src/github/container_registry.rs b/src/github/container_registry.rs index 839969b..e89440c 100644 --- a/src/github/container_registry.rs +++ b/src/github/container_registry.rs @@ -1,13 +1,13 @@ use crate::{ api_traits::ContainerRegistry, cmds::docker::{DockerListBodyArgs, ImageMetadata, RegistryRepository, RepositoryTag}, - io::{HttpRunner, Response}, + io::{HttpRunner, HttpResponse}, Result, }; use super::Github; -impl> ContainerRegistry for Github { +impl> ContainerRegistry for Github { fn list_repositories(&self, _args: DockerListBodyArgs) -> Result> { todo!("list_repositories") } diff --git a/src/github/gist.rs b/src/github/gist.rs index 89664b5..bb454d9 100644 --- a/src/github/gist.rs +++ b/src/github/gist.rs @@ -1,7 +1,7 @@ use crate::{ api_traits::{ApiOperation, CodeGist, NumberDeltaErr}, cmds::gist::{Gist, GistListBodyArgs}, - io::{HttpRunner, Response}, + io::{HttpRunner, HttpResponse}, remote::{query, URLQueryParamBuilder}, Result, }; @@ -10,7 +10,7 @@ use super::Github; // https://docs.github.com/en/rest/gists/gists?apiVersion=2022-11-28 -impl> CodeGist for Github { +impl> CodeGist for Github { fn list(&self, args: GistListBodyArgs) -> crate::Result> { let url = self.auth_user_gist_url(false); query::paged( diff --git a/src/github/merge_request.rs b/src/github/merge_request.rs index 0276af9..1c4f035 100644 --- a/src/github/merge_request.rs +++ b/src/github/merge_request.rs @@ -11,7 +11,7 @@ use crate::{ project::MrMemberType, }, http::{self, Body}, - io::{HttpRunner, Response}, + io::{HttpRunner, HttpResponse}, json_loads, remote::query, }; @@ -52,7 +52,7 @@ impl Github { } } -impl> MergeRequest for Github { +impl> MergeRequest for Github { fn open(&self, args: MergeRequestBodyArgs) -> Result { // https://docs.github.com/en/rest/pulls/pulls?apiVersion=2022-11-28#create-a-pull-request let mut body = Body::new(); @@ -355,7 +355,7 @@ impl> MergeRequest for Github { } } -impl> CommentMergeRequest for Github { +impl> CommentMergeRequest for Github { fn create(&self, args: CommentMergeRequestBodyArgs) -> Result<()> { let url = format!( "{}/repos/{}/issues/{}/comments", diff --git a/src/github/project.rs b/src/github/project.rs index d4b694a..4e47ad1 100644 --- a/src/github/project.rs +++ b/src/github/project.rs @@ -3,14 +3,14 @@ use crate::{ cli::browse::BrowseOptions, cmds::project::{Member, Project, ProjectListBodyArgs, Tag}, error::GRError, - io::{CmdInfo, HttpRunner, Response}, + io::{CmdInfo, HttpResponse, HttpRunner}, remote::{query, URLQueryParamBuilder}, }; use super::Github; use crate::Result; -impl> RemoteProject for Github { +impl> RemoteProject for Github { fn get_project_data(&self, id: Option, path: Option<&str>) -> Result { // NOTE: What I call project in here is understood as repository in // Github parlance. 
In Github there is also the concept of having @@ -109,7 +109,7 @@ impl> RemoteProject for Github { } } -impl> RemoteTag for Github { +impl> RemoteTag for Github { // https://docs.github.com/en/rest/repos/repos?apiVersion=2022-11-28#list-repository-tags fn list(&self, args: ProjectListBodyArgs) -> Result> { let url = self.list_project_url(&args, false); @@ -126,7 +126,7 @@ impl> RemoteTag for Github { } } -impl> ProjectMember for Github { +impl> ProjectMember for Github { fn list(&self, args: ProjectListBodyArgs) -> Result> { let url = &format!( "{}/repos/{}/contributors", diff --git a/src/github/release.rs b/src/github/release.rs index e2cd35f..c3a4495 100644 --- a/src/github/release.rs +++ b/src/github/release.rs @@ -1,14 +1,14 @@ use crate::{ api_traits::{ApiOperation, Deploy, DeployAsset, NumberDeltaErr}, cmds::release::{Release, ReleaseAssetListBodyArgs, ReleaseAssetMetadata, ReleaseBodyArgs}, - io::{HttpRunner, Response}, + io::{HttpRunner, HttpResponse}, remote::query, Result, }; use super::Github; -impl> Deploy for Github { +impl> Deploy for Github { fn list(&self, args: ReleaseBodyArgs) -> Result> { let url = format!("{}/repos/{}/releases", self.rest_api_basepath, self.path); query::paged( @@ -52,7 +52,7 @@ impl Github { } } -impl> DeployAsset for Github { +impl> DeployAsset for Github { fn list(&self, args: ReleaseAssetListBodyArgs) -> Result> { let url = format!( "{}/repos/{}/releases/{}/assets", diff --git a/src/github/trending.rs b/src/github/trending.rs index cfac101..8376210 100644 --- a/src/github/trending.rs +++ b/src/github/trending.rs @@ -4,14 +4,14 @@ use crate::{ api_traits::{ApiOperation, TrendingProjectURL}, cmds::trending::TrendingProject, http::Headers, - io::{HttpRunner, Response}, + io::{HttpRunner, HttpResponse}, remote::query, Result, }; use super::Github; -impl> TrendingProjectURL for Github { +impl> TrendingProjectURL for Github { fn list(&self, language: String) -> Result> { let url = format!("https://{}/trending/{}", self.domain, language); let mut headers = Headers::new(); @@ -27,7 +27,7 @@ impl> TrendingProjectURL for Github { } } -fn parse_response(response: Response) -> Result> { +fn parse_response(response: HttpResponse) -> Result> { let body = response.body; let proj_re = Regex::new(r#"href="/[a-zA-Z0-9_-]*/[a-zA-Z0-9_-]*/stargazers""#).unwrap(); let description_re = Regex::new(r#"

"#).unwrap(); diff --git a/src/github/user.rs b/src/github/user.rs index a4fc897..40f205b 100644 --- a/src/github/user.rs +++ b/src/github/user.rs @@ -2,11 +2,11 @@ use super::Github; use crate::api_traits::{ApiOperation, UserInfo}; use crate::cmds::project::Member; use crate::cmds::user::UserCliArgs; -use crate::io::{HttpRunner, Response}; +use crate::io::{HttpRunner, HttpResponse}; use crate::remote::query; use crate::Result; -impl> UserInfo for Github { +impl> UserInfo for Github { fn get_auth_user(&self) -> Result { let url = format!("{}/user", self.rest_api_basepath); let user = query::get::<_, (), Member>( diff --git a/src/gitlab/cicd.rs b/src/gitlab/cicd.rs index 6c50f47..4532f5c 100644 --- a/src/gitlab/cicd.rs +++ b/src/gitlab/cicd.rs @@ -8,11 +8,11 @@ use crate::http::{self, Body, Headers}; use crate::remote::{query, URLQueryParamBuilder}; use crate::{ api_traits::Cicd, - io::{HttpRunner, Response}, + io::{HttpRunner, HttpResponse}, }; use crate::{time, Result}; -impl> Cicd for Gitlab { +impl> Cicd for Gitlab { fn list(&self, args: PipelineBodyArgs) -> Result> { let url = format!("{}/pipelines", self.rest_api_basepath()); query::paged( @@ -57,7 +57,7 @@ impl> Cicd for Gitlab { } } -impl> CicdRunner for Gitlab { +impl> CicdRunner for Gitlab { fn list(&self, args: RunnerListBodyArgs) -> Result> { let url = self.list_runners_url(&args, false); query::paged( @@ -212,7 +212,7 @@ impl From for Job { } } -impl> CicdJob for Gitlab { +impl> CicdJob for Gitlab { // https://docs.gitlab.com/ee/api/jobs.html#list-project-jobs fn list(&self, args: JobListBodyArgs) -> Result> { let url = format!("{}/jobs", self.rest_api_basepath()); diff --git a/src/gitlab/container_registry.rs b/src/gitlab/container_registry.rs index 66fb418..e1a8b95 100644 --- a/src/gitlab/container_registry.rs +++ b/src/gitlab/container_registry.rs @@ -1,14 +1,14 @@ use crate::{ api_traits::{ApiOperation, ContainerRegistry}, cmds::docker::{DockerListBodyArgs, ImageMetadata, RegistryRepository, RepositoryTag}, - io::{HttpRunner, Response}, + io::{HttpRunner, HttpResponse}, remote::query, Result, }; use super::Gitlab; -impl> ContainerRegistry for Gitlab { +impl> ContainerRegistry for Gitlab { fn list_repositories(&self, args: DockerListBodyArgs) -> Result> { let url = format!( "{}/registry/repositories?tags_count=true", diff --git a/src/gitlab/gist.rs b/src/gitlab/gist.rs index 2ed2e6f..5deea73 100644 --- a/src/gitlab/gist.rs +++ b/src/gitlab/gist.rs @@ -1,12 +1,12 @@ use crate::{ api_traits::CodeGist, cmds::gist::{Gist, GistListBodyArgs}, - io::{HttpRunner, Response}, + io::{HttpRunner, HttpResponse}, }; use super::Gitlab; -impl> CodeGist for Gitlab { +impl> CodeGist for Gitlab { fn list(&self, _args: GistListBodyArgs) -> crate::Result> { todo!() } diff --git a/src/gitlab/merge_request.rs b/src/gitlab/merge_request.rs index afe6fb4..91bfb58 100644 --- a/src/gitlab/merge_request.rs +++ b/src/gitlab/merge_request.rs @@ -12,14 +12,14 @@ use crate::remote::query; use crate::Result; use crate::{ api_traits::MergeRequest, - io::{HttpRunner, Response}, + io::{HttpRunner, HttpResponse}, }; use crate::json_loads; use super::Gitlab; -impl> MergeRequest for Gitlab { +impl> MergeRequest for Gitlab { fn open(&self, args: MergeRequestBodyArgs) -> Result { let mut body = Body::new(); body.add("source_branch", args.source_branch); @@ -255,7 +255,7 @@ impl Gitlab { } } -impl> CommentMergeRequest for Gitlab { +impl> CommentMergeRequest for Gitlab { fn create(&self, args: CommentMergeRequestBodyArgs) -> Result<()> { let url = format!( 
"{}/merge_requests/{}/notes", diff --git a/src/gitlab/project.rs b/src/gitlab/project.rs index 552a40d..334ce25 100644 --- a/src/gitlab/project.rs +++ b/src/gitlab/project.rs @@ -3,14 +3,14 @@ use crate::cli::browse::BrowseOptions; use crate::cmds::project::{Member, Project, ProjectListBodyArgs, Tag}; use crate::error::GRError; use crate::gitlab::encode_path; -use crate::io::{CmdInfo, HttpRunner, Response}; +use crate::io::{CmdInfo, HttpRunner, HttpResponse}; use crate::remote::query; use crate::remote::URLQueryParamBuilder; use crate::Result; use super::Gitlab; -impl> RemoteProject for Gitlab { +impl> RemoteProject for Gitlab { fn get_project_data(&self, id: Option, path: Option<&str>) -> Result { let url = match (id, path) { (Some(id), None) => format!("{}/{}", self.base_project_url, id), @@ -94,7 +94,7 @@ impl> RemoteProject for Gitlab { } } -impl> RemoteTag for Gitlab { +impl> RemoteTag for Gitlab { // https://docs.gitlab.com/ee/api/tags.html fn list(&self, args: ProjectListBodyArgs) -> Result> { let url = format!("{}/repository/tags", self.projects_base_url); @@ -116,7 +116,7 @@ impl> RemoteTag for Gitlab { // ApiOperation to reflect this. } -impl> ProjectMember for Gitlab { +impl> ProjectMember for Gitlab { fn list(&self, args: ProjectListBodyArgs) -> Result> { let url = format!("{}/members/all", self.rest_api_basepath()); let members = query::paged( diff --git a/src/gitlab/release.rs b/src/gitlab/release.rs index 60c0171..dc752d3 100644 --- a/src/gitlab/release.rs +++ b/src/gitlab/release.rs @@ -2,14 +2,14 @@ use crate::{ api_traits::{ApiOperation, Deploy, DeployAsset, NumberDeltaErr}, cmds::release::{Release, ReleaseAssetListBodyArgs, ReleaseAssetMetadata, ReleaseBodyArgs}, http, - io::{HttpRunner, Response}, + io::{HttpRunner, HttpResponse}, remote::query, Result, }; use super::Gitlab; -impl> Deploy for Gitlab { +impl> Deploy for Gitlab { fn list(&self, args: ReleaseBodyArgs) -> Result> { let url = format!("{}/releases", self.rest_api_basepath()); query::paged( @@ -34,7 +34,7 @@ impl> Deploy for Gitlab { } } -impl> Gitlab { +impl> Gitlab { fn resource_release_metadata_url(&self) -> (String, http::Headers) { let url = format!("{}/releases?page=1", self.rest_api_basepath()); let headers = self.headers(); @@ -53,7 +53,7 @@ impl> Gitlab { } } -impl> DeployAsset for Gitlab { +impl> DeployAsset for Gitlab { fn list(&self, args: ReleaseAssetListBodyArgs) -> Result> { let release = self.get_release(args)?; let mut asset_metatadata = Vec::new(); diff --git a/src/gitlab/trending.rs b/src/gitlab/trending.rs index 54bb405..8e30204 100644 --- a/src/gitlab/trending.rs +++ b/src/gitlab/trending.rs @@ -1,13 +1,13 @@ use crate::{ api_traits::TrendingProjectURL, cmds::trending::TrendingProject, - io::{HttpRunner, Response}, + io::{HttpRunner, HttpResponse}, Result, }; use super::Gitlab; -impl> TrendingProjectURL for Gitlab { +impl> TrendingProjectURL for Gitlab { fn list(&self, _language: String) -> Result> { unimplemented!() } diff --git a/src/gitlab/user.rs b/src/gitlab/user.rs index dc7d799..a3061d3 100644 --- a/src/gitlab/user.rs +++ b/src/gitlab/user.rs @@ -2,14 +2,14 @@ use crate::{ api_traits::{ApiOperation, UserInfo}, cmds::{project::Member, user::UserCliArgs}, error::GRError, - io::{HttpRunner, Response}, + io::{HttpRunner, HttpResponse}, remote::{self, query}, Result, }; use super::Gitlab; -impl> UserInfo for Gitlab { +impl> UserInfo for Gitlab { fn get_auth_user(&self) -> Result { let user = query::get::<_, (), _>( &self.runner, diff --git a/src/http.rs b/src/http.rs index 
4bb14b3..dcc39b6 100644 --- a/src/http.rs +++ b/src/http.rs @@ -4,8 +4,8 @@ use crate::cache::{Cache, CacheState}; use crate::config::ConfigProperties; use crate::error::GRError; use crate::io::{ - parse_link_headers, parse_page_headers, parse_ratelimit_headers, FlowControlHeaders, - HttpRunner, RateLimitHeader, Response, ResponseField, + parse_page_headers, parse_ratelimit_headers, FlowControlHeaders, HttpResponse, HttpRunner, + RateLimitHeader, ResponseField, }; use crate::time::{self, now_epoch_seconds, Milliseconds, Seconds}; use crate::{api_defaults, error, log_debug, log_error}; @@ -40,7 +40,7 @@ impl Client { } } - fn submit(&self, request: &Request) -> Result { + fn submit(&self, request: &Request) -> Result { let ureq_req = match request.method { Method::GET => ureq::get(request.url()), Method::HEAD => ureq::head(request.url()), @@ -79,7 +79,7 @@ impl Client { // log debug response headers log_debug!("Response headers: {:?}", headers); let body = response.into_string().unwrap_or_default(); - let response = Response::builder() + let response = HttpResponse::builder() .status(status) .body(body) .headers(headers) @@ -95,7 +95,7 @@ impl Client { } impl Client { - fn handle_rate_limit(&self, response: &Response) -> Result<()> { + fn handle_rate_limit(&self, response: &HttpResponse) -> Result<()> { if let Some(headers) = response.get_ratelimit_headers().borrow() { if headers.remaining <= self.config.rate_limit_remaining_threshold() { log_error!("Rate limit threshold reached"); @@ -309,12 +309,12 @@ pub enum Method { } impl> HttpRunner for Client { - type Response = Response; + type Response = HttpResponse; fn run(&self, cmd: &mut Request) -> Result { match cmd.method { Method::GET => { - let mut default_response = Response::builder().build().unwrap(); + let mut default_response = HttpResponse::builder().build().unwrap(); match self.cache.get(&cmd.resource) { Ok(CacheState::Fresh(response)) => { log_debug!("Cache fresh for {}", cmd.resource.url); @@ -399,8 +399,8 @@ impl<'a, R, T> Paginator<'a, R, T> { } } -impl<'a, T: Serialize, R: HttpRunner> Iterator for Paginator<'a, R, T> { - type Item = Result; +impl<'a, T: Serialize, R: HttpRunner> Iterator for Paginator<'a, R, T> { + type Item = Result; fn next(&mut self) -> Option { if let Some(page_url) = &self.page_url { @@ -459,37 +459,41 @@ mod test { test::utils::{init_test_logger, ConfigMock, MockRunner, LOG_BUFFER}, }; - fn header_processor_next_page_no_last(_header: &str) -> PageHeader { + fn header_processor_next_page_no_last() -> Rc> { let mut page_header = PageHeader::new(); page_header.set_next_page(Page::new("http://localhost?page=2", 1)); - page_header + Rc::new(Some(page_header)) } - fn header_processor_last_page_no_next(_header: &str) -> PageHeader { + fn header_processor_last_page_no_next() -> Rc> { let mut page_header = PageHeader::new(); page_header.set_last_page(Page::new("http://localhost?page=2", 1)); - page_header + Rc::new(Some(page_header)) } - fn response_with_next_page() -> Response { + fn response_with_next_page() -> HttpResponse { let mut headers = Headers::new(); headers.set("link".to_string(), "http://localhost?page=2".to_string()); - let response = Response::builder() + let flow_control_headers = + FlowControlHeaders::new(header_processor_next_page_no_last(), Rc::new(None)); + let response = HttpResponse::builder() .status(200) .headers(headers) - .link_header_processor(header_processor_next_page_no_last) + .flow_control_headers(flow_control_headers) .build() .unwrap(); response } - fn response_with_last_page() 
-> Response { + fn response_with_last_page() -> HttpResponse { let mut headers = Headers::new(); headers.set("link".to_string(), "http://localhost?page=2".to_string()); - let response = Response::builder() + let flow_control_headers = + FlowControlHeaders::new(header_processor_last_page_no_next(), Rc::new(None)); + let response = HttpResponse::builder() .status(200) .headers(headers) - .link_header_processor(header_processor_last_page_no_next) + .flow_control_headers(flow_control_headers) .build() .unwrap(); response @@ -497,11 +501,11 @@ mod test { #[test] fn test_paginator_no_headers_no_next_no_last_pages() { - let response = Response::builder().status(200).build().unwrap(); + let response = HttpResponse::builder().status(200).build().unwrap(); let client = Arc::new(MockRunner::new(vec![response])); let request: Request<()> = Request::new("http://localhost", Method::GET); let paginator = Paginator::new(&client, request, "http://localhost", None, None, 0, 60); - let responses = paginator.collect::>>(); + let responses = paginator.collect::>>(); assert_eq!(1, responses.len()); assert_eq!("http://localhost", *client.url()); } @@ -511,16 +515,15 @@ mod test { let response1 = response_with_next_page(); let mut headers = Headers::new(); headers.set("link".to_string(), "http://localhost?page=2".to_string()); - let response2 = Response::builder() + let response2 = HttpResponse::builder() .status(200) .headers(headers) - .link_header_processor(|_header| PageHeader::new()) .build() .unwrap(); let client = Arc::new(MockRunner::new(vec![response2, response1])); let request: Request<()> = Request::new("http://localhost", Method::GET); let paginator = Paginator::new(&client, request, "http://localhost", None, None, 0, 60); - let responses = paginator.collect::>>(); + let responses = paginator.collect::>>(); assert_eq!(2, responses.len()); } @@ -531,13 +534,13 @@ mod test { let client = Arc::new(MockRunner::new(vec![response2, response1])); let request: Request<()> = Request::new("http://localhost", Method::GET); let paginator = Paginator::new(&client, request, "http://localhost", None, None, 0, 60); - let responses = paginator.collect::>>(); + let responses = paginator.collect::>>(); assert_eq!(2, responses.len()); } #[test] fn test_paginator_error_response() { - let response = Response::builder() + let response = HttpResponse::builder() .status(500) .body("Internal Server Error".to_string()) .build() @@ -545,7 +548,7 @@ mod test { let client = Arc::new(MockRunner::new(vec![response])); let request: Request<()> = Request::new("http://localhost", Method::GET); let paginator = Paginator::new(&client, request, "http://localhost", None, None, 0, 60); - let responses = paginator.collect::>>(); + let responses = paginator.collect::>>(); assert_eq!(1, responses.len()); assert_eq!("http://localhost", *client.url()); } @@ -570,7 +573,7 @@ mod test { ); let request: Request<()> = Request::new("http://localhost", Method::GET); let paginator = Paginator::new(&client, request, "http://localhost", None, None, 0, 60); - let responses = paginator.collect::>>(); + let responses = paginator.collect::>>(); assert_eq!(1, responses.len()); } @@ -588,7 +591,7 @@ mod test { let request: Request<()> = Request::new("http://localhost", Method::GET); let client = Arc::new(MockRunner::new(responses)); let paginator = Paginator::new(&client, request, "http://localhost", None, None, 0, 60); - let responses = paginator.collect::>>(); + let responses = paginator.collect::>>(); assert_eq!(REST_API_MAX_PAGES, responses.len() as u32); 
} @@ -596,9 +599,18 @@ mod test { fn test_ratelimit_remaining_threshold_reached_is_error() { let mut headers = Headers::new(); headers.set("x-ratelimit-remaining".to_string(), "10".to_string()); - let response = Response::builder() + let flow_control_headers = FlowControlHeaders::new( + Rc::new(None), + Rc::new(Some(RateLimitHeader::new( + 10, + Seconds::new(60), + Seconds::new(60), + ))), + ); + let response = HttpResponse::builder() .status(200) .headers(headers) + .flow_control_headers(flow_control_headers) .build() .unwrap(); let client = Client::new(cache::NoCache, Arc::new(ConfigMock::new(1)), false); @@ -609,7 +621,7 @@ mod test { fn test_ratelimit_remaining_threshold_not_reached_is_ok() { let mut headers = Headers::new(); headers.set("ratelimit-remaining".to_string(), "11".to_string()); - let response = Response::builder() + let response = HttpResponse::builder() .status(200) .headers(headers) .build() @@ -699,7 +711,7 @@ mod test { .build() .unwrap(); let paginator = Paginator::new(&client, request, "http://localhost", None, None, 0, 60); - let responses = paginator.collect::>>(); + let responses = paginator.collect::>>(); assert_eq!(1, responses.len()); } @@ -720,7 +732,7 @@ mod test { 0, 60, ); - let responses = paginator.collect::>>(); + let responses = paginator.collect::>>(); assert_eq!(3, responses.len()); let buffer = LOG_BUFFER.lock().unwrap(); assert!(buffer.contains("Throttling for: 1 ms")); @@ -743,7 +755,7 @@ mod test { 0, 60, ); - let responses = paginator.collect::>>(); + let responses = paginator.collect::>>(); assert_eq!(2, responses.len()); let buffer = LOG_BUFFER.lock().unwrap(); assert!(buffer.contains("Throttling between: 1 ms and 3 ms")); @@ -770,7 +782,7 @@ mod test { .build() .unwrap(); let paginator = Paginator::new(&client, request, "http://localhost", None, None, 0, 60); - let responses = paginator.collect::>>(); + let responses = paginator.collect::>>(); assert_eq!(5, responses.len()); } } diff --git a/src/io.rs b/src/io.rs index 10040ac..0e46ce8 100644 --- a/src/io.rs +++ b/src/io.rs @@ -13,7 +13,6 @@ use crate::{ use regex::Regex; use serde::Serialize; use std::{ - borrow::Borrow, ffi::OsStr, fmt::{self, Display, Formatter}, rc::Rc, @@ -73,9 +72,23 @@ pub enum CmdInfo { Exit, } +#[derive(Clone, Debug, Builder)] +pub struct ShellResponse { + #[builder(default)] + pub status: i32, + #[builder(default)] + pub body: String, +} + +impl ShellResponse { + pub fn builder() -> ShellResponseBuilder { + ShellResponseBuilder::default() + } +} + /// Adapts lower level I/O HTTP/Shell outputs to a common Response. #[derive(Clone, Debug, Builder)] -pub struct Response { +pub struct HttpResponse { #[builder(default)] pub status: i32, #[builder(default)] @@ -83,15 +96,13 @@ pub struct Response { /// Optional headers. 
Mostly used by HTTP downstream HTTP responses #[builder(setter(into, strip_option), default)] pub headers: Option, - #[builder(default)] + #[builder(setter(into, strip_option), default)] pub flow_control_headers: FlowControlHeaders, - #[builder(default = "parse_link_headers")] - link_header_processor: fn(&str) -> PageHeader, } -impl Response { - pub fn builder() -> ResponseBuilder { - ResponseBuilder::default() +impl HttpResponse { + pub fn builder() -> HttpResponseBuilder { + HttpResponseBuilder::default() } } @@ -102,7 +113,7 @@ pub enum ResponseField { Headers, } -impl Response { +impl HttpResponse { pub fn header(&self, key: &str) -> Option<&str> { self.headers .as_ref() @@ -373,9 +384,13 @@ mod test { headers.set("x-ratelimit-remaining".to_string(), "30".to_string()); headers.set("x-ratelimit-reset".to_string(), "1658602270".to_string()); headers.set("retry-after".to_string(), "60".to_string()); - let response = Response::builder() + let rate_limit_header = parse_ratelimit_headers(Some(&headers)).unwrap(); + let flow_control_headers = + FlowControlHeaders::new(Rc::new(None), Rc::new(Some(rate_limit_header))); + let response = HttpResponse::builder() .body(body.to_string()) .headers(headers) + .flow_control_headers(flow_control_headers) .build() .unwrap(); let ratelimit_headers = response.get_ratelimit_headers().unwrap(); @@ -391,9 +406,13 @@ mod test { headers.set("ratelimit-remaining".to_string(), "30".to_string()); headers.set("ratelimit-reset".to_string(), "1658602270".to_string()); headers.set("retry-after".to_string(), "60".to_string()); - let response = Response::builder() + let rate_limit_header = parse_ratelimit_headers(Some(&headers)).unwrap(); + let flow_control_headers = + FlowControlHeaders::new(Rc::new(None), Rc::new(Some(rate_limit_header))); + let response = HttpResponse::builder() .body(body.to_string()) .headers(headers) + .flow_control_headers(flow_control_headers) .build() .unwrap(); let ratelimit_headers = response.get_ratelimit_headers().unwrap(); @@ -409,9 +428,13 @@ mod test { headers.set("RateLimit-remaining".to_string(), "30".to_string()); headers.set("rateLimit-reset".to_string(), "1658602270".to_string()); headers.set("Retry-After".to_string(), "60".to_string()); - let response = Response::builder() + let rate_limit_header = parse_ratelimit_headers(Some(&headers)); + let flow_control_headers = + FlowControlHeaders::new(Rc::new(None), Rc::new(rate_limit_header)); + let response = HttpResponse::builder() .body(body.to_string()) .headers(headers) + .flow_control_headers(flow_control_headers) .build() .unwrap(); let ratelimit_headers = response.get_ratelimit_headers(); @@ -451,7 +474,7 @@ mod test { #[test] fn test_response_ok_status_get_request_200() { - assert!(Response::builder() + assert!(HttpResponse::builder() .status(200) .build() .unwrap() @@ -462,14 +485,14 @@ mod test { fn test_response_not_ok_if_get_request_400s() { let not_ok_status = 400..=499; for status in not_ok_status { - let response = Response::builder().status(status).build().unwrap(); + let response = HttpResponse::builder().status(status).build().unwrap(); assert!(!response.is_ok(&http::Method::GET)); } } #[test] fn test_response_ok_status_post_request_201() { - assert!(Response::builder() + assert!(HttpResponse::builder() .status(201) .build() .unwrap() @@ -481,7 +504,7 @@ mod test { // special case handled by the caller (merge_request) let not_ok_status = [409, 422]; for status in not_ok_status.iter() { - let response = Response::builder().status(*status).build().unwrap(); + let response 
= HttpResponse::builder().status(*status).build().unwrap(); assert!(response.is_ok(&http::Method::POST)); } } @@ -497,7 +520,7 @@ mod test { let not_ok_status = 500..=599; for status in not_ok_status { for method in methods.iter() { - let response = Response::builder().status(status).build().unwrap(); + let response = HttpResponse::builder().status(status).build().unwrap(); assert!(!response.is_ok(method)); } } diff --git a/src/remote.rs b/src/remote.rs index 5f120a5..5b93a93 100644 --- a/src/remote.rs +++ b/src/remote.rs @@ -13,7 +13,7 @@ use crate::display::Format; use crate::error::GRError; use crate::github::Github; use crate::gitlab::Gitlab; -use crate::io::{CmdInfo, HttpRunner, Response, TaskRunner}; +use crate::io::{CmdInfo, HttpResponse, HttpRunner, ShellResponse, TaskRunner}; use crate::time::Milliseconds; use crate::{cli, error, get_default_config_path, http, log_debug, log_info}; use crate::{git, Result}; @@ -391,7 +391,7 @@ macro_rules! get { runner: Arc, ) -> Result> where - R: HttpRunner + Send + Sync + 'static, + R: HttpRunner + Send + Sync + 'static, { let github_domain_regex = regex::Regex::new(r"^github").unwrap(); let gitlab_domain_regex = regex::Regex::new(r"^gitlab").unwrap(); @@ -485,7 +485,7 @@ impl RemoteURL { } impl CliDomainRequirements { - pub fn check>( + pub fn check>( &self, cli_args: &cli::CliArgs, runner: &R, @@ -547,7 +547,7 @@ impl Display for CliDomainRequirements { } } -pub fn url>( +pub fn url>( cli_args: &cli::CliArgs, requirements: &[CliDomainRequirements], runner: &R, @@ -1052,7 +1052,7 @@ mod test { #[test] fn test_cli_requires_cd_local_repo_run_git_remote() { let cli_args = CliArgs::new(0, None, None, None); - let response = Response::builder() + let response = ShellResponse::builder() .body("git@github.com:jordilin/gitar.git".to_string()) .build() .unwrap(); @@ -1066,7 +1066,10 @@ mod test { #[test] fn test_cli_requires_cd_local_repo_run_git_remote_error() { let cli_args = CliArgs::new(0, None, None, None); - let response = Response::builder().body("".to_string()).build().unwrap(); + let response = ShellResponse::builder() + .body("".to_string()) + .build() + .unwrap(); let runner = MockRunner::new(vec![response]); let requirements = vec![CliDomainRequirements::CdInLocalRepo]; let result = url(&cli_args, &requirements, &runner, &None); @@ -1086,7 +1089,10 @@ mod test { CliDomainRequirements::CdInLocalRepo, CliDomainRequirements::RepoArgs, ]; - let response = Response::builder().body("".to_string()).build().unwrap(); + let response = ShellResponse::builder() + .body("".to_string()) + .build() + .unwrap(); let url = url( &cli_args, &requirements, @@ -1106,7 +1112,10 @@ mod test { CliDomainRequirements::CdInLocalRepo, CliDomainRequirements::DomainArgs, ]; - let response = Response::builder().body("".to_string()).build().unwrap(); + let response = ShellResponse::builder() + .body("".to_string()) + .build() + .unwrap(); let url = url( &cli_args, &requirements, @@ -1155,7 +1164,7 @@ mod test { let cli_args = CliArgs::default(); // Huck Finn opens a PR from a forked repo over to the main repo // jordilin/gitar - let response = Response::builder() + let response = ShellResponse::builder() .body("git@github.com:hfinn/gitar.git".to_string()) .build() .unwrap(); diff --git a/src/remote/query.rs b/src/remote/query.rs index 0cfb6bc..9d322d0 100644 --- a/src/remote/query.rs +++ b/src/remote/query.rs @@ -11,19 +11,19 @@ use crate::{ api_traits::{ApiOperation, NumberDeltaErr}, display, error, http::{self, Body, Headers, Paginator, Request, Resource}, - 
io::{HttpRunner, Response}, + io::{HttpResponse, HttpRunner}, json_load_page, json_loads, remote::ListBodyArgs, time::sort_filter_by_date, Result, }; -fn get_remote_resource_headers<'a, R: HttpRunner>( +fn get_remote_resource_headers<'a, R: HttpRunner>( runner: &Arc, url: &str, request_headers: Headers, api_operation: ApiOperation, -) -> Result { +) -> Result { send_request::<_, String>( runner, url, @@ -34,7 +34,7 @@ fn get_remote_resource_headers<'a, R: HttpRunner>( ) } -pub fn num_pages>( +pub fn num_pages>( runner: &Arc, url: &str, request_headers: Headers, @@ -54,7 +54,7 @@ pub fn num_pages>( } } -pub fn num_resources>( +pub fn num_resources>( runner: &Arc, url: &str, request_headers: Headers, @@ -84,14 +84,14 @@ pub fn num_resources>( } } -pub fn query_error(url: &str, response: &Response) -> error::GRError { +pub fn query_error(url: &str, response: &HttpResponse) -> error::GRError { error::GRError::RemoteServerError(format!( "Failed to submit request to URL: {} with status code: {} and body: {}", url, response.status, response.body )) } -pub fn send, D: Serialize, T>( +pub fn send, D: Serialize, T>( runner: &Arc, url: &str, body: Option<&Body>, @@ -105,7 +105,7 @@ pub fn send, D: Serialize, T>( Ok(mapper(&body)) } -pub fn send_json, D: Serialize>( +pub fn send_json, D: Serialize>( runner: &Arc, url: &str, body: Option<&Body>, @@ -117,18 +117,18 @@ pub fn send_json, D: Serialize>( json_loads(&response.body) } -pub fn send_raw, D: Serialize>( +pub fn send_raw, D: Serialize>( runner: &Arc, url: &str, body: Option<&Body>, request_headers: Headers, operation: ApiOperation, method: http::Method, -) -> Result { +) -> Result { send_request(runner, url, body, request_headers, method, operation) } -pub fn get, D: Serialize, T>( +pub fn get, D: Serialize, T>( runner: &Arc, url: &str, body: Option<&Body>, @@ -148,7 +148,7 @@ pub fn get, D: Serialize, T>( Ok(mapper(&body)) } -pub fn get_json, D: Serialize>( +pub fn get_json, D: Serialize>( runner: &Arc, url: &str, body: Option<&Body>, @@ -166,13 +166,13 @@ pub fn get_json, D: Serialize>( json_loads(&response.body) } -pub fn get_raw, D: Serialize>( +pub fn get_raw, D: Serialize>( runner: &Arc, url: &str, body: Option<&Body>, request_headers: Headers, operation: ApiOperation, -) -> Result { +) -> Result { send_request( runner, url, @@ -183,14 +183,14 @@ pub fn get_raw, D: Serialize>( ) } -fn send_request, T: Serialize>( +fn send_request, T: Serialize>( runner: &Arc, url: &str, body: Option<&Body>, request_headers: Headers, method: http::Method, operation: ApiOperation, -) -> Result { +) -> Result { let mut request = if let Some(body) = body { http::Request::builder() .method(method.clone()) @@ -227,7 +227,7 @@ pub fn paged( mapper: impl Fn(&serde_json::Value) -> T, ) -> Result> where - R: HttpRunner, + R: HttpRunner, T: Clone + Timestamp + Into, { let request = build_list_request(url, &list_args, request_headers, operation); @@ -338,13 +338,18 @@ fn build_list_request<'a>( #[cfg(test)] mod test { - use crate::{io::Page, test::utils::MockRunner}; + use std::rc::Rc; + + use crate::{ + io::{FlowControlHeaders, Page, PageHeader}, + test::utils::MockRunner, + }; use super::*; #[test] fn test_numpages_assume_one_if_pages_not_available() { - let response = Response::builder().status(200).build().unwrap(); + let response = HttpResponse::builder().status(200).build().unwrap(); let client = Arc::new(MockRunner::new(vec![response])); let url = "https://github.com/api/v4/projects/1/pipelines"; let headers = Headers::new(); @@ -355,7 +360,7 @@ mod test { 
#[test] fn test_numpages_error_on_404() { - let response = Response::builder().status(404).build().unwrap(); + let response = HttpResponse::builder().status(404).build().unwrap(); let client = Arc::new(MockRunner::new(vec![response])); let url = "https://github.com/api/v4/projects/1/pipelines"; let headers = Headers::new(); @@ -366,7 +371,7 @@ mod test { #[test] fn test_num_resources_assume_one_if_pages_not_available() { let headers = Headers::new(); - let response = Response::builder().status(200).build().unwrap(); + let response = HttpResponse::builder().status(200).build().unwrap(); let client = Arc::new(MockRunner::new(vec![response])); let url = "https://github.com/api/v4/projects/1/pipelines?page=1"; let num_resources = num_resources(&client, url, headers, ApiOperation::Pipeline).unwrap(); @@ -379,20 +384,18 @@ mod test { // Value doesn't matter as we are injecting the header processor // enforcing the last page and per_page values. headers.set("link", ""); - let response = Response::builder() + let next_page = Page::new("https://gitlab.com/api/v4/projects/1/pipelines?page=2", 2); + let last_page = Page::new("https://gitlab.com/api/v4/projects/1/pipelines?page=4", 4); + let mut page_header = PageHeader::new(); + page_header.set_next_page(next_page); + page_header.set_last_page(last_page); + page_header.per_page = 20; + let flow_control_header = + FlowControlHeaders::new(Rc::new(Some(page_header)), Rc::new(None)); + let response = HttpResponse::builder() .status(200) .headers(headers) - .link_header_processor(|_link| { - let mut page_header = PageHeader::new(); - let next_page = - Page::new("https://gitlab.com/api/v4/projects/1/pipelines?page=2", 2); - let last_page = - Page::new("https://gitlab.com/api/v4/projects/1/pipelines?page=4", 4); - page_header.set_next_page(next_page); - page_header.set_last_page(last_page); - page_header.per_page = 20; - page_header - }) + .flow_control_headers(flow_control_header) .build() .unwrap(); let client = Arc::new(MockRunner::new(vec![response])); @@ -406,7 +409,7 @@ mod test { #[test] fn test_numresources_error_on_404() { - let response = Response::builder().status(404).build().unwrap(); + let response = HttpResponse::builder().status(404).build().unwrap(); let client = Arc::new(MockRunner::new(vec![response])); let url = "https://github.com/api/v4/projects/1/pipelines"; let headers = Headers::new(); diff --git a/src/shell.rs b/src/shell.rs index f422604..446c1e4 100644 --- a/src/shell.rs +++ b/src/shell.rs @@ -1,5 +1,5 @@ use crate::error; -use crate::io::Response; +use crate::io::ShellResponse; use crate::io::TaskRunner; use crate::Result; use std::ffi::OsStr; @@ -13,7 +13,7 @@ use std::thread; pub struct BlockingCommand; impl TaskRunner for BlockingCommand { - type Response = Response; + type Response = ShellResponse; fn run(&self, cmd: T) -> Result where @@ -24,7 +24,7 @@ impl TaskRunner for BlockingCommand { } } -fn run_args(args: T) -> Result +fn run_args(args: T) -> Result where T: IntoIterator, T::Item: AsRef, @@ -32,7 +32,7 @@ where let args: Vec<_> = args.into_iter().collect(); let mut process = process::Command::new(&args[0]); process.args(&args[1..]); - let mut response_builder = Response::builder(); + let mut response_builder = ShellResponse::builder(); match process.output() { Ok(output) => { let status_code = output.status.code().unwrap_or(0); @@ -59,7 +59,7 @@ where pub struct StreamingCommand; impl TaskRunner for StreamingCommand { - type Response = Response; + type Response = ShellResponse; fn run(&self, cmd: T) -> Result where 
@@ -94,7 +94,10 @@ impl TaskRunner for StreamingCommand { stdout_handle.join().unwrap(); stderr_handle.join().unwrap(); let _ = child.wait()?; - Ok(Response::builder().status(0).body("".to_string()).build()?) + Ok(ShellResponse::builder() + .status(0) + .body("".to_string()) + .build()?) } } diff --git a/src/test.rs b/src/test.rs index c735420..6476920 100644 --- a/src/test.rs +++ b/src/test.rs @@ -6,7 +6,7 @@ pub mod utils { config::ConfigProperties, error, http::{self, Headers, Request}, - io::{HttpRunner, Response, TaskRunner}, + io::{HttpResponse, HttpRunner, ShellResponse, TaskRunner}, time::Milliseconds, Result, }; @@ -47,8 +47,8 @@ pub mod utils { contents } - pub struct MockRunner { - responses: RefCell>, + pub struct MockRunner { + responses: RefCell>, cmd: RefCell, headers: RefCell, url: RefCell, @@ -61,8 +61,8 @@ pub mod utils { pub request_body: RefCell, } - impl MockRunner { - pub fn new(responses: Vec) -> Self { + impl MockRunner { + pub fn new(responses: Vec) -> Self { Self { responses: RefCell::new(responses), cmd: RefCell::new(String::new()), @@ -107,8 +107,8 @@ pub mod utils { } } - impl TaskRunner for MockRunner { - type Response = Response; + impl TaskRunner for MockRunner { + type Response = ShellResponse; fn run(&self, cmd: T) -> Result where @@ -130,8 +130,8 @@ pub mod utils { } } - impl HttpRunner for MockRunner { - type Response = Response; + impl HttpRunner for MockRunner { + type Response = HttpResponse; fn run(&self, cmd: &mut Request) -> Result { self.url.replace(cmd.url().to_string()); @@ -306,10 +306,18 @@ pub mod utils { .into_iter() .map(|(status_code, get_contract_fn, headers)| { let body = get_contract_fn(); - let mut response = Response::builder(); + let mut response = HttpResponse::builder(); response.status(status_code); if headers.is_some() { response.headers(headers.clone().unwrap()); + let rate_limit_header = + crate::io::parse_ratelimit_headers(headers.as_ref()); + let link_header = crate::io::parse_page_headers(headers.as_ref()); + let flow_control_headers = crate::io::FlowControlHeaders::new( + std::rc::Rc::new(link_header), + std::rc::Rc::new(rate_limit_header), + ); + response.flow_control_headers(flow_control_headers); } if body.is_some() { response.body(body.unwrap()); diff --git a/tests/fs_cache_test.rs b/tests/fs_cache_test.rs index 2467b36..d858f95 100644 --- a/tests/fs_cache_test.rs +++ b/tests/fs_cache_test.rs @@ -8,7 +8,7 @@ use gr::{ cache::{filesystem::FileCache, Cache, CacheState}, config::ConfigProperties, http::{Headers, Resource}, - io::{Response, ResponseField}, + io::{HttpResponse, ResponseField}, }; struct TestConfig { @@ -41,7 +41,7 @@ fn test_file_cache_fresh() { let resource = Resource::new("https://api.example.com/test", Some(ApiOperation::Project)); - let response = Response::builder() + let response = HttpResponse::builder() .status(200) .body("Test response body".to_string()) .headers({ @@ -90,7 +90,7 @@ fn test_file_cache_stale_user_expired_no_cache() { let resource = Resource::new("https://api.example.com/test", Some(ApiOperation::Project)); - let response = Response::builder() + let response = HttpResponse::builder() .status(200) .body("Test response body".to_string()) .headers({ @@ -136,7 +136,7 @@ fn test_file_cache_fresh_user_expired_but_under_max_age() { let resource = Resource::new("https://api.example.com/test", Some(ApiOperation::Project)); - let response = Response::builder() + let response = HttpResponse::builder() .status(200) .body("Test response body".to_string()) .headers({ @@ -185,7 +185,7 @@ fn 
test_cache_stale_both_user_expired_and_max_aged_expired() { let resource = Resource::new("https://api.example.com/test", Some(ApiOperation::Project)); - let response = Response::builder() + let response = HttpResponse::builder() .status(200) .body("Test response body".to_string()) .headers({ @@ -228,7 +228,7 @@ fn test_cache_update_refreshes_cache() { let resource = Resource::new("https://api.example.com/test", Some(ApiOperation::Project)); // First response - let response = Response::builder() + let response = HttpResponse::builder() .status(200) .body("Test response body".to_string()) .headers({ @@ -242,7 +242,7 @@ fn test_cache_update_refreshes_cache() { file_cache.set(&resource, &response).unwrap(); // Second call, we get a 304 response - let new_response = Response::builder() + let new_response = HttpResponse::builder() .status(304) .headers(Headers::new()) .build() diff --git a/tests/http_test.rs b/tests/http_test.rs index 8c4ae9e..d9c8aae 100644 --- a/tests/http_test.rs +++ b/tests/http_test.rs @@ -4,7 +4,7 @@ use gr::cache::{Cache, InMemoryCache, NoCache}; use gr::config::ConfigProperties; use gr::error::GRError; use gr::http::{Client, Headers, Method, Request}; -use gr::io::{HttpRunner, Response, ResponseField}; +use gr::io::{HttpRunner, HttpResponse, ResponseField}; use httpmock::prelude::*; use httpmock::Method::{GET, HEAD, PATCH, POST}; @@ -120,7 +120,7 @@ fn test_http_gathers_from_inmemory_fresh_cache() { "default_branch": "main", }"#; - let response = Response::builder() + let response = HttpResponse::builder() .status(200) .body(body_str.to_string()) .build() @@ -180,7 +180,7 @@ fn test_http_gathers_from_inmemory_stale_cache_server_304() { let mut headers = Headers::new(); headers.set("etag".to_string(), "1234".to_string()); headers.set("Max-Age".to_string(), "0".to_string()); - let response = Response::builder() + let response = HttpResponse::builder() .status(200) .body(body_str.to_string()) .headers(headers) From 8a063bb457cdab0172e616469736e986aa0dce3e Mon Sep 17 00:00:00 2001 From: Jordi Carrillo Bosch Date: Mon, 14 Oct 2024 23:51:06 -0700 Subject: [PATCH 03/28] Cache fix: parse flowcontrol headers --- src/cache/filesystem.rs | 10 +++++++++- src/io.rs | 2 +- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/src/cache/filesystem.rs b/src/cache/filesystem.rs index a61a6ec..7ab7301 100644 --- a/src/cache/filesystem.rs +++ b/src/cache/filesystem.rs @@ -1,6 +1,7 @@ use std::fs::{self, File}; use std::io::{BufRead, BufReader, BufWriter, Read, Write}; use std::path::Path; +use std::rc::Rc; use std::sync::Arc; use flate2::bufread::GzDecoder; @@ -8,7 +9,7 @@ use sha2::{Digest, Sha256}; use crate::cache::Cache; use crate::http::{Headers, Resource}; -use crate::io::{self, HttpResponse}; +use crate::io::{self, FlowControlHeaders, HttpResponse}; use crate::time::Seconds; use super::CacheState; @@ -113,10 +114,17 @@ impl FileCache { reader.read_to_end(&mut body)?; let body = String::from_utf8(body)?.trim().to_string(); let headers_map = serde_json::from_str::(&headers)?; + // Gather cached link headers for pagination. + // We don't need rate limit headers as we are not querying the API at + // this point. 
+ let page_header = io::parse_page_headers(Some(&headers_map)); + let flow_control_headers = FlowControlHeaders::new(Rc::new(page_header), Rc::new(None)); + let response = HttpResponse::builder() .status(status_code) .body(body) .headers(headers_map) + .flow_control_headers(flow_control_headers) .build()?; Ok(response) } diff --git a/src/io.rs b/src/io.rs index 0e46ce8..b4665f0 100644 --- a/src/io.rs +++ b/src/io.rs @@ -96,7 +96,7 @@ pub struct HttpResponse { /// Optional headers. Mostly used by HTTP downstream HTTP responses #[builder(setter(into, strip_option), default)] pub headers: Option, - #[builder(setter(into, strip_option), default)] + #[builder(setter(into), default)] pub flow_control_headers: FlowControlHeaders, } From 4b7eff6b68fc2cec339a68a2b18d29ad5fa141c5 Mon Sep 17 00:00:00 2001 From: Jordi Carrillo Bosch Date: Tue, 15 Oct 2024 00:27:02 -0700 Subject: [PATCH 04/28] http: throttle after first request --- src/http.rs | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/src/http.rs b/src/http.rs index dcc39b6..fa40943 100644 --- a/src/http.rs +++ b/src/http.rs @@ -413,36 +413,36 @@ impl<'a, T: Serialize, R: HttpRunner> Iterator for Pagi } if self.iter >= 1 { self.request.set_url(page_url); + if let Some(throttle_time) = self.throttle_time { + log_info!("Throttling for: {} ms", throttle_time); + self.runner.throttle(throttle_time); + } else if let Some((min, max)) = self.throttle_range { + log_info!("Throttling between: {} ms and {} ms", min, max); + self.runner.throttle_range(min, max); + } } - // TODO throttle after first page log_info!("Requesting page: {}", self.iter + 1); log_info!("URL: {}", self.request.url()); - if let Some(throttle_time) = self.throttle_time { - log_info!("Throttling for: {} ms", throttle_time); - self.runner.throttle(throttle_time); - } else if let Some((min, max)) = self.throttle_range { - log_info!("Throttling between: {} ms and {} ms", min, max); - self.runner.throttle_range(min, max); - } - match self.backoff.retry_on_error(&mut self.request) { + + let response = match self.backoff.retry_on_error(&mut self.request) { Ok(response) => { if let Some(page_headers) = response.get_page_headers().borrow() { match (page_headers.next_page(), page_headers.last_page()) { (Some(next), _) => self.page_url = Some(next.url().to_string()), (None, _) => self.page_url = None, } - self.iter += 1; - return Some(Ok(response)); - } - self.page_url = None; - return Some(Ok(response)); + } else { + self.page_url = None; + }; + Some(Ok(response)) } Err(err) => { self.page_url = None; - return Some(Err(err)); + Some(Err(err)) } - } - // Get response and throttle. + }; + self.iter += 1; + return response; } None } @@ -736,7 +736,7 @@ mod test { assert_eq!(3, responses.len()); let buffer = LOG_BUFFER.lock().unwrap(); assert!(buffer.contains("Throttling for: 1 ms")); - assert_eq!(Milliseconds::new(3), *client.milliseconds_throttled()); + assert_eq!(Milliseconds::new(2), *client.milliseconds_throttled()); } #[test] @@ -759,7 +759,7 @@ mod test { assert_eq!(2, responses.len()); let buffer = LOG_BUFFER.lock().unwrap(); assert!(buffer.contains("Throttling between: 1 ms and 3 ms")); - assert_eq!(2, *client.throttled()); + assert_eq!(1, *client.throttled()); } #[test] From f8f506e3f7f372c68a62243bc627a591e0610345 Mon Sep 17 00:00:00 2001 From: Jordi Carrillo Bosch Date: Tue, 15 Oct 2024 00:30:44 -0700 Subject: [PATCH 05/28] io: make parse link headers private parse_page_headers is the public one that takes the Headers struct. 
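For illustration, a minimal sketch of the call path this leaves callers with (the helper name flow_control_from and the variable names are placeholders, not part of this patch): consumers hand the whole Headers map to the public parse_page_headers and never touch the raw "link" string, which is now an io-internal concern of parse_link_headers.

    use std::rc::Rc;

    use crate::http::Headers;
    use crate::io::{parse_page_headers, FlowControlHeaders};

    // Hypothetical helper: derive pagination flow control from already
    // parsed headers. Rate limit data is not needed here, so pass None.
    fn flow_control_from(headers: &Headers) -> FlowControlHeaders {
        let page_header = parse_page_headers(Some(headers));
        FlowControlHeaders::new(Rc::new(page_header), Rc::new(None))
    }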
--- src/io.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/io.rs b/src/io.rs index b4665f0..1d5477f 100644 --- a/src/io.rs +++ b/src/io.rs @@ -149,7 +149,7 @@ const NEXT: &str = "next"; const LAST: &str = "last"; pub const LINK_HEADER: &str = "link"; -pub fn parse_link_headers(link: &str) -> PageHeader { +fn parse_link_headers(link: &str) -> PageHeader { lazy_static! { static ref RE_URL: Regex = Regex::new(r#"<([^>]+)>;\s*rel="([^"]+)""#).unwrap(); static ref RE_PAGE_NUMBER: Regex = Regex::new(r"[^(per_)]page=(\d+)").unwrap(); From 02219874f2a6b0d5aec1052ec738c283f6009a03 Mon Sep 17 00:00:00 2001 From: Jordi Carrillo Bosch Date: Wed, 16 Oct 2024 21:58:53 -0700 Subject: [PATCH 06/28] Add throttle module --- src/http/throttle.rs | 53 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 src/http/throttle.rs diff --git a/src/http/throttle.rs b/src/http/throttle.rs new file mode 100644 index 0000000..a5feff1 --- /dev/null +++ b/src/http/throttle.rs @@ -0,0 +1,53 @@ +use std::thread; + +use rand::Rng; + +use crate::{io::FlowControlHeaders, log_info, time::Milliseconds}; + +/// Throttle strategy +pub trait ThrottleStrategy { + /// Throttle the request based on optional flow control headers. + /// Implementors might use the headers to adjust the throttling or ignore + /// them altogether. Ex. strategies could be a fixed delay, random, or based + /// on rate limiting headers. + fn throttle(&self, response: Option<&FlowControlHeaders>); +} + +pub struct Fixed { + delay: Milliseconds, +} + +impl Fixed { + pub fn new(delay: Milliseconds) -> Self { + Self { delay } + } +} + +impl ThrottleStrategy for Fixed { + fn throttle(&self, _response: Option<&FlowControlHeaders>) { + thread::sleep(std::time::Duration::from_millis(*self.delay)); + } +} + +pub struct Random { + delay_min: Milliseconds, + delay_max: Milliseconds, +} + +impl Random { + pub fn new(delay_min: Milliseconds, delay_max: Milliseconds) -> Self { + Self { + delay_min, + delay_max, + } + } +} + +impl ThrottleStrategy for Random { + fn throttle(&self, _response: Option<&FlowControlHeaders>) { + let mut rng = rand::thread_rng(); + let wait_time = rng.gen_range(*self.delay_min..=*self.delay_max); + log_info!("Sleeping for {} milliseconds", wait_time); + thread::sleep(std::time::Duration::from_millis(wait_time)); + } +} From f233df6e8e15e8907eb20f1167c10eca1f72b912 Mon Sep 17 00:00:00 2001 From: Jordi Carrillo Bosch Date: Wed, 16 Oct 2024 22:23:09 -0700 Subject: [PATCH 07/28] http: Update response rate limit headers In the case that the remote does not provide rate limiting headers, then update the response ones based on gitar defaults. 
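For illustration, a minimal sketch of what the fallback amounts to (the status code and counter values are placeholders; in default_rate_limit_handler they come from api_defaults and the shared Mutex counters):

    use crate::io::{HttpResponse, RateLimitHeader};
    use crate::time::Seconds;

    // Hypothetical check: a response that arrived without ratelimit-*
    // headers gets gitar's own bookkeeping attached, so backoff and
    // throttling code paths always find a RateLimitHeader to read.
    fn demo_default_rate_limit_headers() {
        let mut response = HttpResponse::builder().status(200).build().unwrap();
        let defaults = RateLimitHeader::new(10, Seconds::new(60), Seconds::new(60));
        response.update_rate_limit_headers(defaults);
        assert!(response.get_ratelimit_headers().is_some());
    }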
--- src/http.rs | 28 +++++++++++++++++++++------- src/io.rs | 4 ++++ 2 files changed, 25 insertions(+), 7 deletions(-) diff --git a/src/http.rs b/src/http.rs index fa40943..e71c415 100644 --- a/src/http.rs +++ b/src/http.rs @@ -1,3 +1,5 @@ +pub mod throttle; + use crate::api_traits::ApiOperation; use crate::backoff::{Backoff, Exponential}; use crate::cache::{Cache, CacheState}; @@ -79,14 +81,14 @@ impl Client { // log debug response headers log_debug!("Response headers: {:?}", headers); let body = response.into_string().unwrap_or_default(); - let response = HttpResponse::builder() + let mut response = HttpResponse::builder() .status(status) .body(body) .headers(headers) .flow_control_headers(flow_control_headers) .build() .unwrap(); - self.handle_rate_limit(&response)?; + self.handle_rate_limit(&mut response)?; Ok(response) } Err(err) => Err(GRError::HttpTransportError(err.to_string()).into()), @@ -95,7 +97,7 @@ impl Client { } impl Client { - fn handle_rate_limit(&self, response: &HttpResponse) -> Result<()> { + fn handle_rate_limit(&self, response: &mut HttpResponse) -> Result<()> { if let Some(headers) = response.get_ratelimit_headers().borrow() { if headers.remaining <= self.config.rate_limit_remaining_threshold() { log_error!("Rate limit threshold reached"); @@ -109,6 +111,7 @@ impl Client { // limits. log_info!("Rate limit headers not provided by remote, using defaults"); default_rate_limit_handler( + response, &self.config, &self.time_to_ratelimit_reset, &self.remaining_requests, @@ -119,6 +122,7 @@ impl Client { } fn default_rate_limit_handler( + response: &mut HttpResponse, config: &Arc, time_to_ratelimit_reset: &Mutex, remaining_requests: &Mutex, @@ -137,6 +141,12 @@ fn default_rate_limit_handler( api_defaults::DEFAULT_NUMBER_REQUESTS_MINUTE, time_to_ratelimit_reset ); + let rate_limit_header = RateLimitHeader::new( + *remaining_requests, + time_to_ratelimit_reset, + time_to_ratelimit_reset, + ); + response.update_rate_limit_headers(rate_limit_header); return Err(error::GRError::RateLimitExceeded(RateLimitHeader::new( *remaining_requests, time_to_ratelimit_reset, @@ -607,27 +617,27 @@ mod test { Seconds::new(60), ))), ); - let response = HttpResponse::builder() + let mut response = HttpResponse::builder() .status(200) .headers(headers) .flow_control_headers(flow_control_headers) .build() .unwrap(); let client = Client::new(cache::NoCache, Arc::new(ConfigMock::new(1)), false); - assert!(client.handle_rate_limit(&response).is_err()); + assert!(client.handle_rate_limit(&mut response).is_err()); } #[test] fn test_ratelimit_remaining_threshold_not_reached_is_ok() { let mut headers = Headers::new(); headers.set("ratelimit-remaining".to_string(), "11".to_string()); - let response = HttpResponse::builder() + let mut response = HttpResponse::builder() .status(200) .headers(headers) .build() .unwrap(); let client = Client::new(cache::NoCache, Arc::new(ConfigMock::new(1)), false); - assert!(client.handle_rate_limit(&response).is_ok()); + assert!(client.handle_rate_limit(&mut response).is_ok()); } fn epoch_seconds_now_mock(secs: u64) -> Seconds { @@ -650,7 +660,9 @@ mod test { let time_to_ratelimit_reset = time_to_ratelimit_reset.clone(); let config = config.clone(); threads.push(std::thread::spawn(move || { + let mut response = HttpResponse::builder().status(200).build().unwrap(); let result = default_rate_limit_handler( + &mut response, &config, &time_to_ratelimit_reset, &remaining_requests, @@ -684,7 +696,9 @@ mod test { let time_to_ratelimit_reset = time_to_ratelimit_reset.clone(); let 
config = config.clone(); threads.push(std::thread::spawn(move || { + let mut response = HttpResponse::builder().status(200).build().unwrap(); let result = default_rate_limit_handler( + &mut response, &config, &time_to_ratelimit_reset, &remaining_requests, diff --git a/src/io.rs b/src/io.rs index 1d5477f..e6789d1 100644 --- a/src/io.rs +++ b/src/io.rs @@ -143,6 +143,10 @@ impl HttpResponse { http::Method::PATCH | http::Method::PUT => self.status >= 200 && self.status < 300, } } + + pub fn update_rate_limit_headers(&mut self, headers: RateLimitHeader) { + self.flow_control_headers.rate_limit_header = Rc::new(Some(headers)); + } } const NEXT: &str = "next"; From 890340d799baa8195783e13541b003bb32f7f89f Mon Sep 17 00:00:00 2001 From: Jordi Carrillo Bosch Date: Wed, 16 Oct 2024 23:27:59 -0700 Subject: [PATCH 08/28] Inject throttling strategy to Paginator --- src/http.rs | 82 ++++++++++++++++++-------------------------- src/http/throttle.rs | 21 ++++++++++++ src/remote/query.rs | 11 ++++-- src/test.rs | 26 +++++++++++++- 4 files changed, 88 insertions(+), 52 deletions(-) diff --git a/src/http.rs b/src/http.rs index e71c415..ed95521 100644 --- a/src/http.rs +++ b/src/http.rs @@ -9,7 +9,7 @@ use crate::io::{ parse_page_headers, parse_ratelimit_headers, FlowControlHeaders, HttpResponse, HttpRunner, RateLimitHeader, ResponseField, }; -use crate::time::{self, now_epoch_seconds, Milliseconds, Seconds}; +use crate::time::{self, now_epoch_seconds, Seconds}; use crate::{api_defaults, error, log_debug, log_error}; use crate::{log_info, Result}; use serde::{Deserialize, Serialize}; @@ -18,6 +18,7 @@ use std::collections::{hash_map, HashMap}; use std::iter::Iterator; use std::rc::Rc; use std::sync::{Arc, Mutex}; +use throttle::ThrottleStrategy; use ureq::Error; pub struct Client { @@ -376,9 +377,8 @@ pub struct Paginator<'a, R, T> { request: Request<'a, T>, page_url: Option, iter: u32, - throttle_time: Option, - throttle_range: Option<(Milliseconds, Milliseconds)>, backoff: Backoff<'a, R>, + throttler: &'a Box, } impl<'a, R, T> Paginator<'a, R, T> { @@ -386,18 +386,15 @@ impl<'a, R, T> Paginator<'a, R, T> { runner: &'a Arc, request: Request<'a, T>, page_url: &str, - throttle_time: Option, - throttle_range: Option<(Milliseconds, Milliseconds)>, backoff_max_retries: u32, backoff_default_wait_time: u64, + throttle_strategy: &'a Box, ) -> Self { Paginator { runner, request, page_url: Some(page_url.to_string()), iter: 0, - throttle_time, - throttle_range, backoff: Backoff::new( runner, backoff_max_retries, @@ -405,6 +402,7 @@ impl<'a, R, T> Paginator<'a, R, T> { time::now_epoch_seconds, Box::new(Exponential), ), + throttler: throttle_strategy, } } } @@ -423,13 +421,7 @@ impl<'a, T: Serialize, R: HttpRunner> Iterator for Pagi } if self.iter >= 1 { self.request.set_url(page_url); - if let Some(throttle_time) = self.throttle_time { - log_info!("Throttling for: {} ms", throttle_time); - self.runner.throttle(throttle_time); - } else if let Some((min, max)) = self.throttle_range { - log_info!("Throttling between: {} ms and {} ms", min, max); - self.runner.throttle_range(min, max); - } + self.throttler.throttle(None); } log_info!("Requesting page: {}", self.iter + 1); log_info!("URL: {}", self.request.url()); @@ -460,13 +452,15 @@ impl<'a, T: Serialize, R: HttpRunner> Iterator for Pagi #[cfg(test)] mod test { + use throttle::NoThrottle; + use super::*; use crate::{ api_defaults::REST_API_MAX_PAGES, cache, io::{Page, PageHeader}, - test::utils::{init_test_logger, ConfigMock, MockRunner, LOG_BUFFER}, + 
test::utils::{ConfigMock, MockRunner, MockThrottler}, }; fn header_processor_next_page_no_last() -> Rc> { @@ -514,7 +508,8 @@ mod test { let response = HttpResponse::builder().status(200).build().unwrap(); let client = Arc::new(MockRunner::new(vec![response])); let request: Request<()> = Request::new("http://localhost", Method::GET); - let paginator = Paginator::new(&client, request, "http://localhost", None, None, 0, 60); + let throttler: Box = Box::new(NoThrottle::default()); + let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, &throttler); let responses = paginator.collect::>>(); assert_eq!(1, responses.len()); assert_eq!("http://localhost", *client.url()); @@ -532,7 +527,8 @@ mod test { .unwrap(); let client = Arc::new(MockRunner::new(vec![response2, response1])); let request: Request<()> = Request::new("http://localhost", Method::GET); - let paginator = Paginator::new(&client, request, "http://localhost", None, None, 0, 60); + let throttler: Box = Box::new(NoThrottle::default()); + let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, &throttler); let responses = paginator.collect::>>(); assert_eq!(2, responses.len()); } @@ -543,7 +539,8 @@ mod test { let response2 = response_with_last_page(); let client = Arc::new(MockRunner::new(vec![response2, response1])); let request: Request<()> = Request::new("http://localhost", Method::GET); - let paginator = Paginator::new(&client, request, "http://localhost", None, None, 0, 60); + let throttler: Box = Box::new(NoThrottle::default()); + let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, &throttler); let responses = paginator.collect::>>(); assert_eq!(2, responses.len()); } @@ -557,7 +554,8 @@ mod test { .unwrap(); let client = Arc::new(MockRunner::new(vec![response])); let request: Request<()> = Request::new("http://localhost", Method::GET); - let paginator = Paginator::new(&client, request, "http://localhost", None, None, 0, 60); + let throttler: Box = Box::new(NoThrottle::default()); + let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, &throttler); let responses = paginator.collect::>>(); assert_eq!(1, responses.len()); assert_eq!("http://localhost", *client.url()); @@ -582,7 +580,8 @@ mod test { MockRunner::new(vec![response3, response2, response1]).with_config(ConfigMock::new(1)), ); let request: Request<()> = Request::new("http://localhost", Method::GET); - let paginator = Paginator::new(&client, request, "http://localhost", None, None, 0, 60); + let throttler: Box = Box::new(NoThrottle::default()); + let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, &throttler); let responses = paginator.collect::>>(); assert_eq!(1, responses.len()); } @@ -600,7 +599,8 @@ mod test { responses.reverse(); let request: Request<()> = Request::new("http://localhost", Method::GET); let client = Arc::new(MockRunner::new(responses)); - let paginator = Paginator::new(&client, request, "http://localhost", None, None, 0, 60); + let throttler: Box = Box::new(NoThrottle::default()); + let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, &throttler); let responses = paginator.collect::>>(); assert_eq!(REST_API_MAX_PAGES, responses.len() as u32); } @@ -724,56 +724,39 @@ mod test { .max_pages(1) .build() .unwrap(); - let paginator = Paginator::new(&client, request, "http://localhost", None, None, 0, 60); + let throttler: Box = Box::new(NoThrottle::default()); + let paginator = Paginator::new(&client, request, 
"http://localhost", 0, 60, &throttler); let responses = paginator.collect::>>(); assert_eq!(1, responses.len()); } #[test] fn test_paginator_fixed_throttle_enabled() { - init_test_logger(); let response1 = response_with_next_page(); let response2 = response_with_next_page(); let response3 = response_with_last_page(); let client = Arc::new(MockRunner::new(vec![response3, response2, response1])); let request: Request<()> = Request::new("http://localhost", Method::GET); - let paginator = Paginator::new( - &client, - request, - "http://localhost", - Some(Milliseconds::new(1)), - None, - 0, - 60, - ); + let throttler = Rc::new(MockThrottler::new()); + let bthrottler: Box = Box::new(Rc::clone(&throttler)); + let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, &bthrottler); let responses = paginator.collect::>>(); assert_eq!(3, responses.len()); - let buffer = LOG_BUFFER.lock().unwrap(); - assert!(buffer.contains("Throttling for: 1 ms")); - assert_eq!(Milliseconds::new(2), *client.milliseconds_throttled()); + assert_eq!(2, *throttler.throttled()); } #[test] fn test_paginator_range_throttle_enabled() { - init_test_logger(); let response1 = response_with_next_page(); let response2 = response_with_last_page(); let client = Arc::new(MockRunner::new(vec![response2, response1])); let request: Request<()> = Request::new("http://localhost", Method::GET); - let paginator = Paginator::new( - &client, - request, - "http://localhost", - None, - Some((Milliseconds::new(1), Milliseconds::new(3))), - 0, - 60, - ); + let throttler = Rc::new(MockThrottler::new()); + let bthrottler: Box = Box::new(Rc::clone(&throttler)); + let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, &bthrottler); let responses = paginator.collect::>>(); assert_eq!(2, responses.len()); - let buffer = LOG_BUFFER.lock().unwrap(); - assert!(buffer.contains("Throttling between: 1 ms and 3 ms")); - assert_eq!(1, *client.throttled()); + assert_eq!(1, *throttler.throttled()); } #[test] @@ -795,7 +778,8 @@ mod test { .max_pages(5) .build() .unwrap(); - let paginator = Paginator::new(&client, request, "http://localhost", None, None, 0, 60); + let throttler: Box = Box::new(NoThrottle::default()); + let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, &throttler); let responses = paginator.collect::>>(); assert_eq!(5, responses.len()); } diff --git a/src/http/throttle.rs b/src/http/throttle.rs index a5feff1..5c25bd9 100644 --- a/src/http/throttle.rs +++ b/src/http/throttle.rs @@ -25,6 +25,7 @@ impl Fixed { impl ThrottleStrategy for Fixed { fn throttle(&self, _response: Option<&FlowControlHeaders>) { + log_info!("Throttling for: {} ms", self.delay); thread::sleep(std::time::Duration::from_millis(*self.delay)); } } @@ -45,9 +46,29 @@ impl Random { impl ThrottleStrategy for Random { fn throttle(&self, _response: Option<&FlowControlHeaders>) { + log_info!( + "Throttling between: {} ms and {} ms", + self.delay_min, + self.delay_max + ); let mut rng = rand::thread_rng(); let wait_time = rng.gen_range(*self.delay_min..=*self.delay_max); log_info!("Sleeping for {} milliseconds", wait_time); thread::sleep(std::time::Duration::from_millis(wait_time)); } } + +#[derive(Default)] +pub struct NoThrottle; + +impl NoThrottle { + pub fn new() -> Self { + Self {} + } +} + +impl ThrottleStrategy for NoThrottle { + fn throttle(&self, _response: Option<&FlowControlHeaders>) { + log_info!("No throttling enabled"); + } +} diff --git a/src/remote/query.rs b/src/remote/query.rs index 9d322d0..6fbda7f 
100644 --- a/src/remote/query.rs +++ b/src/remote/query.rs @@ -6,6 +6,7 @@ use serde::Serialize; use crate::api_traits::Timestamp; use crate::display::DisplayBody; +use crate::http::throttle::{self, ThrottleStrategy}; use crate::{ api_defaults, api_traits::{ApiOperation, NumberDeltaErr}, @@ -241,14 +242,20 @@ where backoff_max_retries = list_args.get_args.backoff_max_retries; backoff_wait_time = list_args.get_args.backoff_retry_after; } + let throttle_strategy: Box = match throttle_time { + Some(throttle_time) => Box::new(throttle::Fixed::new(throttle_time)), + None => match throttle_range { + Some((min, max)) => Box::new(throttle::Random::new(min, max)), + None => Box::new(throttle::NoThrottle::new()), + }, + }; let paginator = Paginator::new( runner, request, url, - throttle_time, - throttle_range, backoff_max_retries, backoff_wait_time, + &throttle_strategy, ); let all_data = paginator .map(|response| { diff --git a/src/test.rs b/src/test.rs index 6476920..12c4a0f 100644 --- a/src/test.rs +++ b/src/test.rs @@ -6,7 +6,7 @@ pub mod utils { config::ConfigProperties, error, http::{self, Headers, Request}, - io::{HttpResponse, HttpRunner, ShellResponse, TaskRunner}, + io::{self, HttpResponse, HttpRunner, ShellResponse, TaskRunner}, time::Milliseconds, Result, }; @@ -19,6 +19,7 @@ pub mod utils { fs::File, io::Read, ops::Deref, + rc::Rc, sync::{Arc, Mutex}, }; @@ -394,4 +395,27 @@ pub mod utils { self.contracts.into_iter() } } + + pub struct MockThrottler { + throttled: RefCell, + } + + impl MockThrottler { + pub fn new() -> Self { + Self { + throttled: RefCell::new(0), + } + } + + pub fn throttled(&self) -> Ref { + self.throttled.borrow() + } + } + + impl http::throttle::ThrottleStrategy for Rc { + fn throttle(&self, _response: Option<&io::FlowControlHeaders>) { + let mut throttled = self.throttled.borrow_mut(); + *throttled += 1; + } + } } From c3689f031c07031a4a39ad965e73e1d5dcb2fa4e Mon Sep 17 00:00:00 2001 From: Jordi Carrillo Bosch Date: Thu, 17 Oct 2024 01:11:22 -0700 Subject: [PATCH 09/28] paginator: set max pages up front, throttle with response --- src/http.rs | 30 ++++++++++++++++++------------ src/io.rs | 4 ++++ 2 files changed, 22 insertions(+), 12 deletions(-) diff --git a/src/http.rs b/src/http.rs index ed95521..912d685 100644 --- a/src/http.rs +++ b/src/http.rs @@ -373,15 +373,15 @@ impl> HttpRunner for Client { } pub struct Paginator<'a, R, T> { - runner: &'a Arc, request: Request<'a, T>, page_url: Option, iter: u32, backoff: Backoff<'a, R>, throttler: &'a Box, + max_pages: u32, } -impl<'a, R, T> Paginator<'a, R, T> { +impl<'a, R: HttpRunner, T: Serialize> Paginator<'a, R, T> { pub fn new( runner: &'a Arc, request: Request<'a, T>, @@ -390,8 +390,12 @@ impl<'a, R, T> Paginator<'a, R, T> { backoff_default_wait_time: u64, throttle_strategy: &'a Box, ) -> Self { + let max_pages = if let Some(max_pages) = request.max_pages { + max_pages as u32 + } else { + runner.api_max_pages(&request) + }; Paginator { - runner, request, page_url: Some(page_url.to_string()), iter: 0, @@ -403,6 +407,7 @@ impl<'a, R, T> Paginator<'a, R, T> { Box::new(Exponential), ), throttler: throttle_strategy, + max_pages, } } } @@ -412,16 +417,11 @@ impl<'a, T: Serialize, R: HttpRunner> Iterator for Pagi fn next(&mut self) -> Option { if let Some(page_url) = &self.page_url { - if let Some(max_pages) = self.request.max_pages { - if self.iter >= max_pages as u32 { - return None; - } - } else if self.iter == self.runner.api_max_pages(&self.request) { + if self.iter >= self.max_pages { return None; } if self.iter 
>= 1 { self.request.set_url(page_url); - self.throttler.throttle(None); } log_info!("Requesting page: {}", self.iter + 1); log_info!("URL: {}", self.request.url()); @@ -436,15 +436,21 @@ impl<'a, T: Serialize, R: HttpRunner> Iterator for Pagi } else { self.page_url = None; }; - Some(Ok(response)) + Ok(response) } Err(err) => { self.page_url = None; - Some(Err(err)) + Err(err) } }; self.iter += 1; - return response; + if self.iter < self.max_pages && self.page_url.is_some() { + // technically no need to check ok on response, as page_url is Some + // (response was Ok) + self.throttler + .throttle(Some(&response.as_ref().unwrap().get_flow_control_headers())); + } + return Some(response); } None } diff --git a/src/io.rs b/src/io.rs index e6789d1..ed94b5a 100644 --- a/src/io.rs +++ b/src/io.rs @@ -129,6 +129,10 @@ impl HttpResponse { self.flow_control_headers.get_rate_limit_header() } + pub fn get_flow_control_headers(&self) -> &FlowControlHeaders { + &self.flow_control_headers + } + pub fn get_etag(&self) -> Option<&str> { self.header("etag") } From 23aaf872a9962c2e1c5539bd62d0f7285d55b68a Mon Sep 17 00:00:00 2001 From: Jordi Carrillo Bosch Date: Thu, 17 Oct 2024 21:40:13 -0700 Subject: [PATCH 10/28] HttpResponse: local_cache local_cache is a flag that is set when the response is retrieved from the local cache. The use case is to disable throttling when the response is retrieved from the local cache. --- src/http.rs | 3 ++- src/io.rs | 2 ++ tests/http_test.rs | 5 ++++- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/src/http.rs b/src/http.rs index 912d685..1348bc3 100644 --- a/src/http.rs +++ b/src/http.rs @@ -327,10 +327,11 @@ impl> HttpRunner for Client { Method::GET => { let mut default_response = HttpResponse::builder().build().unwrap(); match self.cache.get(&cmd.resource) { - Ok(CacheState::Fresh(response)) => { + Ok(CacheState::Fresh(mut response)) => { log_debug!("Cache fresh for {}", cmd.resource.url); if !self.refresh_cache { log_debug!("Returning local cached response"); + response.local_cache = true; return Ok(response); } default_response = response; diff --git a/src/io.rs b/src/io.rs index ed94b5a..910d2fa 100644 --- a/src/io.rs +++ b/src/io.rs @@ -98,6 +98,8 @@ pub struct HttpResponse { pub headers: Option, #[builder(setter(into), default)] pub flow_control_headers: FlowControlHeaders, + #[builder(setter(into), default)] + pub local_cache: bool, } impl HttpResponse { diff --git a/tests/http_test.rs b/tests/http_test.rs index d9c8aae..f72a213 100644 --- a/tests/http_test.rs +++ b/tests/http_test.rs @@ -4,7 +4,7 @@ use gr::cache::{Cache, InMemoryCache, NoCache}; use gr::config::ConfigProperties; use gr::error::GRError; use gr::http::{Client, Headers, Method, Request}; -use gr::io::{HttpRunner, HttpResponse, ResponseField}; +use gr::io::{HttpResponse, HttpRunner, ResponseField}; use httpmock::prelude::*; use httpmock::Method::{GET, HEAD, PATCH, POST}; @@ -150,6 +150,8 @@ fn test_http_gathers_from_inmemory_fresh_cache() { assert_eq!(response.status, 200); assert!(response.body.contains("id")); + assert!(response.local_cache); + // Mock was never called. 
We used the cache server_mock.assert_hits(0); } @@ -196,6 +198,7 @@ fn test_http_gathers_from_inmemory_stale_cache_server_304() { let response = runner.run(&mut request).unwrap(); assert_eq!(response.status, 200); + assert!(!response.local_cache); assert!(response.body.contains("id")); // While do we have a cache, the cache was expired, hence we expect the From 4e3b49a495ee61bd7c733cf6fc63a092fe8fabfe Mon Sep 17 00:00:00 2001 From: Jordi Carrillo Bosch Date: Thu, 17 Oct 2024 21:59:14 -0700 Subject: [PATCH 11/28] Do not throttle if the response is from local cache --- src/http.rs | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 51 insertions(+), 3 deletions(-) diff --git a/src/http.rs b/src/http.rs index 1348bc3..c7b8d31 100644 --- a/src/http.rs +++ b/src/http.rs @@ -446,10 +446,13 @@ impl<'a, T: Serialize, R: HttpRunner> Iterator for Pagi }; self.iter += 1; if self.iter < self.max_pages && self.page_url.is_some() { - // technically no need to check ok on response, as page_url is Some + // Technically no need to check ok on response, as page_url is Some // (response was Ok) - self.throttler - .throttle(Some(&response.as_ref().unwrap().get_flow_control_headers())); + let response = response.as_ref().unwrap(); + if !response.local_cache { + self.throttler + .throttle(Some(&response.get_flow_control_headers())); + } } return Some(response); } @@ -510,6 +513,36 @@ mod test { response } + fn cached_response_next_page() -> HttpResponse { + let mut headers = Headers::new(); + headers.set("link".to_string(), "http://localhost?page=2".to_string()); + let flow_control_headers = + FlowControlHeaders::new(header_processor_next_page_no_last(), Rc::new(None)); + let response = HttpResponse::builder() + .status(200) + .headers(headers) + .flow_control_headers(flow_control_headers) + .local_cache(true) + .build() + .unwrap(); + response + } + + fn cached_response_last_page() -> HttpResponse { + let mut headers = Headers::new(); + headers.set("link".to_string(), "http://localhost?page=2".to_string()); + let flow_control_headers = + FlowControlHeaders::new(header_processor_last_page_no_next(), Rc::new(None)); + let response = HttpResponse::builder() + .status(200) + .headers(headers) + .flow_control_headers(flow_control_headers) + .local_cache(true) + .build() + .unwrap(); + response + } + #[test] fn test_paginator_no_headers_no_next_no_last_pages() { let response = HttpResponse::builder().status(200).build().unwrap(); @@ -766,6 +799,21 @@ mod test { assert_eq!(1, *throttler.throttled()); } + #[test] + fn test_paginator_no_throttle_if_response_is_from_local_cache() { + let response1 = cached_response_next_page(); + let response2 = cached_response_next_page(); + let response3 = cached_response_last_page(); + let client = Arc::new(MockRunner::new(vec![response3, response2, response1])); + let request: Request<()> = Request::new("http://localhost", Method::GET); + let throttler = Rc::new(MockThrottler::new()); + let bthrottler: Box = Box::new(Rc::clone(&throttler)); + let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, &bthrottler); + let responses = paginator.collect::>>(); + assert_eq!(3, responses.len()); + assert_eq!(0, *throttler.throttled()); + } + #[test] fn test_user_request_from_up_to_pages_takes_over_max_api_pages() { let mut responses = Vec::new(); From 06a3893c724fea60ebcb786a26d4cf845304c8ec Mon Sep 17 00:00:00 2001 From: Jordi Carrillo Bosch Date: Thu, 17 Oct 2024 22:06:30 -0700 Subject: [PATCH 12/28] throttle trait - rename arg to flow control 
headers --- src/http/throttle.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/http/throttle.rs b/src/http/throttle.rs index 5c25bd9..1c87056 100644 --- a/src/http/throttle.rs +++ b/src/http/throttle.rs @@ -10,7 +10,7 @@ pub trait ThrottleStrategy { /// Implementors might use the headers to adjust the throttling or ignore /// them altogether. Ex. strategies could be a fixed delay, random, or based /// on rate limiting headers. - fn throttle(&self, response: Option<&FlowControlHeaders>); + fn throttle(&self, flow_control_headers: Option<&FlowControlHeaders>); } pub struct Fixed { @@ -24,7 +24,7 @@ impl Fixed { } impl ThrottleStrategy for Fixed { - fn throttle(&self, _response: Option<&FlowControlHeaders>) { + fn throttle(&self, _flow_control_headers: Option<&FlowControlHeaders>) { log_info!("Throttling for: {} ms", self.delay); thread::sleep(std::time::Duration::from_millis(*self.delay)); } @@ -45,7 +45,7 @@ impl Random { } impl ThrottleStrategy for Random { - fn throttle(&self, _response: Option<&FlowControlHeaders>) { + fn throttle(&self, _flow_control_headers: Option<&FlowControlHeaders>) { log_info!( "Throttling between: {} ms and {} ms", self.delay_min, @@ -68,7 +68,7 @@ impl NoThrottle { } impl ThrottleStrategy for NoThrottle { - fn throttle(&self, _response: Option<&FlowControlHeaders>) { + fn throttle(&self, _flow_control_headers: Option<&FlowControlHeaders>) { log_info!("No throttling enabled"); } } From bca4c779575f0de0bc40ee3ff72ea0cde76848cd Mon Sep 17 00:00:00 2001 From: Jordi Carrillo Bosch Date: Thu, 17 Oct 2024 22:33:00 -0700 Subject: [PATCH 13/28] Clippy fixes --- src/http.rs | 105 ++++++++++++++++++++++++++++++++++++++------ src/remote/query.rs | 4 +- 2 files changed, 93 insertions(+), 16 deletions(-) diff --git a/src/http.rs b/src/http.rs index c7b8d31..df31540 100644 --- a/src/http.rs +++ b/src/http.rs @@ -378,7 +378,7 @@ pub struct Paginator<'a, R, T> { page_url: Option, iter: u32, backoff: Backoff<'a, R>, - throttler: &'a Box, + throttler: &'a dyn ThrottleStrategy, max_pages: u32, } @@ -389,7 +389,7 @@ impl<'a, R: HttpRunner, T: Serialize> Paginator<'a, R, T> { page_url: &str, backoff_max_retries: u32, backoff_default_wait_time: u64, - throttle_strategy: &'a Box, + throttle_strategy: &'a dyn ThrottleStrategy, ) -> Self { let max_pages = if let Some(max_pages) = request.max_pages { max_pages as u32 @@ -451,7 +451,7 @@ impl<'a, T: Serialize, R: HttpRunner> Iterator for Pagi let response = response.as_ref().unwrap(); if !response.local_cache { self.throttler - .throttle(Some(&response.get_flow_control_headers())); + .throttle(Some(response.get_flow_control_headers())); } } return Some(response); @@ -549,7 +549,14 @@ mod test { let client = Arc::new(MockRunner::new(vec![response])); let request: Request<()> = Request::new("http://localhost", Method::GET); let throttler: Box = Box::new(NoThrottle::default()); - let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, &throttler); + let paginator = Paginator::new( + &client, + request, + "http://localhost", + 0, + 60, + throttler.as_ref(), + ); let responses = paginator.collect::>>(); assert_eq!(1, responses.len()); assert_eq!("http://localhost", *client.url()); @@ -568,7 +575,14 @@ mod test { let client = Arc::new(MockRunner::new(vec![response2, response1])); let request: Request<()> = Request::new("http://localhost", Method::GET); let throttler: Box = Box::new(NoThrottle::default()); - let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, &throttler); 
+ let paginator = Paginator::new( + &client, + request, + "http://localhost", + 0, + 60, + throttler.as_ref(), + ); let responses = paginator.collect::>>(); assert_eq!(2, responses.len()); } @@ -580,7 +594,14 @@ mod test { let client = Arc::new(MockRunner::new(vec![response2, response1])); let request: Request<()> = Request::new("http://localhost", Method::GET); let throttler: Box = Box::new(NoThrottle::default()); - let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, &throttler); + let paginator = Paginator::new( + &client, + request, + "http://localhost", + 0, + 60, + throttler.as_ref(), + ); let responses = paginator.collect::>>(); assert_eq!(2, responses.len()); } @@ -595,7 +616,14 @@ mod test { let client = Arc::new(MockRunner::new(vec![response])); let request: Request<()> = Request::new("http://localhost", Method::GET); let throttler: Box = Box::new(NoThrottle::default()); - let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, &throttler); + let paginator = Paginator::new( + &client, + request, + "http://localhost", + 0, + 60, + throttler.as_ref(), + ); let responses = paginator.collect::>>(); assert_eq!(1, responses.len()); assert_eq!("http://localhost", *client.url()); @@ -621,7 +649,14 @@ mod test { ); let request: Request<()> = Request::new("http://localhost", Method::GET); let throttler: Box = Box::new(NoThrottle::default()); - let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, &throttler); + let paginator = Paginator::new( + &client, + request, + "http://localhost", + 0, + 60, + throttler.as_ref(), + ); let responses = paginator.collect::>>(); assert_eq!(1, responses.len()); } @@ -640,7 +675,14 @@ mod test { let request: Request<()> = Request::new("http://localhost", Method::GET); let client = Arc::new(MockRunner::new(responses)); let throttler: Box = Box::new(NoThrottle::default()); - let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, &throttler); + let paginator = Paginator::new( + &client, + request, + "http://localhost", + 0, + 60, + throttler.as_ref(), + ); let responses = paginator.collect::>>(); assert_eq!(REST_API_MAX_PAGES, responses.len() as u32); } @@ -765,7 +807,14 @@ mod test { .build() .unwrap(); let throttler: Box = Box::new(NoThrottle::default()); - let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, &throttler); + let paginator = Paginator::new( + &client, + request, + "http://localhost", + 0, + 60, + throttler.as_ref(), + ); let responses = paginator.collect::>>(); assert_eq!(1, responses.len()); } @@ -779,7 +828,14 @@ mod test { let request: Request<()> = Request::new("http://localhost", Method::GET); let throttler = Rc::new(MockThrottler::new()); let bthrottler: Box = Box::new(Rc::clone(&throttler)); - let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, &bthrottler); + let paginator = Paginator::new( + &client, + request, + "http://localhost", + 0, + 60, + bthrottler.as_ref(), + ); let responses = paginator.collect::>>(); assert_eq!(3, responses.len()); assert_eq!(2, *throttler.throttled()); @@ -793,7 +849,14 @@ mod test { let request: Request<()> = Request::new("http://localhost", Method::GET); let throttler = Rc::new(MockThrottler::new()); let bthrottler: Box = Box::new(Rc::clone(&throttler)); - let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, &bthrottler); + let paginator = Paginator::new( + &client, + request, + "http://localhost", + 0, + 60, + bthrottler.as_ref(), + ); let responses 
= paginator.collect::>>(); assert_eq!(2, responses.len()); assert_eq!(1, *throttler.throttled()); @@ -808,7 +871,14 @@ mod test { let request: Request<()> = Request::new("http://localhost", Method::GET); let throttler = Rc::new(MockThrottler::new()); let bthrottler: Box = Box::new(Rc::clone(&throttler)); - let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, &bthrottler); + let paginator = Paginator::new( + &client, + request, + "http://localhost", + 0, + 60, + bthrottler.as_ref(), + ); let responses = paginator.collect::>>(); assert_eq!(3, responses.len()); assert_eq!(0, *throttler.throttled()); @@ -834,7 +904,14 @@ mod test { .build() .unwrap(); let throttler: Box = Box::new(NoThrottle::default()); - let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, &throttler); + let paginator = Paginator::new( + &client, + request, + "http://localhost", + 0, + 60, + throttler.as_ref(), + ); let responses = paginator.collect::>>(); assert_eq!(5, responses.len()); } diff --git a/src/remote/query.rs b/src/remote/query.rs index 6fbda7f..813a378 100644 --- a/src/remote/query.rs +++ b/src/remote/query.rs @@ -19,7 +19,7 @@ use crate::{ Result, }; -fn get_remote_resource_headers<'a, R: HttpRunner>( +fn get_remote_resource_headers>( runner: &Arc, url: &str, request_headers: Headers, @@ -255,7 +255,7 @@ where url, backoff_max_retries, backoff_wait_time, - &throttle_strategy, + throttle_strategy.as_ref(), ); let all_data = paginator .map(|response| { From a4fb4f392292c047b296ed3361998ab8e7c6d52d Mon Sep 17 00:00:00 2001 From: Jordi Carrillo Bosch Date: Fri, 18 Oct 2024 21:33:55 -0700 Subject: [PATCH 14/28] Remove throttle from HTTPRunner trait --- src/backoff.rs | 122 ++++++++++++++++++++++++++++++------------- src/http.rs | 16 +++--- src/http/throttle.rs | 23 ++++++-- src/io.rs | 17 +----- src/remote/query.rs | 2 +- src/test.rs | 27 +++++----- 6 files changed, 130 insertions(+), 77 deletions(-) diff --git a/src/backoff.rs b/src/backoff.rs index 56f3a0c..1c4a5da 100644 --- a/src/backoff.rs +++ b/src/backoff.rs @@ -3,6 +3,7 @@ use std::sync::Arc; use serde::Serialize; use crate::error::{AddContext, GRError}; +use crate::http::throttle::ThrottleStrategy; use crate::io::{HttpRunner, RateLimitHeader}; use crate::log_error; use crate::{error, log_info, Result}; @@ -17,7 +18,8 @@ pub struct Backoff<'a, R> { rate_limit_header: RateLimitHeader, default_delay_wait: Seconds, now: fn() -> Seconds, - strategy: Box, + backoff_strategy: Box, + throttler: Box, } impl<'a, R> Backoff<'a, R> { @@ -27,6 +29,7 @@ impl<'a, R> Backoff<'a, R> { default_delay_wait: u64, now: fn() -> Seconds, strategy: Box, + throttler_strategy: Box, ) -> Self { Backoff { runner, @@ -35,7 +38,8 @@ impl<'a, R> Backoff<'a, R> { rate_limit_header: RateLimitHeader::default(), default_delay_wait: Seconds::new(default_delay_wait), now, - strategy, + backoff_strategy: strategy, + throttler: throttler_strategy, } } } @@ -73,8 +77,8 @@ impl<'a, R: HttpRunner> Backoff<'a, R> { if self.rate_limit_header.retry_after > Seconds::new(0) { base_wait_time = self.rate_limit_header.retry_after; } - self.runner.throttle( - self.strategy + self.throttler.throttle_for( + self.backoff_strategy .wait_time(base_wait_time, self.num_retries) .into(), ); @@ -87,8 +91,8 @@ impl<'a, R: HttpRunner> Backoff<'a, R> { ) => { self.num_retries += 1; if self.num_retries <= self.max_retries { - self.runner.throttle( - self.strategy + self.throttler.throttle_for( + self.backoff_strategy .wait_time(self.default_delay_wait, self.num_retries) 
.into(), ); @@ -132,7 +136,7 @@ mod tests { use crate::{ http::{self, Headers, Resource}, io::FlowControlHeaders, - test::utils::MockRunner, + test::utils::{MockRunner, MockThrottler}, time::Milliseconds, }; @@ -194,9 +198,11 @@ mod tests { .build() .unwrap(); let strategy = Box::new(Exponential); - let mut backoff = Backoff::new(&client, 3, 60, now_mock, strategy); + let throttler = Rc::new(MockThrottler::new()); + let bthrottler: Box = Box::new(Rc::clone(&throttler)); + let mut backoff = Backoff::new(&client, 3, 60, now_mock, strategy, bthrottler); backoff.retry_on_error(&mut request).unwrap(); - assert_eq!(2, *client.throttled()); + assert_eq!(2, *throttler.throttled()); } #[test] @@ -214,13 +220,18 @@ mod tests { .build() .unwrap(); let strategy = Box::new(Exponential); - let mut backoff = Backoff::new(&client, 1, 60, now_mock, strategy); + let throttler = Rc::new(MockThrottler::new()); + let bthrottler: Box = Box::new(Rc::clone(&throttler)); + let mut backoff = Backoff::new(&client, 1, 60, now_mock, strategy, bthrottler); match backoff.retry_on_error(&mut request) { Ok(_) => panic!("Expected max retries reached error"), Err(err) => match err.downcast_ref::() { Some(error::GRError::ExponentialBackoffMaxRetriesReached(_)) => { - assert_eq!(1, *client.throttled()); - assert_eq!(Milliseconds::new(62000), *client.milliseconds_throttled()); + assert_eq!(1, *throttler.throttled()); + assert_eq!( + Milliseconds::new(62000), + *throttler.milliseconds_throttled() + ); } _ => panic!("Expected max retries reached error"), }, @@ -237,9 +248,11 @@ mod tests { .build() .unwrap(); let strategy = Box::new(Exponential); - let mut backoff = Backoff::new(&client, 0, 60, now_mock, strategy); + let throttler = Rc::new(MockThrottler::new()); + let bthrottler: Box = Box::new(Rc::clone(&throttler)); + let mut backoff = Backoff::new(&client, 0, 60, now_mock, strategy, bthrottler); backoff.retry_on_error(&mut request).unwrap(); - assert_eq!(0, *client.throttled()); + assert_eq!(0, *throttler.throttled()); } #[test] @@ -252,7 +265,9 @@ mod tests { .build() .unwrap(); let strategy = Box::new(Exponential); - let mut backoff = Backoff::new(&client, 0, 60, now_mock, strategy); + let throttler = Rc::new(MockThrottler::new()); + let bthrottler: Box = Box::new(Rc::clone(&throttler)); + let mut backoff = Backoff::new(&client, 0, 60, now_mock, strategy, bthrottler); match backoff.retry_on_error(&mut request) { Ok(_) => panic!("Expected rate limit exceeded error"), Err(err) => match err.downcast_ref::() { @@ -277,13 +292,18 @@ mod tests { .build() .unwrap(); let strategy = Box::new(Exponential); - let mut backoff = Backoff::new(&client, 3, 60, now_mock, strategy); + let throttler = Rc::new(MockThrottler::new()); + let bthrottler: Box = Box::new(Rc::clone(&throttler)); + let mut backoff = Backoff::new(&client, 3, 60, now_mock, strategy, bthrottler); backoff.retry_on_error(&mut request).unwrap(); - assert_eq!(2, *client.throttled()); + assert_eq!(2, *throttler.throttled()); // 60 secs base wait, 1st retry 2^1 = 2 => 62000 milliseconds // 60 secs base wait, 2nd retry 2^2 = 4 => 64000 milliseconds // Total wait 126000 - assert_eq!(Milliseconds::new(126000), *client.milliseconds_throttled()); + assert_eq!( + Milliseconds::new(126000), + *throttler.milliseconds_throttled() + ); } #[test] @@ -301,13 +321,18 @@ mod tests { .build() .unwrap(); let strategy = Box::new(Exponential); - let mut backoff = Backoff::new(&client, 3, 60, now_mock, strategy); + let throttler = Rc::new(MockThrottler::new()); + let bthrottler: Box = 
Box::new(Rc::clone(&throttler)); + let mut backoff = Backoff::new(&client, 3, 60, now_mock, strategy, bthrottler); backoff.retry_on_error(&mut request).unwrap(); - assert_eq!(2, *client.throttled()); + assert_eq!(2, *throttler.throttled()); // 61 secs base wait, 1st retry 2^1 = 2 => 63000 milliseconds // 65 secs base wait, 2nd retry 2^2 = 4 => 69000 milliseconds // Total wait 132000 - assert_eq!(Milliseconds::new(132000), *client.milliseconds_throttled()); + assert_eq!( + Milliseconds::new(132000), + *throttler.milliseconds_throttled() + ); } #[test] @@ -326,13 +351,18 @@ mod tests { .build() .unwrap(); let strategy = Box::new(Exponential); - let mut backoff = Backoff::new(&client, 3, 60, now_mock, strategy); + let throttler = Rc::new(MockThrottler::new()); + let bthrottler: Box = Box::new(Rc::clone(&throttler)); + let mut backoff = Backoff::new(&client, 3, 60, now_mock, strategy, bthrottler); backoff.retry_on_error(&mut request).unwrap(); - assert_eq!(2, *client.throttled()); + assert_eq!(2, *throttler.throttled()); // 120 secs base wait, 1st retry 2^1 = 2 => 122000 milliseconds // 61 secs base wait, 2nd retry 2^2 = 4 => 65000 milliseconds // Total wait 187000 - assert_eq!(Milliseconds::new(187000), *client.milliseconds_throttled()); + assert_eq!( + Milliseconds::new(187000), + *throttler.milliseconds_throttled() + ); } #[test] @@ -345,11 +375,16 @@ mod tests { .build() .unwrap(); let strategy = Box::new(Exponential); - let mut backoff = Backoff::new(&client, 1, 60, now_mock, strategy); + let throttler = Rc::new(MockThrottler::new()); + let bthrottler: Box = Box::new(Rc::clone(&throttler)); + let mut backoff = Backoff::new(&client, 1, 60, now_mock, strategy, bthrottler); backoff.retry_on_error(&mut request).unwrap(); - assert_eq!(1, *client.throttled()); + assert_eq!(1, *throttler.throttled()); // Success on 2nd retry. Wait time of 1min + 2^1 = 2 => 62000 milliseconds - assert_eq!(Milliseconds::new(62000), *client.milliseconds_throttled()); + assert_eq!( + Milliseconds::new(62000), + *throttler.milliseconds_throttled() + ); } #[test] @@ -362,13 +397,18 @@ mod tests { .build() .unwrap(); let strategy = Box::new(Exponential); - let mut backoff = Backoff::new(&client, 1, 60, now_mock, strategy); + let throttler = Rc::new(MockThrottler::new()); + let bthrottler: Box = Box::new(Rc::clone(&throttler)); + let mut backoff = Backoff::new(&client, 1, 60, now_mock, strategy, bthrottler); match backoff.retry_on_error(&mut request) { Ok(_) => panic!("Expected max retries reached error"), Err(err) => match err.downcast_ref::() { Some(error::GRError::ExponentialBackoffMaxRetriesReached(_)) => { - assert_eq!(1, *client.throttled()); - assert_eq!(Milliseconds::new(62000), *client.milliseconds_throttled()); + assert_eq!(1, *throttler.throttled()); + assert_eq!( + Milliseconds::new(62000), + *throttler.milliseconds_throttled() + ); } _ => panic!("Expected max retries reached error"), }, @@ -385,11 +425,16 @@ mod tests { .build() .unwrap(); let strategy = Box::new(Exponential); - let mut backoff = Backoff::new(&client, 1, 60, now_mock, strategy); + let throttler = Rc::new(MockThrottler::new()); + let bthrottler: Box = Box::new(Rc::clone(&throttler)); + let mut backoff = Backoff::new(&client, 1, 60, now_mock, strategy, bthrottler); backoff.retry_on_error(&mut request).unwrap(); - assert_eq!(1, *client.throttled()); + assert_eq!(1, *throttler.throttled()); // Success on 2nd retry. 
Wait time of 1min + 2^1 = 2 => 62000 milliseconds - assert_eq!(Milliseconds::new(62000), *client.milliseconds_throttled()); + assert_eq!( + Milliseconds::new(62000), + *throttler.milliseconds_throttled() + ); } #[test] @@ -402,13 +447,18 @@ mod tests { .build() .unwrap(); let strategy = Box::new(Exponential); - let mut backoff = Backoff::new(&client, 1, 60, now_mock, strategy); + let throttler = Rc::new(MockThrottler::new()); + let bthrottler: Box = Box::new(Rc::clone(&throttler)); + let mut backoff = Backoff::new(&client, 1, 60, now_mock, strategy, bthrottler); match backoff.retry_on_error(&mut request) { Ok(_) => panic!("Expected max retries reached error"), Err(err) => match err.downcast_ref::() { Some(error::GRError::ExponentialBackoffMaxRetriesReached(_)) => { - assert_eq!(1, *client.throttled()); - assert_eq!(Milliseconds::new(62000), *client.milliseconds_throttled()); + assert_eq!(1, *throttler.throttled()); + assert_eq!( + Milliseconds::new(62000), + *throttler.milliseconds_throttled() + ); } _ => panic!("Expected max retries reached error"), }, diff --git a/src/http.rs b/src/http.rs index df31540..77cdffb 100644 --- a/src/http.rs +++ b/src/http.rs @@ -396,17 +396,19 @@ impl<'a, R: HttpRunner, T: Serialize> Paginator<'a, R, T> { } else { runner.api_max_pages(&request) }; + let backoff = Backoff::new( + runner, + backoff_max_retries, + backoff_default_wait_time, + time::now_epoch_seconds, + Box::new(Exponential), + Box::new(throttle::DynamicFixed::new()), + ); Paginator { request, page_url: Some(page_url.to_string()), iter: 0, - backoff: Backoff::new( - runner, - backoff_max_retries, - backoff_default_wait_time, - time::now_epoch_seconds, - Box::new(Exponential), - ), + backoff, throttler: throttle_strategy, max_pages, } diff --git a/src/http/throttle.rs b/src/http/throttle.rs index 1c87056..257b4bb 100644 --- a/src/http/throttle.rs +++ b/src/http/throttle.rs @@ -11,19 +11,36 @@ pub trait ThrottleStrategy { /// them altogether. Ex. strategies could be a fixed delay, random, or based /// on rate limiting headers. fn throttle(&self, flow_control_headers: Option<&FlowControlHeaders>); + /// Throttle for specific amount of time. + fn throttle_for(&self, delay: Milliseconds) { + log_info!("Throttling for backoff: {} ms", delay); + thread::sleep(std::time::Duration::from_millis(*delay)); + } +} + +pub struct DynamicFixed; + +impl DynamicFixed { + pub fn new() -> Self { + Self {} + } +} + +impl ThrottleStrategy for DynamicFixed { + fn throttle(&self, _flow_control_headers: Option<&FlowControlHeaders>) {} } -pub struct Fixed { +pub struct PreFixed { delay: Milliseconds, } -impl Fixed { +impl PreFixed { pub fn new(delay: Milliseconds) -> Self { Self { delay } } } -impl ThrottleStrategy for Fixed { +impl ThrottleStrategy for PreFixed { fn throttle(&self, _flow_control_headers: Option<&FlowControlHeaders>) { log_info!("Throttling for: {} ms", self.delay); thread::sleep(std::time::Duration::from_millis(*self.delay)); diff --git a/src/io.rs b/src/io.rs index 910d2fa..151ce03 100644 --- a/src/io.rs +++ b/src/io.rs @@ -7,7 +7,7 @@ use crate::{ http::{self, Headers, Request}, log_info, remote::RemoteURL, - time::{self, Milliseconds, Seconds}, + time::{self, Seconds}, Result, }; use regex::Regex; @@ -16,11 +16,8 @@ use std::{ ffi::OsStr, fmt::{self, Display, Formatter}, rc::Rc, - thread, }; -use rand::Rng; - /// A trait that handles the execution of processes with a finite lifetime. For /// example, it can be an in-memory process for testing or a shell command doing /// I/O. 
It handles all processes that do not conform with the HTTP protocol. @@ -42,18 +39,6 @@ pub trait HttpRunner { fn run(&self, cmd: &mut Request) -> Result; /// Return the number of API MAX PAGES allowed for the given Request. fn api_max_pages(&self, cmd: &Request) -> u32; - /// Milliseconds to wait before executing the next request - fn throttle(&self, milliseconds: Milliseconds) { - thread::sleep(std::time::Duration::from_millis(*milliseconds)); - } - /// Random wait time between the given range before submitting the next HTTP - /// request. The wait time is in milliseconds. The range is inclusive. - fn throttle_range(&self, min: Milliseconds, max: Milliseconds) { - let mut rng = rand::thread_rng(); - let wait_time = rng.gen_range(*min..=*max); - log_info!("Sleeping for {} milliseconds", wait_time); - thread::sleep(std::time::Duration::from_millis(wait_time)); - } } #[derive(Clone, Debug)] diff --git a/src/remote/query.rs b/src/remote/query.rs index 813a378..d685d0b 100644 --- a/src/remote/query.rs +++ b/src/remote/query.rs @@ -243,7 +243,7 @@ where backoff_wait_time = list_args.get_args.backoff_retry_after; } let throttle_strategy: Box = match throttle_time { - Some(throttle_time) => Box::new(throttle::Fixed::new(throttle_time)), + Some(throttle_time) => Box::new(throttle::PreFixed::new(throttle_time)), None => match throttle_range { Some((min, max)) => Box::new(throttle::Random::new(min, max)), None => Box::new(throttle::NoThrottle::new()), diff --git a/src/test.rs b/src/test.rs index 12c4a0f..0bfc829 100644 --- a/src/test.rs +++ b/src/test.rs @@ -171,20 +171,6 @@ pub mod utils { .unwrap_or(&ApiOperation::Project), ) } - - fn throttle(&self, milliseconds: Milliseconds) { - let mut throttled = self.throttled.borrow_mut(); - *throttled += 1; - let mut milliseconds_throttled = self.milliseconds_throttled.borrow_mut(); - *milliseconds_throttled += milliseconds; - } - - fn throttle_range(&self, min: Milliseconds, _max: Milliseconds) { - let mut throttled = self.throttled.borrow_mut(); - *throttled += 1; - let mut milliseconds_throttled = self.milliseconds_throttled.borrow_mut(); - *milliseconds_throttled += min; - } } pub struct ConfigMock { @@ -398,18 +384,24 @@ pub mod utils { pub struct MockThrottler { throttled: RefCell, + milliseconds_throttled: RefCell, } impl MockThrottler { pub fn new() -> Self { Self { throttled: RefCell::new(0), + milliseconds_throttled: RefCell::new(Milliseconds::new(0)), } } pub fn throttled(&self) -> Ref { self.throttled.borrow() } + + pub fn milliseconds_throttled(&self) -> Ref { + self.milliseconds_throttled.borrow() + } } impl http::throttle::ThrottleStrategy for Rc { @@ -417,5 +409,12 @@ pub mod utils { let mut throttled = self.throttled.borrow_mut(); *throttled += 1; } + + fn throttle_for(&self, delay: Milliseconds) { + let mut throttled = self.throttled.borrow_mut(); + *throttled += 1; + let mut milliseconds_throttled = self.milliseconds_throttled.borrow_mut(); + *milliseconds_throttled += delay; + } } } From 755be026e8c16200ed8104b26916a289b05536f0 Mon Sep 17 00:00:00 2001 From: Jordi Carrillo Bosch Date: Fri, 18 Oct 2024 21:38:21 -0700 Subject: [PATCH 15/28] Paginator: take ownership on throttling --- src/http.rs | 103 ++++++-------------------------------------- src/remote/query.rs | 2 +- 2 files changed, 14 insertions(+), 91 deletions(-) diff --git a/src/http.rs b/src/http.rs index 77cdffb..845626b 100644 --- a/src/http.rs +++ b/src/http.rs @@ -378,7 +378,7 @@ pub struct Paginator<'a, R, T> { page_url: Option, iter: u32, backoff: Backoff<'a, R>, - 
throttler: &'a dyn ThrottleStrategy, + throttler: Box, max_pages: u32, } @@ -389,7 +389,7 @@ impl<'a, R: HttpRunner, T: Serialize> Paginator<'a, R, T> { page_url: &str, backoff_max_retries: u32, backoff_default_wait_time: u64, - throttle_strategy: &'a dyn ThrottleStrategy, + throttle_strategy: Box, ) -> Self { let max_pages = if let Some(max_pages) = request.max_pages { max_pages as u32 @@ -551,14 +551,7 @@ mod test { let client = Arc::new(MockRunner::new(vec![response])); let request: Request<()> = Request::new("http://localhost", Method::GET); let throttler: Box = Box::new(NoThrottle::default()); - let paginator = Paginator::new( - &client, - request, - "http://localhost", - 0, - 60, - throttler.as_ref(), - ); + let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, throttler); let responses = paginator.collect::>>(); assert_eq!(1, responses.len()); assert_eq!("http://localhost", *client.url()); @@ -577,14 +570,7 @@ mod test { let client = Arc::new(MockRunner::new(vec![response2, response1])); let request: Request<()> = Request::new("http://localhost", Method::GET); let throttler: Box = Box::new(NoThrottle::default()); - let paginator = Paginator::new( - &client, - request, - "http://localhost", - 0, - 60, - throttler.as_ref(), - ); + let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, throttler); let responses = paginator.collect::>>(); assert_eq!(2, responses.len()); } @@ -596,14 +582,7 @@ mod test { let client = Arc::new(MockRunner::new(vec![response2, response1])); let request: Request<()> = Request::new("http://localhost", Method::GET); let throttler: Box = Box::new(NoThrottle::default()); - let paginator = Paginator::new( - &client, - request, - "http://localhost", - 0, - 60, - throttler.as_ref(), - ); + let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, throttler); let responses = paginator.collect::>>(); assert_eq!(2, responses.len()); } @@ -618,14 +597,7 @@ mod test { let client = Arc::new(MockRunner::new(vec![response])); let request: Request<()> = Request::new("http://localhost", Method::GET); let throttler: Box = Box::new(NoThrottle::default()); - let paginator = Paginator::new( - &client, - request, - "http://localhost", - 0, - 60, - throttler.as_ref(), - ); + let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, throttler); let responses = paginator.collect::>>(); assert_eq!(1, responses.len()); assert_eq!("http://localhost", *client.url()); @@ -651,14 +623,7 @@ mod test { ); let request: Request<()> = Request::new("http://localhost", Method::GET); let throttler: Box = Box::new(NoThrottle::default()); - let paginator = Paginator::new( - &client, - request, - "http://localhost", - 0, - 60, - throttler.as_ref(), - ); + let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, throttler); let responses = paginator.collect::>>(); assert_eq!(1, responses.len()); } @@ -677,14 +642,7 @@ mod test { let request: Request<()> = Request::new("http://localhost", Method::GET); let client = Arc::new(MockRunner::new(responses)); let throttler: Box = Box::new(NoThrottle::default()); - let paginator = Paginator::new( - &client, - request, - "http://localhost", - 0, - 60, - throttler.as_ref(), - ); + let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, throttler); let responses = paginator.collect::>>(); assert_eq!(REST_API_MAX_PAGES, responses.len() as u32); } @@ -809,14 +767,7 @@ mod test { .build() .unwrap(); let throttler: Box = 
Box::new(NoThrottle::default()); - let paginator = Paginator::new( - &client, - request, - "http://localhost", - 0, - 60, - throttler.as_ref(), - ); + let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, throttler); let responses = paginator.collect::>>(); assert_eq!(1, responses.len()); } @@ -830,14 +781,7 @@ mod test { let request: Request<()> = Request::new("http://localhost", Method::GET); let throttler = Rc::new(MockThrottler::new()); let bthrottler: Box = Box::new(Rc::clone(&throttler)); - let paginator = Paginator::new( - &client, - request, - "http://localhost", - 0, - 60, - bthrottler.as_ref(), - ); + let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, bthrottler); let responses = paginator.collect::>>(); assert_eq!(3, responses.len()); assert_eq!(2, *throttler.throttled()); @@ -851,14 +795,7 @@ mod test { let request: Request<()> = Request::new("http://localhost", Method::GET); let throttler = Rc::new(MockThrottler::new()); let bthrottler: Box = Box::new(Rc::clone(&throttler)); - let paginator = Paginator::new( - &client, - request, - "http://localhost", - 0, - 60, - bthrottler.as_ref(), - ); + let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, bthrottler); let responses = paginator.collect::>>(); assert_eq!(2, responses.len()); assert_eq!(1, *throttler.throttled()); @@ -873,14 +810,7 @@ mod test { let request: Request<()> = Request::new("http://localhost", Method::GET); let throttler = Rc::new(MockThrottler::new()); let bthrottler: Box = Box::new(Rc::clone(&throttler)); - let paginator = Paginator::new( - &client, - request, - "http://localhost", - 0, - 60, - bthrottler.as_ref(), - ); + let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, bthrottler); let responses = paginator.collect::>>(); assert_eq!(3, responses.len()); assert_eq!(0, *throttler.throttled()); @@ -906,14 +836,7 @@ mod test { .build() .unwrap(); let throttler: Box = Box::new(NoThrottle::default()); - let paginator = Paginator::new( - &client, - request, - "http://localhost", - 0, - 60, - throttler.as_ref(), - ); + let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, throttler); let responses = paginator.collect::>>(); assert_eq!(5, responses.len()); } diff --git a/src/remote/query.rs b/src/remote/query.rs index d685d0b..92d00da 100644 --- a/src/remote/query.rs +++ b/src/remote/query.rs @@ -255,7 +255,7 @@ where url, backoff_max_retries, backoff_wait_time, - throttle_strategy.as_ref(), + throttle_strategy, ); let all_data = paginator .map(|response| { From d8c5cde29093b67c7a90011b68f1bf6ccca1af2b Mon Sep 17 00:00:00 2001 From: Jordi Carrillo Bosch Date: Fri, 18 Oct 2024 21:50:46 -0700 Subject: [PATCH 16/28] Inject backoff into paginator --- src/http.rs | 124 ++++++++++++++++++++++++++++++++++++-------- src/remote/query.rs | 11 ++-- 2 files changed, 109 insertions(+), 26 deletions(-) diff --git a/src/http.rs b/src/http.rs index 845626b..de24736 100644 --- a/src/http.rs +++ b/src/http.rs @@ -1,7 +1,7 @@ pub mod throttle; use crate::api_traits::ApiOperation; -use crate::backoff::{Backoff, Exponential}; +use crate::backoff::Backoff; use crate::cache::{Cache, CacheState}; use crate::config::ConfigProperties; use crate::error::GRError; @@ -387,8 +387,7 @@ impl<'a, R: HttpRunner, T: Serialize> Paginator<'a, R, T> { runner: &'a Arc, request: Request<'a, T>, page_url: &str, - backoff_max_retries: u32, - backoff_default_wait_time: u64, + backoff: Backoff<'a, R>, throttle_strategy: Box, ) -> Self { let 
max_pages = if let Some(max_pages) = request.max_pages { @@ -396,14 +395,6 @@ impl<'a, R: HttpRunner, T: Serialize> Paginator<'a, R, T> { } else { runner.api_max_pages(&request) }; - let backoff = Backoff::new( - runner, - backoff_max_retries, - backoff_default_wait_time, - time::now_epoch_seconds, - Box::new(Exponential), - Box::new(throttle::DynamicFixed::new()), - ); Paginator { request, page_url: Some(page_url.to_string()), @@ -470,6 +461,7 @@ mod test { use crate::{ api_defaults::REST_API_MAX_PAGES, + backoff::Exponential, cache, io::{Page, PageHeader}, test::utils::{ConfigMock, MockRunner, MockThrottler}, @@ -551,7 +543,15 @@ mod test { let client = Arc::new(MockRunner::new(vec![response])); let request: Request<()> = Request::new("http://localhost", Method::GET); let throttler: Box = Box::new(NoThrottle::default()); - let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, throttler); + let backoff = Backoff::new( + &client, + 0, + 60, + time::now_epoch_seconds, + Box::new(Exponential), + Box::new(throttle::DynamicFixed::new()), + ); + let paginator = Paginator::new(&client, request, "http://localhost", backoff, throttler); let responses = paginator.collect::>>(); assert_eq!(1, responses.len()); assert_eq!("http://localhost", *client.url()); @@ -570,7 +570,15 @@ mod test { let client = Arc::new(MockRunner::new(vec![response2, response1])); let request: Request<()> = Request::new("http://localhost", Method::GET); let throttler: Box = Box::new(NoThrottle::default()); - let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, throttler); + let backoff = Backoff::new( + &client, + 0, + 60, + time::now_epoch_seconds, + Box::new(Exponential), + Box::new(throttle::DynamicFixed::new()), + ); + let paginator = Paginator::new(&client, request, "http://localhost", backoff, throttler); let responses = paginator.collect::>>(); assert_eq!(2, responses.len()); } @@ -582,7 +590,15 @@ mod test { let client = Arc::new(MockRunner::new(vec![response2, response1])); let request: Request<()> = Request::new("http://localhost", Method::GET); let throttler: Box = Box::new(NoThrottle::default()); - let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, throttler); + let backoff = Backoff::new( + &client, + 0, + 60, + time::now_epoch_seconds, + Box::new(Exponential), + Box::new(throttle::DynamicFixed::new()), + ); + let paginator = Paginator::new(&client, request, "http://localhost", backoff, throttler); let responses = paginator.collect::>>(); assert_eq!(2, responses.len()); } @@ -597,7 +613,15 @@ mod test { let client = Arc::new(MockRunner::new(vec![response])); let request: Request<()> = Request::new("http://localhost", Method::GET); let throttler: Box = Box::new(NoThrottle::default()); - let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, throttler); + let backoff = Backoff::new( + &client, + 0, + 60, + time::now_epoch_seconds, + Box::new(Exponential), + Box::new(throttle::DynamicFixed::new()), + ); + let paginator = Paginator::new(&client, request, "http://localhost", backoff, throttler); let responses = paginator.collect::>>(); assert_eq!(1, responses.len()); assert_eq!("http://localhost", *client.url()); @@ -623,7 +647,15 @@ mod test { ); let request: Request<()> = Request::new("http://localhost", Method::GET); let throttler: Box = Box::new(NoThrottle::default()); - let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, throttler); + let backoff = Backoff::new( + &client, + 0, + 60, + 
time::now_epoch_seconds, + Box::new(Exponential), + Box::new(throttle::DynamicFixed::new()), + ); + let paginator = Paginator::new(&client, request, "http://localhost", backoff, throttler); let responses = paginator.collect::>>(); assert_eq!(1, responses.len()); } @@ -642,7 +674,15 @@ mod test { let request: Request<()> = Request::new("http://localhost", Method::GET); let client = Arc::new(MockRunner::new(responses)); let throttler: Box = Box::new(NoThrottle::default()); - let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, throttler); + let backoff = Backoff::new( + &client, + 0, + 60, + time::now_epoch_seconds, + Box::new(Exponential), + Box::new(throttle::DynamicFixed::new()), + ); + let paginator = Paginator::new(&client, request, "http://localhost", backoff, throttler); let responses = paginator.collect::>>(); assert_eq!(REST_API_MAX_PAGES, responses.len() as u32); } @@ -767,7 +807,15 @@ mod test { .build() .unwrap(); let throttler: Box = Box::new(NoThrottle::default()); - let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, throttler); + let backoff = Backoff::new( + &client, + 0, + 60, + time::now_epoch_seconds, + Box::new(Exponential), + Box::new(throttle::DynamicFixed::new()), + ); + let paginator = Paginator::new(&client, request, "http://localhost", backoff, throttler); let responses = paginator.collect::>>(); assert_eq!(1, responses.len()); } @@ -781,7 +829,15 @@ mod test { let request: Request<()> = Request::new("http://localhost", Method::GET); let throttler = Rc::new(MockThrottler::new()); let bthrottler: Box = Box::new(Rc::clone(&throttler)); - let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, bthrottler); + let backoff = Backoff::new( + &client, + 0, + 60, + time::now_epoch_seconds, + Box::new(Exponential), + Box::new(throttle::DynamicFixed::new()), + ); + let paginator = Paginator::new(&client, request, "http://localhost", backoff, bthrottler); let responses = paginator.collect::>>(); assert_eq!(3, responses.len()); assert_eq!(2, *throttler.throttled()); @@ -795,7 +851,15 @@ mod test { let request: Request<()> = Request::new("http://localhost", Method::GET); let throttler = Rc::new(MockThrottler::new()); let bthrottler: Box = Box::new(Rc::clone(&throttler)); - let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, bthrottler); + let backoff = Backoff::new( + &client, + 0, + 60, + time::now_epoch_seconds, + Box::new(Exponential), + Box::new(throttle::DynamicFixed::new()), + ); + let paginator = Paginator::new(&client, request, "http://localhost", backoff, bthrottler); let responses = paginator.collect::>>(); assert_eq!(2, responses.len()); assert_eq!(1, *throttler.throttled()); @@ -810,7 +874,15 @@ mod test { let request: Request<()> = Request::new("http://localhost", Method::GET); let throttler = Rc::new(MockThrottler::new()); let bthrottler: Box = Box::new(Rc::clone(&throttler)); - let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, bthrottler); + let backoff = Backoff::new( + &client, + 0, + 60, + time::now_epoch_seconds, + Box::new(Exponential), + Box::new(throttle::DynamicFixed::new()), + ); + let paginator = Paginator::new(&client, request, "http://localhost", backoff, bthrottler); let responses = paginator.collect::>>(); assert_eq!(3, responses.len()); assert_eq!(0, *throttler.throttled()); @@ -835,8 +907,16 @@ mod test { .max_pages(5) .build() .unwrap(); + let backoff = Backoff::new( + &client, + 0, + 60, + time::now_epoch_seconds, + 
Box::new(Exponential), + Box::new(throttle::DynamicFixed::new()), + ); let throttler: Box = Box::new(NoThrottle::default()); - let paginator = Paginator::new(&client, request, "http://localhost", 0, 60, throttler); + let paginator = Paginator::new(&client, request, "http://localhost", backoff, throttler); let responses = paginator.collect::>>(); assert_eq!(5, responses.len()); } diff --git a/src/remote/query.rs b/src/remote/query.rs index 92d00da..d1423af 100644 --- a/src/remote/query.rs +++ b/src/remote/query.rs @@ -5,8 +5,10 @@ use std::sync::Arc; use serde::Serialize; use crate::api_traits::Timestamp; +use crate::backoff::{Backoff, Exponential}; use crate::display::DisplayBody; use crate::http::throttle::{self, ThrottleStrategy}; +use crate::time; use crate::{ api_defaults, api_traits::{ApiOperation, NumberDeltaErr}, @@ -249,14 +251,15 @@ where None => Box::new(throttle::NoThrottle::new()), }, }; - let paginator = Paginator::new( + let backoff = Backoff::new( runner, - request, - url, backoff_max_retries, backoff_wait_time, - throttle_strategy, + time::now_epoch_seconds, + Box::new(Exponential), + Box::new(throttle::DynamicFixed::new()), ); + let paginator = Paginator::new(runner, request, url, backoff, throttle_strategy); let all_data = paginator .map(|response| { let response = response?; From 8843a47deb08c0a757f1f5916a5cbb5c28ea4a1e Mon Sep 17 00:00:00 2001 From: Jordi Carrillo Bosch Date: Fri, 18 Oct 2024 22:09:37 -0700 Subject: [PATCH 17/28] Use throttle DynamixFixed instead of new() --- src/http.rs | 22 +++++++++++----------- src/http/throttle.rs | 6 ------ src/remote/query.rs | 2 +- 3 files changed, 12 insertions(+), 18 deletions(-) diff --git a/src/http.rs b/src/http.rs index de24736..e2f5382 100644 --- a/src/http.rs +++ b/src/http.rs @@ -549,7 +549,7 @@ mod test { 60, time::now_epoch_seconds, Box::new(Exponential), - Box::new(throttle::DynamicFixed::new()), + Box::new(throttle::DynamicFixed), ); let paginator = Paginator::new(&client, request, "http://localhost", backoff, throttler); let responses = paginator.collect::>>(); @@ -576,7 +576,7 @@ mod test { 60, time::now_epoch_seconds, Box::new(Exponential), - Box::new(throttle::DynamicFixed::new()), + Box::new(throttle::DynamicFixed), ); let paginator = Paginator::new(&client, request, "http://localhost", backoff, throttler); let responses = paginator.collect::>>(); @@ -596,7 +596,7 @@ mod test { 60, time::now_epoch_seconds, Box::new(Exponential), - Box::new(throttle::DynamicFixed::new()), + Box::new(throttle::DynamicFixed), ); let paginator = Paginator::new(&client, request, "http://localhost", backoff, throttler); let responses = paginator.collect::>>(); @@ -619,7 +619,7 @@ mod test { 60, time::now_epoch_seconds, Box::new(Exponential), - Box::new(throttle::DynamicFixed::new()), + Box::new(throttle::DynamicFixed), ); let paginator = Paginator::new(&client, request, "http://localhost", backoff, throttler); let responses = paginator.collect::>>(); @@ -653,7 +653,7 @@ mod test { 60, time::now_epoch_seconds, Box::new(Exponential), - Box::new(throttle::DynamicFixed::new()), + Box::new(throttle::DynamicFixed), ); let paginator = Paginator::new(&client, request, "http://localhost", backoff, throttler); let responses = paginator.collect::>>(); @@ -680,7 +680,7 @@ mod test { 60, time::now_epoch_seconds, Box::new(Exponential), - Box::new(throttle::DynamicFixed::new()), + Box::new(throttle::DynamicFixed), ); let paginator = Paginator::new(&client, request, "http://localhost", backoff, throttler); let responses = paginator.collect::>>(); @@ 
-813,7 +813,7 @@ mod test { 60, time::now_epoch_seconds, Box::new(Exponential), - Box::new(throttle::DynamicFixed::new()), + Box::new(throttle::DynamicFixed), ); let paginator = Paginator::new(&client, request, "http://localhost", backoff, throttler); let responses = paginator.collect::>>(); @@ -835,7 +835,7 @@ mod test { 60, time::now_epoch_seconds, Box::new(Exponential), - Box::new(throttle::DynamicFixed::new()), + Box::new(throttle::DynamicFixed), ); let paginator = Paginator::new(&client, request, "http://localhost", backoff, bthrottler); let responses = paginator.collect::>>(); @@ -857,7 +857,7 @@ mod test { 60, time::now_epoch_seconds, Box::new(Exponential), - Box::new(throttle::DynamicFixed::new()), + Box::new(throttle::DynamicFixed), ); let paginator = Paginator::new(&client, request, "http://localhost", backoff, bthrottler); let responses = paginator.collect::>>(); @@ -880,7 +880,7 @@ mod test { 60, time::now_epoch_seconds, Box::new(Exponential), - Box::new(throttle::DynamicFixed::new()), + Box::new(throttle::DynamicFixed), ); let paginator = Paginator::new(&client, request, "http://localhost", backoff, bthrottler); let responses = paginator.collect::>>(); @@ -913,7 +913,7 @@ mod test { 60, time::now_epoch_seconds, Box::new(Exponential), - Box::new(throttle::DynamicFixed::new()), + Box::new(throttle::DynamicFixed), ); let throttler: Box = Box::new(NoThrottle::default()); let paginator = Paginator::new(&client, request, "http://localhost", backoff, throttler); diff --git a/src/http/throttle.rs b/src/http/throttle.rs index 257b4bb..b181f2b 100644 --- a/src/http/throttle.rs +++ b/src/http/throttle.rs @@ -20,12 +20,6 @@ pub trait ThrottleStrategy { pub struct DynamicFixed; -impl DynamicFixed { - pub fn new() -> Self { - Self {} - } -} - impl ThrottleStrategy for DynamicFixed { fn throttle(&self, _flow_control_headers: Option<&FlowControlHeaders>) {} } diff --git a/src/remote/query.rs b/src/remote/query.rs index d1423af..f2a7c1d 100644 --- a/src/remote/query.rs +++ b/src/remote/query.rs @@ -257,7 +257,7 @@ where backoff_wait_time, time::now_epoch_seconds, Box::new(Exponential), - Box::new(throttle::DynamicFixed::new()), + Box::new(throttle::DynamicFixed), ); let paginator = Paginator::new(runner, request, url, backoff, throttle_strategy); let all_data = paginator From d25fa0fd33796c7a5f595d7be87225dd8c4eb456 Mon Sep 17 00:00:00 2001 From: Jordi Carrillo Bosch Date: Sat, 19 Oct 2024 09:05:08 -0700 Subject: [PATCH 18/28] backoff: adjust logging --- src/backoff.rs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/backoff.rs b/src/backoff.rs index 1c4a5da..83befcb 100644 --- a/src/backoff.rs +++ b/src/backoff.rs @@ -50,6 +50,13 @@ impl<'a, R: HttpRunner> Backoff<'a, R> { request: &mut Request, ) -> Result { loop { + if self.num_retries > 0 { + log_info!( + "Retrying request {} out of {}", + self.num_retries, + self.max_retries + ); + } match self.runner.run(request) { Ok(response) => return Ok(response), Err(err) => { @@ -57,11 +64,6 @@ impl<'a, R: HttpRunner> Backoff<'a, R> { return Err(err); } log_error!("Error: {}", err); - log_info!( - "Backoff enabled re-trying {} out of {}", - self.num_retries + 1, - self.max_retries - ); // https://docs.github.com/en/rest/using-the-rest-api/rate-limits-for-the-rest-api?apiVersion=2022-11-28#exceeding-the-rate-limit match err.downcast_ref::() { Some(error::GRError::RateLimitExceeded(headers)) => { @@ -122,7 +124,7 @@ pub struct Exponential; impl BackOffStrategy for Exponential { fn wait_time(&self, base_wait: 
Seconds, num_retries: u32) -> Seconds { - log_info!("Exponential backoff strategy"); + log_info!("Exponential backoff strategy enabled"); let wait_time = base_wait + 2u64.pow(num_retries).into(); log_info!("Waiting for {} seconds", wait_time); wait_time From e142c87a44c6d3cf0ccb0666fe9620c922d2da3c Mon Sep 17 00:00:00 2001 From: Jordi Carrillo Bosch Date: Sat, 19 Oct 2024 09:08:27 -0700 Subject: [PATCH 19/28] Add doc for DynamicFixed throttling --- src/http/throttle.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/http/throttle.rs b/src/http/throttle.rs index b181f2b..6e607e0 100644 --- a/src/http/throttle.rs +++ b/src/http/throttle.rs @@ -18,6 +18,10 @@ pub trait ThrottleStrategy { } } +/// Dynamically throttles for the amount of time specified in the throttle_for +/// method using the default trait implementation. As opposed to the PreFixed, +/// which takes a fixed delay in the constructor and throttles for that amount +/// of time every time. pub struct DynamicFixed; impl ThrottleStrategy for DynamicFixed { From bba9478229d880b296c33376324891932acb2650 Mon Sep 17 00:00:00 2001 From: Jordi Carrillo Bosch Date: Sat, 19 Oct 2024 09:18:48 -0700 Subject: [PATCH 20/28] backoff: Add backoff enabled log --- src/backoff.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/backoff.rs b/src/backoff.rs index 83befcb..9b43272 100644 --- a/src/backoff.rs +++ b/src/backoff.rs @@ -42,6 +42,10 @@ impl<'a, R> Backoff<'a, R> { throttler: throttler_strategy, } } + + fn log_backoff_enabled(&self) { + log_info!("Backoff enabled with {} max retries", self.max_retries); + } } impl<'a, R: HttpRunner> Backoff<'a, R> { @@ -79,6 +83,7 @@ impl<'a, R: HttpRunner> Backoff<'a, R> { if self.rate_limit_header.retry_after > Seconds::new(0) { base_wait_time = self.rate_limit_header.retry_after; } + self.log_backoff_enabled(); self.throttler.throttle_for( self.backoff_strategy .wait_time(base_wait_time, self.num_retries) @@ -93,6 +98,7 @@ impl<'a, R: HttpRunner> Backoff<'a, R> { ) => { self.num_retries += 1; if self.num_retries <= self.max_retries { + self.log_backoff_enabled(); self.throttler.throttle_for( self.backoff_strategy .wait_time(self.default_delay_wait, self.num_retries) From 85f53dab6031c888aaac08422946e52e7d189815 Mon Sep 17 00:00:00 2001 From: Jordi Carrillo Bosch Date: Sat, 19 Oct 2024 09:31:26 -0700 Subject: [PATCH 21/28] Ignore .description --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index ff7dc0f..1801e02 100644 --- a/.gitignore +++ b/.gitignore @@ -12,3 +12,4 @@ result contracts/__pycache__ lcov.info /doc/book +.description From 1b8b4d10878ab94b3425710208befd44bc8aae07 Mon Sep 17 00:00:00 2001 From: Jordi Carrillo Bosch Date: Sat, 19 Oct 2024 12:11:23 -0700 Subject: [PATCH 22/28] Implement auto throttling --- src/http/throttle.rs | 80 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 78 insertions(+), 2 deletions(-) diff --git a/src/http/throttle.rs b/src/http/throttle.rs index 6e607e0..478052d 100644 --- a/src/http/throttle.rs +++ b/src/http/throttle.rs @@ -1,8 +1,15 @@ +//! Throttle module provides different strategies to throttle requests based on +//! flow control headers or provided delays. 
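// Illustrative aside, not part of this patch: a minimal custom strategy
// written against the ThrottleStrategy trait as it stands at this point in
// the series (throttle() plus the default throttle_for()). The type name
// HalfSecond and the 500 ms pause are assumptions chosen only for the example;
// FlowControlHeaders and Milliseconds are the types already imported by this module.
pub struct HalfSecond;

impl ThrottleStrategy for HalfSecond {
    fn throttle(&self, _flow_control_headers: Option<&FlowControlHeaders>) {
        // Ignore the flow-control headers and always pause for a fixed 500 ms,
        // reusing the trait's default throttle_for() helper.
        self.throttle_for(Milliseconds::new(500));
    }
}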
+ use std::thread; use rand::Rng; -use crate::{io::FlowControlHeaders, log_info, time::Milliseconds}; +use crate::{ + io::FlowControlHeaders, + log_debug, log_info, + time::{self, Milliseconds, Seconds}, +}; /// Throttle strategy pub trait ThrottleStrategy { @@ -13,7 +20,7 @@ pub trait ThrottleStrategy { fn throttle(&self, flow_control_headers: Option<&FlowControlHeaders>); /// Throttle for specific amount of time. fn throttle_for(&self, delay: Milliseconds) { - log_info!("Throttling for backoff: {} ms", delay); + log_info!("Throttling for : {} ms", delay); thread::sleep(std::time::Duration::from_millis(*delay)); } } @@ -87,3 +94,72 @@ impl ThrottleStrategy for NoThrottle { log_info!("No throttling enabled"); } } + +/// AutoRate implements an automatic throttling algorithm that limits the +/// rate of requests based on flow control headers from the HTTP response plus a +/// fixed random delay to avoid being predictable and too fast for the server. +/// Inspiration ref: https://en.wikipedia.org/wiki/Leaky_bucket +pub struct AutoRate { + /// Max interval milliseconds added to the automatic throttle. In order to + /// avoid predictability, the minimum range is 1 second. The jitter is the + /// max interval added to the automatic throttle. (1, jitter) milliseconds. + jitter_max: Milliseconds, + jitter_min: Milliseconds, + now: fn() -> Seconds, +} + +const DEFAULT_JITTER_MAX_MILLISECONDS: u64 = 5000; +const DEFAULT_JITTER_MIN_MILLISECONDS: u64 = 1000; + +impl Default for AutoRate { + fn default() -> Self { + Self { + jitter_max: Milliseconds::from(DEFAULT_JITTER_MAX_MILLISECONDS), + jitter_min: Milliseconds::from(DEFAULT_JITTER_MIN_MILLISECONDS), + now: time::now_epoch_seconds, + } + } +} + +impl ThrottleStrategy for AutoRate { + fn throttle(&self, flow_control_headers: Option<&FlowControlHeaders>) { + match flow_control_headers { + Some(headers) => { + let rate_limit_headers = headers.get_rate_limit_header(); + match *rate_limit_headers { + Some(headers) => { + // In order to avoid rate limited, we need to space the + // requests evenly using: time to ratelimit-reset + // (secs)/ratelimit-remaining (requests). + let now = *(self.now)(); + log_debug!("Current epoch: {}", now); + log_debug!("Rate limit reset: {}", headers.reset); + let time_to_reset = headers.reset.saturating_sub(now); + log_debug!("Time to reset: {}", time_to_reset); + log_debug!("Remaining requests: {}", headers.remaining); + let delay = time_to_reset / headers.remaining as u64; + // Avoid predictability and being too fast. We could end up + // being too fast when the amount of remaining requests + // is high and the reset time is low. We additionally + // wait in between jitter_min and jitter_max milliseconds. + let additional_delay = + rand::thread_rng().gen_range(*self.jitter_min..=*self.jitter_max); + let total_delay = delay + additional_delay; + log_info!("AutoRate throttling enabled"); + self.throttle_for(Milliseconds::from(total_delay)); + } + None => { + // When the response has status 304 Not Modified, we don't get + // any rate limiting headers. In this case, we just throttle + // randomly between the min and max jitter. 
+ let rand_delay_jitter = + rand::thread_rng().gen_range(*self.jitter_min..=*self.jitter_max); + log_info!("AutoRate throttling enabled"); + self.throttle_for(Milliseconds::from(rand_delay_jitter)); + } + } + } + None => (), + }; + } +} From a65ae0d8cc4ba946f12ff7c29a83456a6b90357e Mon Sep 17 00:00:00 2001 From: Jordi Carrillo Bosch Date: Sat, 19 Oct 2024 23:22:58 -0700 Subject: [PATCH 23/28] Enable auto throttle --- src/remote/query.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/remote/query.rs b/src/remote/query.rs index f2a7c1d..5940dcc 100644 --- a/src/remote/query.rs +++ b/src/remote/query.rs @@ -248,7 +248,7 @@ where Some(throttle_time) => Box::new(throttle::PreFixed::new(throttle_time)), None => match throttle_range { Some((min, max)) => Box::new(throttle::Random::new(min, max)), - None => Box::new(throttle::NoThrottle::new()), + None => Box::new(throttle::AutoRate::default()), }, }; let backoff = Backoff::new( From 8f27f69dda45cef6b656c5f3767aa8d80bc82742 Mon Sep 17 00:00:00 2001 From: Jordi Carrillo Bosch Date: Sun, 20 Oct 2024 17:51:39 -0700 Subject: [PATCH 24/28] pagination: Engage autothrottle after 3 HTTP calls --- src/api_defaults.rs | 7 ++++++ src/backoff.rs | 22 ++++++++--------- src/http.rs | 56 ++++++++++++++++++++++++++++++++++++++------ src/http/throttle.rs | 30 +++++++++++++++++++++--- src/test.rs | 14 +++++++++-- 5 files changed, 106 insertions(+), 23 deletions(-) diff --git a/src/api_defaults.rs b/src/api_defaults.rs index 5525f99..25b89ff 100644 --- a/src/api_defaults.rs +++ b/src/api_defaults.rs @@ -18,3 +18,10 @@ pub const DEFAULT_NUMBER_REQUESTS_MINUTE: u32 = 80; pub const DEFAULT_PER_PAGE: u32 = 30; pub const EXPIRE_IMMEDIATELY: &str = "0s"; + +// Default jitter values for autorate throttling. +pub const DEFAULT_JITTER_MAX_MILLISECONDS: u64 = 5000; +pub const DEFAULT_JITTER_MIN_MILLISECONDS: u64 = 1000; + +// Trigger autorate throttling after 3 API calls. 
+pub const ENGAGE_AUTORATE_THROTTLING_THRESHOLD: u32 = 3; diff --git a/src/backoff.rs b/src/backoff.rs index 9b43272..9ee2d14 100644 --- a/src/backoff.rs +++ b/src/backoff.rs @@ -206,7 +206,7 @@ mod tests { .build() .unwrap(); let strategy = Box::new(Exponential); - let throttler = Rc::new(MockThrottler::new()); + let throttler = Rc::new(MockThrottler::new(None)); let bthrottler: Box = Box::new(Rc::clone(&throttler)); let mut backoff = Backoff::new(&client, 3, 60, now_mock, strategy, bthrottler); backoff.retry_on_error(&mut request).unwrap(); @@ -228,7 +228,7 @@ mod tests { .build() .unwrap(); let strategy = Box::new(Exponential); - let throttler = Rc::new(MockThrottler::new()); + let throttler = Rc::new(MockThrottler::new(None)); let bthrottler: Box = Box::new(Rc::clone(&throttler)); let mut backoff = Backoff::new(&client, 1, 60, now_mock, strategy, bthrottler); match backoff.retry_on_error(&mut request) { @@ -256,7 +256,7 @@ mod tests { .build() .unwrap(); let strategy = Box::new(Exponential); - let throttler = Rc::new(MockThrottler::new()); + let throttler = Rc::new(MockThrottler::new(None)); let bthrottler: Box = Box::new(Rc::clone(&throttler)); let mut backoff = Backoff::new(&client, 0, 60, now_mock, strategy, bthrottler); backoff.retry_on_error(&mut request).unwrap(); @@ -273,7 +273,7 @@ mod tests { .build() .unwrap(); let strategy = Box::new(Exponential); - let throttler = Rc::new(MockThrottler::new()); + let throttler = Rc::new(MockThrottler::new(None)); let bthrottler: Box = Box::new(Rc::clone(&throttler)); let mut backoff = Backoff::new(&client, 0, 60, now_mock, strategy, bthrottler); match backoff.retry_on_error(&mut request) { @@ -300,7 +300,7 @@ mod tests { .build() .unwrap(); let strategy = Box::new(Exponential); - let throttler = Rc::new(MockThrottler::new()); + let throttler = Rc::new(MockThrottler::new(None)); let bthrottler: Box = Box::new(Rc::clone(&throttler)); let mut backoff = Backoff::new(&client, 3, 60, now_mock, strategy, bthrottler); backoff.retry_on_error(&mut request).unwrap(); @@ -329,7 +329,7 @@ mod tests { .build() .unwrap(); let strategy = Box::new(Exponential); - let throttler = Rc::new(MockThrottler::new()); + let throttler = Rc::new(MockThrottler::new(None)); let bthrottler: Box = Box::new(Rc::clone(&throttler)); let mut backoff = Backoff::new(&client, 3, 60, now_mock, strategy, bthrottler); backoff.retry_on_error(&mut request).unwrap(); @@ -359,7 +359,7 @@ mod tests { .build() .unwrap(); let strategy = Box::new(Exponential); - let throttler = Rc::new(MockThrottler::new()); + let throttler = Rc::new(MockThrottler::new(None)); let bthrottler: Box = Box::new(Rc::clone(&throttler)); let mut backoff = Backoff::new(&client, 3, 60, now_mock, strategy, bthrottler); backoff.retry_on_error(&mut request).unwrap(); @@ -383,7 +383,7 @@ mod tests { .build() .unwrap(); let strategy = Box::new(Exponential); - let throttler = Rc::new(MockThrottler::new()); + let throttler = Rc::new(MockThrottler::new(None)); let bthrottler: Box = Box::new(Rc::clone(&throttler)); let mut backoff = Backoff::new(&client, 1, 60, now_mock, strategy, bthrottler); backoff.retry_on_error(&mut request).unwrap(); @@ -405,7 +405,7 @@ mod tests { .build() .unwrap(); let strategy = Box::new(Exponential); - let throttler = Rc::new(MockThrottler::new()); + let throttler = Rc::new(MockThrottler::new(None)); let bthrottler: Box = Box::new(Rc::clone(&throttler)); let mut backoff = Backoff::new(&client, 1, 60, now_mock, strategy, bthrottler); match backoff.retry_on_error(&mut request) { @@ -433,7 +433,7 @@ 
mod tests { .build() .unwrap(); let strategy = Box::new(Exponential); - let throttler = Rc::new(MockThrottler::new()); + let throttler = Rc::new(MockThrottler::new(None)); let bthrottler: Box = Box::new(Rc::clone(&throttler)); let mut backoff = Backoff::new(&client, 1, 60, now_mock, strategy, bthrottler); backoff.retry_on_error(&mut request).unwrap(); @@ -455,7 +455,7 @@ mod tests { .build() .unwrap(); let strategy = Box::new(Exponential); - let throttler = Rc::new(MockThrottler::new()); + let throttler = Rc::new(MockThrottler::new(None)); let bthrottler: Box = Box::new(Rc::clone(&throttler)); let mut backoff = Backoff::new(&client, 1, 60, now_mock, strategy, bthrottler); match backoff.retry_on_error(&mut request) { diff --git a/src/http.rs b/src/http.rs index e2f5382..dc750c7 100644 --- a/src/http.rs +++ b/src/http.rs @@ -18,7 +18,7 @@ use std::collections::{hash_map, HashMap}; use std::iter::Iterator; use std::rc::Rc; use std::sync::{Arc, Mutex}; -use throttle::ThrottleStrategy; +use throttle::{ThrottleStrategy, ThrottleStrategyType}; use ureq::Error; pub struct Client { @@ -439,12 +439,19 @@ impl<'a, T: Serialize, R: HttpRunner> Iterator for Pagi }; self.iter += 1; if self.iter < self.max_pages && self.page_url.is_some() { + let response = response.as_ref().unwrap(); // Technically no need to check ok on response, as page_url is Some // (response was Ok) - let response = response.as_ref().unwrap(); if !response.local_cache { - self.throttler - .throttle(Some(response.get_flow_control_headers())); + if self.throttler.strategy() == ThrottleStrategyType::AutoRate { + if self.iter >= api_defaults::ENGAGE_AUTORATE_THROTTLING_THRESHOLD { + self.throttler + .throttle(Some(response.get_flow_control_headers())); + } + } else { + self.throttler + .throttle(Some(response.get_flow_control_headers())); + } } } return Some(response); @@ -827,7 +834,7 @@ mod test { let response3 = response_with_last_page(); let client = Arc::new(MockRunner::new(vec![response3, response2, response1])); let request: Request<()> = Request::new("http://localhost", Method::GET); - let throttler = Rc::new(MockThrottler::new()); + let throttler = Rc::new(MockThrottler::new(None)); let bthrottler: Box = Box::new(Rc::clone(&throttler)); let backoff = Backoff::new( &client, @@ -849,7 +856,7 @@ mod test { let response2 = response_with_last_page(); let client = Arc::new(MockRunner::new(vec![response2, response1])); let request: Request<()> = Request::new("http://localhost", Method::GET); - let throttler = Rc::new(MockThrottler::new()); + let throttler = Rc::new(MockThrottler::new(None)); let bthrottler: Box = Box::new(Rc::clone(&throttler)); let backoff = Backoff::new( &client, @@ -872,7 +879,7 @@ mod test { let response3 = cached_response_last_page(); let client = Arc::new(MockRunner::new(vec![response3, response2, response1])); let request: Request<()> = Request::new("http://localhost", Method::GET); - let throttler = Rc::new(MockThrottler::new()); + let throttler = Rc::new(MockThrottler::new(None)); let bthrottler: Box = Box::new(Rc::clone(&throttler)); let backoff = Backoff::new( &client, @@ -920,4 +927,39 @@ mod test { let responses = paginator.collect::>>(); assert_eq!(5, responses.len()); } + + #[test] + fn test_paginator_auto_throttle_enabled_after_autorate_engage_threshold() { + let response1 = response_with_next_page(); + let response2 = response_with_next_page(); + let response3 = response_with_next_page(); + // Throttles in next two requests + let response4 = response_with_next_page(); + let response5 = 
response_with_last_page(); + let client = Arc::new(MockRunner::new(vec![ + response5, response4, response3, response2, response1, + ])); + let request: Request<()> = Request::builder() + .method(Method::GET) + .resource(Resource::new("http://localhost", None)) + .max_pages(5) + .build() + .unwrap(); + let throttler = Rc::new(MockThrottler::new(Some( + throttle::ThrottleStrategyType::AutoRate, + ))); + let bthrottler: Box = Box::new(Rc::clone(&throttler)); + let backoff = Backoff::new( + &client, + 0, + 60, + time::now_epoch_seconds, + Box::new(Exponential), + Box::new(throttle::DynamicFixed), + ); + let paginator = Paginator::new(&client, request, "http://localhost", backoff, bthrottler); + let responses = paginator.collect::>>(); + assert_eq!(5, responses.len()); + assert_eq!(2, *throttler.throttled()); + } } diff --git a/src/http/throttle.rs b/src/http/throttle.rs index 478052d..26c952e 100644 --- a/src/http/throttle.rs +++ b/src/http/throttle.rs @@ -6,6 +6,7 @@ use std::thread; use rand::Rng; use crate::{ + api_defaults::{DEFAULT_JITTER_MAX_MILLISECONDS, DEFAULT_JITTER_MIN_MILLISECONDS}, io::FlowControlHeaders, log_debug, log_info, time::{self, Milliseconds, Seconds}, @@ -23,6 +24,17 @@ pub trait ThrottleStrategy { log_info!("Throttling for : {} ms", delay); thread::sleep(std::time::Duration::from_millis(*delay)); } + /// Return strategy type + fn strategy(&self) -> ThrottleStrategyType; +} + +#[derive(Clone, Debug, PartialEq)] +pub enum ThrottleStrategyType { + PreFixed, + DynamicFixed, + Random, + AutoRate, + NoThrottle, } /// Dynamically throttles for the amount of time specified in the throttle_for @@ -33,6 +45,9 @@ pub struct DynamicFixed; impl ThrottleStrategy for DynamicFixed { fn throttle(&self, _flow_control_headers: Option<&FlowControlHeaders>) {} + fn strategy(&self) -> ThrottleStrategyType { + ThrottleStrategyType::DynamicFixed + } } pub struct PreFixed { @@ -50,6 +65,9 @@ impl ThrottleStrategy for PreFixed { log_info!("Throttling for: {} ms", self.delay); thread::sleep(std::time::Duration::from_millis(*self.delay)); } + fn strategy(&self) -> ThrottleStrategyType { + ThrottleStrategyType::PreFixed + } } pub struct Random { @@ -78,6 +96,9 @@ impl ThrottleStrategy for Random { log_info!("Sleeping for {} milliseconds", wait_time); thread::sleep(std::time::Duration::from_millis(wait_time)); } + fn strategy(&self) -> ThrottleStrategyType { + ThrottleStrategyType::Random + } } #[derive(Default)] @@ -93,6 +114,9 @@ impl ThrottleStrategy for NoThrottle { fn throttle(&self, _flow_control_headers: Option<&FlowControlHeaders>) { log_info!("No throttling enabled"); } + fn strategy(&self) -> ThrottleStrategyType { + ThrottleStrategyType::NoThrottle + } } /// AutoRate implements an automatic throttling algorithm that limits the @@ -108,9 +132,6 @@ pub struct AutoRate { now: fn() -> Seconds, } -const DEFAULT_JITTER_MAX_MILLISECONDS: u64 = 5000; -const DEFAULT_JITTER_MIN_MILLISECONDS: u64 = 1000; - impl Default for AutoRate { fn default() -> Self { Self { @@ -162,4 +183,7 @@ impl ThrottleStrategy for AutoRate { None => (), }; } + fn strategy(&self) -> ThrottleStrategyType { + ThrottleStrategyType::AutoRate + } } diff --git a/src/test.rs b/src/test.rs index 0bfc829..5cb579e 100644 --- a/src/test.rs +++ b/src/test.rs @@ -5,7 +5,11 @@ pub mod utils { api_traits::ApiOperation, config::ConfigProperties, error, - http::{self, Headers, Request}, + http::{ + self, + throttle::{self, ThrottleStrategyType}, + Headers, Request, + }, io::{self, HttpResponse, HttpRunner, ShellResponse, TaskRunner}, 
time::Milliseconds, Result, @@ -385,13 +389,15 @@ pub mod utils { pub struct MockThrottler { throttled: RefCell, milliseconds_throttled: RefCell, + strategy: throttle::ThrottleStrategyType, } impl MockThrottler { - pub fn new() -> Self { + pub fn new(strategy_type: Option) -> Self { Self { throttled: RefCell::new(0), milliseconds_throttled: RefCell::new(Milliseconds::new(0)), + strategy: strategy_type.unwrap_or(ThrottleStrategyType::NoThrottle), } } @@ -416,5 +422,9 @@ pub mod utils { let mut milliseconds_throttled = self.milliseconds_throttled.borrow_mut(); *milliseconds_throttled += delay; } + + fn strategy(&self) -> ThrottleStrategyType { + self.strategy.clone() + } } } From 09fcc75fe968cfd8f9fffa95a2399a4354fbb2f9 Mon Sep 17 00:00:00 2001 From: Jordi Carrillo Bosch Date: Mon, 21 Oct 2024 20:43:27 -0700 Subject: [PATCH 25/28] Auto throttling docs --- doc/src/SUMMARY.md | 1 + doc/src/getting_started.md | 2 ++ doc/src/listing.md | 21 +++++++++++++++++++++ 3 files changed, 24 insertions(+) create mode 100644 doc/src/listing.md diff --git a/doc/src/SUMMARY.md b/doc/src/SUMMARY.md index 230641f..5649752 100644 --- a/doc/src/SUMMARY.md +++ b/doc/src/SUMMARY.md @@ -5,6 +5,7 @@ - [Installation](./installation.md) - [Configuration](./configuration.md) - [Caching](./caching.md) + - [Listing data](./listing.md) - [Gitar commands](./cmds/index.md) - [Merge requests](./cmds/merge_request.md) - [Pipelines](./cmds/pipeline.md) diff --git a/doc/src/getting_started.md b/doc/src/getting_started.md index 4174263..117e9ee 100644 --- a/doc/src/getting_started.md +++ b/doc/src/getting_started.md @@ -5,3 +5,5 @@ a configuration file. Let's get started: - [Installation](./installation.md) - [Configuration](./configuration.md) +- [Caching](./caching.md) +- [Listing data](./listing.md) diff --git a/doc/src/listing.md b/doc/src/listing.md new file mode 100644 index 0000000..86d27fd --- /dev/null +++ b/doc/src/listing.md @@ -0,0 +1,21 @@ +# The list subcommand + + + +The list subcommand is used to pull data from specific resources such as +pipelines and merge requests. Gitar implements best practices to avoid being +rate limited: it caches responses and uses pagination to pull the required data +using the `--from-page` and `--to-page` flags. + +## Auto throttling + +Gitar automatically throttles requests after three consecutive HTTP +calls have been made. The throttling is based on the rate limit headers plus a +jitter interval between 1 and 5 seconds. The user can also specify a fixed +throttle interval with `--throttle` or a random one with `--throttle-range`. + +## Max pages to fetch + +If no configuration is provided, the default is a maximum of 10 pages. This can be +overridden with `--to-page`, which fetches up to the specified page, or with a +range of pages using `--from-page` and `--to-page`.
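To make the engagement rule documented in doc/src/listing.md concrete, the following is a minimal sketch rather than the crate's actual `Paginator` or `ThrottleStrategy` code: `ThrottleKind` and `should_throttle` are hypothetical names introduced only for illustration, and the threshold value mirrors the `ENGAGE_AUTORATE_THROTTLING_THRESHOLD` constant added earlier in this series.

```rust
// Minimal sketch of the auto-rate engagement rule: only start throttling
// once a number of consecutive HTTP calls have been made.
// `ThrottleKind` and `should_throttle` are illustrative stand-ins, not the
// crate's real API; only the threshold value comes from the patch series.
const ENGAGE_THRESHOLD: u32 = 3;

#[derive(Debug, PartialEq)]
enum ThrottleKind {
    AutoRate,
    Fixed,
    NoThrottle,
}

fn should_throttle(kind: &ThrottleKind, consecutive_calls: u32) -> bool {
    match kind {
        // Auto-rate only engages after the threshold of consecutive calls.
        ThrottleKind::AutoRate => consecutive_calls >= ENGAGE_THRESHOLD,
        // Fixed or random strategies throttle on every paginated request.
        ThrottleKind::Fixed => true,
        ThrottleKind::NoThrottle => false,
    }
}

fn main() {
    for call in 1..=5 {
        println!(
            "call {}: throttle = {}",
            call,
            should_throttle(&ThrottleKind::AutoRate, call)
        );
    }
}
```

Fixed and random strategies, by contrast, throttle on every paginated request, which is why the paginator change earlier in the series only gates the auto-rate case on the threshold.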
From 93b6cab203a3ea682573137c809b5c12b5d6d7a3 Mon Sep 17 00:00:00 2001 From: Jordi Carrillo Bosch Date: Mon, 21 Oct 2024 20:44:11 -0700 Subject: [PATCH 26/28] Cargo bump version --- Cargo.lock | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f9a3ce2..0ecec91 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -873,7 +873,7 @@ checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "git-ar" -version = "1.0.4" +version = "1.0.5" dependencies = [ "anyhow", "chrono", diff --git a/Cargo.toml b/Cargo.toml index 474ebbb..072a378 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "git-ar" -version = "1.0.4" +version = "1.0.5" edition = "2021" license = "MIT" From 22df07a7287e4b450dd32fae8c54d944311bd48b Mon Sep 17 00:00:00 2001 From: Jordi Carrillo Bosch Date: Mon, 21 Oct 2024 20:52:12 -0700 Subject: [PATCH 27/28] Cargo fmt --- src/github/cicd.rs | 2 +- src/github/container_registry.rs | 2 +- src/github/gist.rs | 2 +- src/github/merge_request.rs | 2 +- src/github/release.rs | 2 +- src/github/trending.rs | 2 +- src/github/user.rs | 2 +- src/gitlab/cicd.rs | 2 +- src/gitlab/container_registry.rs | 2 +- src/gitlab/gist.rs | 2 +- src/gitlab/merge_request.rs | 2 +- src/gitlab/project.rs | 2 +- src/gitlab/release.rs | 2 +- src/gitlab/trending.rs | 2 +- src/gitlab/user.rs | 2 +- 15 files changed, 15 insertions(+), 15 deletions(-) diff --git a/src/github/cicd.rs b/src/github/cicd.rs index 5d443a7..e17b91a 100644 --- a/src/github/cicd.rs +++ b/src/github/cicd.rs @@ -7,7 +7,7 @@ use crate::cmds::cicd::{ use crate::remote::query; use crate::{ api_traits::Cicd, - io::{HttpRunner, HttpResponse}, + io::{HttpResponse, HttpRunner}, }; use crate::{http, time, Result}; diff --git a/src/github/container_registry.rs b/src/github/container_registry.rs index e89440c..f8f62d1 100644 --- a/src/github/container_registry.rs +++ b/src/github/container_registry.rs @@ -1,7 +1,7 @@ use crate::{ api_traits::ContainerRegistry, cmds::docker::{DockerListBodyArgs, ImageMetadata, RegistryRepository, RepositoryTag}, - io::{HttpRunner, HttpResponse}, + io::{HttpResponse, HttpRunner}, Result, }; diff --git a/src/github/gist.rs b/src/github/gist.rs index bb454d9..faf6db2 100644 --- a/src/github/gist.rs +++ b/src/github/gist.rs @@ -1,7 +1,7 @@ use crate::{ api_traits::{ApiOperation, CodeGist, NumberDeltaErr}, cmds::gist::{Gist, GistListBodyArgs}, - io::{HttpRunner, HttpResponse}, + io::{HttpResponse, HttpRunner}, remote::{query, URLQueryParamBuilder}, Result, }; diff --git a/src/github/merge_request.rs b/src/github/merge_request.rs index 1c4f035..d591328 100644 --- a/src/github/merge_request.rs +++ b/src/github/merge_request.rs @@ -11,7 +11,7 @@ use crate::{ project::MrMemberType, }, http::{self, Body}, - io::{HttpRunner, HttpResponse}, + io::{HttpResponse, HttpRunner}, json_loads, remote::query, }; diff --git a/src/github/release.rs b/src/github/release.rs index c3a4495..fc6e053 100644 --- a/src/github/release.rs +++ b/src/github/release.rs @@ -1,7 +1,7 @@ use crate::{ api_traits::{ApiOperation, Deploy, DeployAsset, NumberDeltaErr}, cmds::release::{Release, ReleaseAssetListBodyArgs, ReleaseAssetMetadata, ReleaseBodyArgs}, - io::{HttpRunner, HttpResponse}, + io::{HttpResponse, HttpRunner}, remote::query, Result, }; diff --git a/src/github/trending.rs b/src/github/trending.rs index 8376210..1e4042e 100644 --- a/src/github/trending.rs +++ b/src/github/trending.rs @@ -4,7 +4,7 @@ use crate::{ api_traits::{ApiOperation, 
TrendingProjectURL}, cmds::trending::TrendingProject, http::Headers, - io::{HttpRunner, HttpResponse}, + io::{HttpResponse, HttpRunner}, remote::query, Result, }; diff --git a/src/github/user.rs b/src/github/user.rs index 40f205b..2eb6d18 100644 --- a/src/github/user.rs +++ b/src/github/user.rs @@ -2,7 +2,7 @@ use super::Github; use crate::api_traits::{ApiOperation, UserInfo}; use crate::cmds::project::Member; use crate::cmds::user::UserCliArgs; -use crate::io::{HttpRunner, HttpResponse}; +use crate::io::{HttpResponse, HttpRunner}; use crate::remote::query; use crate::Result; diff --git a/src/gitlab/cicd.rs b/src/gitlab/cicd.rs index 4532f5c..bd18e41 100644 --- a/src/gitlab/cicd.rs +++ b/src/gitlab/cicd.rs @@ -8,7 +8,7 @@ use crate::http::{self, Body, Headers}; use crate::remote::{query, URLQueryParamBuilder}; use crate::{ api_traits::Cicd, - io::{HttpRunner, HttpResponse}, + io::{HttpResponse, HttpRunner}, }; use crate::{time, Result}; diff --git a/src/gitlab/container_registry.rs b/src/gitlab/container_registry.rs index e1a8b95..26a32b2 100644 --- a/src/gitlab/container_registry.rs +++ b/src/gitlab/container_registry.rs @@ -1,7 +1,7 @@ use crate::{ api_traits::{ApiOperation, ContainerRegistry}, cmds::docker::{DockerListBodyArgs, ImageMetadata, RegistryRepository, RepositoryTag}, - io::{HttpRunner, HttpResponse}, + io::{HttpResponse, HttpRunner}, remote::query, Result, }; diff --git a/src/gitlab/gist.rs b/src/gitlab/gist.rs index 5deea73..7e6ea81 100644 --- a/src/gitlab/gist.rs +++ b/src/gitlab/gist.rs @@ -1,7 +1,7 @@ use crate::{ api_traits::CodeGist, cmds::gist::{Gist, GistListBodyArgs}, - io::{HttpRunner, HttpResponse}, + io::{HttpResponse, HttpRunner}, }; use super::Gitlab; diff --git a/src/gitlab/merge_request.rs b/src/gitlab/merge_request.rs index 91bfb58..3f306b8 100644 --- a/src/gitlab/merge_request.rs +++ b/src/gitlab/merge_request.rs @@ -12,7 +12,7 @@ use crate::remote::query; use crate::Result; use crate::{ api_traits::MergeRequest, - io::{HttpRunner, HttpResponse}, + io::{HttpResponse, HttpRunner}, }; use crate::json_loads; diff --git a/src/gitlab/project.rs b/src/gitlab/project.rs index 334ce25..1e1af03 100644 --- a/src/gitlab/project.rs +++ b/src/gitlab/project.rs @@ -3,7 +3,7 @@ use crate::cli::browse::BrowseOptions; use crate::cmds::project::{Member, Project, ProjectListBodyArgs, Tag}; use crate::error::GRError; use crate::gitlab::encode_path; -use crate::io::{CmdInfo, HttpRunner, HttpResponse}; +use crate::io::{CmdInfo, HttpResponse, HttpRunner}; use crate::remote::query; use crate::remote::URLQueryParamBuilder; use crate::Result; diff --git a/src/gitlab/release.rs b/src/gitlab/release.rs index dc752d3..449c788 100644 --- a/src/gitlab/release.rs +++ b/src/gitlab/release.rs @@ -2,7 +2,7 @@ use crate::{ api_traits::{ApiOperation, Deploy, DeployAsset, NumberDeltaErr}, cmds::release::{Release, ReleaseAssetListBodyArgs, ReleaseAssetMetadata, ReleaseBodyArgs}, http, - io::{HttpRunner, HttpResponse}, + io::{HttpResponse, HttpRunner}, remote::query, Result, }; diff --git a/src/gitlab/trending.rs b/src/gitlab/trending.rs index 8e30204..8e09ffc 100644 --- a/src/gitlab/trending.rs +++ b/src/gitlab/trending.rs @@ -1,7 +1,7 @@ use crate::{ api_traits::TrendingProjectURL, cmds::trending::TrendingProject, - io::{HttpRunner, HttpResponse}, + io::{HttpResponse, HttpRunner}, Result, }; diff --git a/src/gitlab/user.rs b/src/gitlab/user.rs index a3061d3..7d81dcc 100644 --- a/src/gitlab/user.rs +++ b/src/gitlab/user.rs @@ -2,7 +2,7 @@ use crate::{ api_traits::{ApiOperation, UserInfo}, 
cmds::{project::Member, user::UserCliArgs}, error::GRError, - io::{HttpRunner, HttpResponse}, + io::{HttpResponse, HttpRunner}, remote::{self, query}, Result, }; From af7ed490c42eb4bd9307ef53106472f8034c0771 Mon Sep 17 00:00:00 2001 From: Jordi Carrillo Bosch Date: Mon, 21 Oct 2024 20:55:53 -0700 Subject: [PATCH 28/28] Clippy fixes --- src/http/throttle.rs | 69 +++++++++++++++++++++----------------------- 1 file changed, 33 insertions(+), 36 deletions(-) diff --git a/src/http/throttle.rs b/src/http/throttle.rs index 26c952e..f0f5425 100644 --- a/src/http/throttle.rs +++ b/src/http/throttle.rs @@ -144,44 +144,41 @@ impl Default for AutoRate { impl ThrottleStrategy for AutoRate { fn throttle(&self, flow_control_headers: Option<&FlowControlHeaders>) { - match flow_control_headers { - Some(headers) => { - let rate_limit_headers = headers.get_rate_limit_header(); - match *rate_limit_headers { - Some(headers) => { - // In order to avoid rate limited, we need to space the - // requests evenly using: time to ratelimit-reset - // (secs)/ratelimit-remaining (requests). - let now = *(self.now)(); - log_debug!("Current epoch: {}", now); - log_debug!("Rate limit reset: {}", headers.reset); - let time_to_reset = headers.reset.saturating_sub(now); - log_debug!("Time to reset: {}", time_to_reset); - log_debug!("Remaining requests: {}", headers.remaining); - let delay = time_to_reset / headers.remaining as u64; - // Avoid predictability and being too fast. We could end up - // being too fast when the amount of remaining requests - // is high and the reset time is low. We additionally - // wait in between jitter_min and jitter_max milliseconds. - let additional_delay = - rand::thread_rng().gen_range(*self.jitter_min..=*self.jitter_max); - let total_delay = delay + additional_delay; - log_info!("AutoRate throttling enabled"); - self.throttle_for(Milliseconds::from(total_delay)); - } - None => { - // When the response has status 304 Not Modified, we don't get - // any rate limiting headers. In this case, we just throttle - // randomly between the min and max jitter. - let rand_delay_jitter = - rand::thread_rng().gen_range(*self.jitter_min..=*self.jitter_max); - log_info!("AutoRate throttling enabled"); - self.throttle_for(Milliseconds::from(rand_delay_jitter)); - } + if let Some(headers) = flow_control_headers { + let rate_limit_headers = headers.get_rate_limit_header(); + match *rate_limit_headers { + Some(headers) => { + // In order to avoid rate limited, we need to space the + // requests evenly using: time to ratelimit-reset + // (secs)/ratelimit-remaining (requests). + let now = *(self.now)(); + log_debug!("Current epoch: {}", now); + log_debug!("Rate limit reset: {}", headers.reset); + let time_to_reset = headers.reset.saturating_sub(now); + log_debug!("Time to reset: {}", time_to_reset); + log_debug!("Remaining requests: {}", headers.remaining); + let delay = time_to_reset / headers.remaining as u64; + // Avoid predictability and being too fast. We could end up + // being too fast when the amount of remaining requests + // is high and the reset time is low. We additionally + // wait in between jitter_min and jitter_max milliseconds. + let additional_delay = + rand::thread_rng().gen_range(*self.jitter_min..=*self.jitter_max); + let total_delay = delay + additional_delay; + log_info!("AutoRate throttling enabled"); + self.throttle_for(Milliseconds::from(total_delay)); + } + None => { + // When the response has status 304 Not Modified, we don't get + // any rate limiting headers. 
In this case, we just throttle + // randomly between the min and max jitter. + let rand_delay_jitter = + rand::thread_rng().gen_range(*self.jitter_min..=*self.jitter_max); + log_info!("AutoRate throttling enabled"); + self.throttle_for(Milliseconds::from(rand_delay_jitter)); } } - None => (), - }; + } } fn strategy(&self) -> ThrottleStrategyType { ThrottleStrategyType::AutoRate
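The AutoRate hunk above spaces requests evenly over the time left until the rate-limit window resets and then adds jitter. The sketch below summarizes that computation under simplifying assumptions: plain `u64` values stand in for the crate's `Seconds`/`Milliseconds` wrappers, the per-request share is converted to milliseconds so a single unit is used (an assumption of this sketch, not something the patch does), and the `rand` crate is assumed to be available, as it already is in this series.

```rust
// Standalone sketch of the AutoRate delay: evenly space the remaining
// requests over the seconds left until the rate-limit reset, then add a
// random jitter. Plain u64s stand in for the crate's Seconds/Milliseconds.
use rand::Rng;

fn autorate_delay_ms(
    now_epoch_secs: u64,
    reset_epoch_secs: u64,
    remaining_requests: u64,
    jitter_min_ms: u64,
    jitter_max_ms: u64,
) -> u64 {
    let time_to_reset = reset_epoch_secs.saturating_sub(now_epoch_secs);
    // Even spacing between requests; guard against division by zero.
    let base_ms = if remaining_requests > 0 {
        (time_to_reset * 1000) / remaining_requests
    } else {
        0
    };
    // Jitter avoids predictability and being too fast when many requests remain.
    let jitter_ms = rand::thread_rng().gen_range(jitter_min_ms..=jitter_max_ms);
    base_ms + jitter_ms
}

fn main() {
    // Example: 120 seconds until reset, 40 requests remaining, 1-5 s jitter.
    let delay = autorate_delay_ms(1_700_000_000, 1_700_000_120, 40, 1_000, 5_000);
    println!("throttle for {} ms", delay);
}
```

As the hunk shows, when no rate-limit headers are present (for example on a 304 Not Modified response), the strategy falls back to a purely random delay between the jitter bounds.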