From bbe52cc10608051746a8efd21e003da93cc126a9 Mon Sep 17 00:00:00 2001 From: dumko2001 Date: Wed, 18 Mar 2026 13:40:32 +0530 Subject: [PATCH 1/8] fix(executor): reuse HTTP client and implement global retry --- src/client.rs | 18 ++++++++++++++++-- src/executor.rs | 40 +++++++++++++++++++++------------------- 2 files changed, 37 insertions(+), 21 deletions(-) diff --git a/src/client.rs b/src/client.rs index e0abe409..3518209f 100644 --- a/src/client.rs +++ b/src/client.rs @@ -24,7 +24,7 @@ const MAX_RETRIES: u32 = 3; /// or misconfigured server from hanging the process indefinitely. const MAX_RETRY_DELAY_SECS: u64 = 60; -/// Send an HTTP request with automatic retry on 429 (rate limit) responses. +/// Send an HTTP request with automatic retry on 429 (rate limit) and transient 5xx responses. /// Respects the `Retry-After` header; falls back to exponential backoff (1s, 2s, 4s). pub async fn send_with_retry( build_request: impl Fn() -> reqwest::RequestBuilder, @@ -32,7 +32,21 @@ pub async fn send_with_retry( for attempt in 0..MAX_RETRIES { let resp = build_request().send().await?; - if resp.status() != reqwest::StatusCode::TOO_MANY_REQUESTS { + let status = resp.status(); + if status.is_success() + || !matches!( + status, + reqwest::StatusCode::TOO_MANY_REQUESTS + | reqwest::StatusCode::INTERNAL_SERVER_ERROR + | reqwest::StatusCode::BAD_GATEWAY + | reqwest::StatusCode::SERVICE_UNAVAILABLE + ) + { + return Ok(resp); + } + + // Only sleep if we have retries left + if attempt == MAX_RETRIES - 1 { return Ok(resp); } diff --git a/src/executor.rs b/src/executor.rs index b6b8dadf..b6785649 100644 --- a/src/executor.rs +++ b/src/executor.rs @@ -157,7 +157,7 @@ fn parse_and_validate_inputs( /// Build an HTTP request with auth, query params, page token, and body/multipart attachment. 
#[allow(clippy::too_many_arguments)] -async fn build_http_request( +fn build_http_request( client: &reqwest::Client, method: &RestMethod, input: &ExecutionInput, @@ -212,7 +212,7 @@ async fn build_http_request( build_multipart_bytes(&input.body, data, content_type)? } UploadSource::File { path, content_type } => { - let file_meta = tokio::fs::metadata(path).await.map_err(|e| { + let file_meta = std::fs::metadata(path).map_err(|e| { GwsError::Validation(format!( "Failed to get metadata for upload file '{}': {}", path, e @@ -431,27 +431,32 @@ pub async fn execute_method( return Ok(None); } + let client = crate::client::build_client()?; let mut page_token: Option = None; let mut pages_fetched: u32 = 0; let mut captured_values = Vec::new(); loop { - let client = crate::client::build_client()?; - let request = build_http_request( - &client, - method, - &input, - token, - &auth_method, - page_token.as_deref(), - pages_fetched, - &upload, - ) - .await?; - let method_id = method.id.as_deref().unwrap_or("unknown"); let start = std::time::Instant::now(); - let response = request.send().await.context("HTTP request failed")?; + + // Wrap the request builder in send_with_retry to handle rate limits and transient 5xx + let response = crate::client::send_with_retry(|| { + build_http_request( + &client, + method, + &input, + token, + &auth_method, + page_token.as_deref(), + pages_fetched, + &upload, + ) + .expect("Request builder should not fail during retry") + }) + .await + .context("HTTP request failed")?; + let latency_ms = start.elapsed().as_millis() as u64; let status = response.status(); @@ -2311,7 +2316,6 @@ async fn test_post_without_body_sets_content_length_zero() { 0, &None, ) - .await .unwrap(); let built = request.build().unwrap(); @@ -2351,7 +2355,6 @@ async fn test_post_with_body_does_not_add_content_length_zero() { 0, &None, ) - .await .unwrap(); let built = request.build().unwrap(); @@ -2389,7 +2392,6 @@ async fn test_get_does_not_set_content_length_zero() { 0, 
&None, ) - .await .unwrap(); let built = request.build().unwrap(); From 5a0e13b1254aef333549cb0f9d2584a9d10ebbaa Mon Sep 17 00:00:00 2001 From: dumko2001 Date: Wed, 18 Mar 2026 14:15:22 +0530 Subject: [PATCH 2/8] chore: add changeset for engine performance and retry --- .changeset/engine-perf-retry.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 .changeset/engine-perf-retry.md diff --git a/.changeset/engine-perf-retry.md b/.changeset/engine-perf-retry.md new file mode 100644 index 00000000..5c417db6 --- /dev/null +++ b/.changeset/engine-perf-retry.md @@ -0,0 +1 @@ +---\n"gws": patch\n---\n\nfix(executor): reuse HTTP client across paginated requests and implement global retry for 429/5xx status codes From d5b8820b3a20b83fa2a782e9b418aee0dd21bed2 Mon Sep 17 00:00:00 2001 From: dumko2001 Date: Wed, 18 Mar 2026 14:32:51 +0530 Subject: [PATCH 3/8] refactor(retry): handle fallible builders in send_with_retry and remove .expect() --- src/client.rs | 11 ++++++----- src/executor.rs | 5 ++--- src/helpers/calendar.rs | 4 ++-- src/helpers/gmail/mod.rs | 4 ++-- src/helpers/gmail/reply.rs | 4 ++-- src/helpers/gmail/triage.rs | 2 +- 6 files changed, 15 insertions(+), 15 deletions(-) diff --git a/src/client.rs b/src/client.rs index 3518209f..d16bf409 100644 --- a/src/client.rs +++ b/src/client.rs @@ -26,11 +26,12 @@ const MAX_RETRY_DELAY_SECS: u64 = 60; /// Send an HTTP request with automatic retry on 429 (rate limit) and transient 5xx responses. /// Respects the `Retry-After` header; falls back to exponential backoff (1s, 2s, 4s). 
-pub async fn send_with_retry( - build_request: impl Fn() -> reqwest::RequestBuilder, -) -> Result<reqwest::Response, reqwest::Error> { +pub async fn send_with_retry<F>(build_request: F) -> anyhow::Result<reqwest::Response> +where + F: Fn() -> anyhow::Result<reqwest::RequestBuilder>, +{ for attempt in 0..MAX_RETRIES { - let resp = build_request().send().await?; + let resp = build_request()?.send().await?; let status = resp.status(); if status.is_success() @@ -60,7 +61,7 @@ send_with_retry( } // Final attempt — return whatever we get - build_request().send().await + Ok(build_request()?.send().await?) } /// Compute the retry delay from a Retry-After header value and attempt number. diff --git a/src/executor.rs b/src/executor.rs index b6785649..ac538e2f 100644 --- a/src/executor.rs +++ b/src/executor.rs @@ -442,7 +442,7 @@ pub async fn execute_method( // Wrap the request builder in send_with_retry to handle rate limits and transient 5xx let response = crate::client::send_with_retry(|| { - build_http_request( + Ok(build_http_request( &client, method, &input, @@ -451,8 +451,7 @@ page_token.as_deref(), pages_fetched, &upload, - ) - .expect("Request builder should not fail during retry") + )?)
}) .await .context("HTTP request failed")?; diff --git a/src/helpers/calendar.rs b/src/helpers/calendar.rs index cf28b249..f25adddf 100644 --- a/src/helpers/calendar.rs +++ b/src/helpers/calendar.rs @@ -332,7 +332,7 @@ async fn handle_agenda(matches: &ArgMatches) -> Result<(), GwsError> { ); let resp = crate::client::send_with_retry(|| { - client + Ok(client .get(&events_url) .query(&[ ("timeMin", time_min.as_str()), @@ -341,7 +341,7 @@ async fn handle_agenda(matches: &ArgMatches) -> Result<(), GwsError> { ("orderBy", "startTime"), ("maxResults", "50"), ]) - .bearer_auth(token) + .bearer_auth(token)) }) .await; diff --git a/src/helpers/gmail/mod.rs b/src/helpers/gmail/mod.rs index b9ed7c8e..24902891 100644 --- a/src/helpers/gmail/mod.rs +++ b/src/helpers/gmail/mod.rs @@ -335,10 +335,10 @@ pub(super) async fn fetch_message_metadata( ); let resp = crate::client::send_with_retry(|| { - client + Ok(client .get(&url) .bearer_auth(token) - .query(&[("format", "full")]) + .query(&[("format", "full")])) }) .await .map_err(|e| GwsError::Other(anyhow::anyhow!("Failed to fetch message: {e}")))?; diff --git a/src/helpers/gmail/reply.rs b/src/helpers/gmail/reply.rs index 211bf98e..cca2dd6c 100644 --- a/src/helpers/gmail/reply.rs +++ b/src/helpers/gmail/reply.rs @@ -144,9 +144,9 @@ pub(super) struct ReplyConfig { async fn fetch_user_email(client: &reqwest::Client, token: &str) -> Result { let resp = crate::client::send_with_retry(|| { - client + Ok(client .get("https://gmail.googleapis.com/gmail/v1/users/me/profile") - .bearer_auth(token) + .bearer_auth(token)) }) .await .map_err(|e| GwsError::Other(anyhow::anyhow!("Failed to fetch user profile: {e}")))?; diff --git a/src/helpers/gmail/triage.rs b/src/helpers/gmail/triage.rs index 8bc58804..54534e8b 100644 --- a/src/helpers/gmail/triage.rs +++ b/src/helpers/gmail/triage.rs @@ -106,7 +106,7 @@ pub async fn handle_triage(matches: &ArgMatches) -> Result<(), GwsError> { ); let get_resp = crate::client::send_with_retry(|| { - 
client.get(&get_url).bearer_auth(token) + Ok(client.get(&get_url).bearer_auth(token)) }) .await .ok()?; From 23cd05f31c57d6fdc1b9522b995f83088321e0bc Mon Sep 17 00:00:00 2001 From: dumko2001 Date: Wed, 18 Mar 2026 14:37:08 +0530 Subject: [PATCH 4/8] fix(retry): ensure final retry attempt is reached --- src/client.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/client.rs b/src/client.rs index d16bf409..84474cfb 100644 --- a/src/client.rs +++ b/src/client.rs @@ -46,11 +46,7 @@ where return Ok(resp); } - // Only sleep if we have retries left - if attempt == MAX_RETRIES - 1 { - return Ok(resp); - } - + // If we still have retries, sleep and continue let header_value = resp .headers() .get("retry-after") From c6810fc7d9b76423f3267c6d68ab869d7b9f4eb7 Mon Sep 17 00:00:00 2001 From: dumko2001 Date: Wed, 18 Mar 2026 15:07:55 +0530 Subject: [PATCH 5/8] chore: correct changeset package name --- .changeset/engine-perf-retry.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changeset/engine-perf-retry.md b/.changeset/engine-perf-retry.md index 5c417db6..f6a70974 100644 --- a/.changeset/engine-perf-retry.md +++ b/.changeset/engine-perf-retry.md @@ -1 +1 @@ ----\n"gws": patch\n---\n\nfix(executor): reuse HTTP client across paginated requests and implement global retry for 429/5xx status codes +---\n"@googleworkspace/cli": patch\n---\n\nfix(executor): reuse HTTP client across paginated requests and implement global retry for 429/5xx status codes From 9058d9e146729897867cad4d69388330203c9cdd Mon Sep 17 00:00:00 2001 From: dumko2001 Date: Wed, 18 Mar 2026 15:17:32 +0530 Subject: [PATCH 6/8] fix(executor): avoid blocking I/O in async context by pre-resolving metadata --- src/executor.rs | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/src/executor.rs b/src/executor.rs index ac538e2f..dc6e27fd 100644 --- a/src/executor.rs +++ b/src/executor.rs @@ -166,6 +166,7 @@ fn build_http_request( page_token: 
Option<&str>, pages_fetched: u32, upload: &Option<UploadSource>, + resolved_file_size: Option<u64>, ) -> Result<reqwest::RequestBuilder, GwsError> { let mut request = match method.http_method.as_str() { "GET" => client.get(&input.full_url), @@ -212,13 +213,11 @@ build_multipart_bytes(&input.body, data, content_type)? } UploadSource::File { path, content_type } => { - let file_meta = std::fs::metadata(path).map_err(|e| { - GwsError::Validation(format!( - "Failed to get metadata for upload file '{}': {}", - path, e + let file_size = resolved_file_size.ok_or_else(|| { + GwsError::Other(anyhow::anyhow!( + "Internal error: file size not resolved for upload" )) })?; - let file_size = file_meta.len(); let media_mime = resolve_upload_mime(*content_type, Some(path), &input.body); build_multipart_stream(&input.body, path, file_size, &media_mime)? } @@ -436,6 +435,14 @@ pub async fn execute_method( let mut pages_fetched: u32 = 0; let mut captured_values = Vec::new(); + // Resolve file metadata once before loop if uploading a file + let mut resolved_file_size: Option<u64> = None; + if let Some(UploadSource::File { path, .. }) = &upload { + resolved_file_size = Some(tokio::fs::metadata(path).await.map_err(|e| { + GwsError::Validation(format!("Failed to get metadata for upload file '{}': {}", path, e)) + })?.len()); + } + loop { let method_id = method.id.as_deref().unwrap_or("unknown"); let start = std::time::Instant::now(); @@ -451,6 +458,7 @@ pub async fn execute_method( page_token.as_deref(), pages_fetched, &upload, + resolved_file_size, )?)
}) .await From f226834842c2eb07302736193a3a520e375d0861 Mon Sep 17 00:00:00 2001 From: dumko2001 Date: Wed, 18 Mar 2026 15:17:59 +0530 Subject: [PATCH 7/8] fix(retry): also retry on transient network errors (timeouts and connection failures) --- src/client.rs | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/src/client.rs b/src/client.rs index 84474cfb..0d830b8e 100644 --- a/src/client.rs +++ b/src/client.rs @@ -31,7 +31,18 @@ where F: Fn() -> anyhow::Result, { for attempt in 0..MAX_RETRIES { - let resp = build_request()?.send().await?; + let req_result = build_request()?.send().await; + + let resp = match req_result { + Ok(r) => r, + Err(e) if attempt < MAX_RETRIES - 1 && (e.is_timeout() || e.is_connect()) => { + let retry_after = compute_retry_delay(None, attempt); + tracing::debug!(error = %e, attempt, retry_after, "Retrying on network error"); + tokio::time::sleep(std::time::Duration::from_secs(retry_after)).await; + continue; + } + Err(e) => return Err(e.into()), + }; let status = resp.status(); if status.is_success() @@ -53,6 +64,7 @@ where .and_then(|v| v.to_str().ok()); let retry_after = compute_retry_delay(header_value, attempt); + tracing::debug!(status = %status, attempt, retry_after, "Retrying on server error"); tokio::time::sleep(std::time::Duration::from_secs(retry_after)).await; } From d4168b4d965f402c6444daa7790c0f094dd57cc4 Mon Sep 17 00:00:00 2001 From: dumko2001 Date: Wed, 18 Mar 2026 16:41:53 +0530 Subject: [PATCH 8/8] fix(retry): ensure consistent attempts for network and server errors --- src/client.rs | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/src/client.rs b/src/client.rs index 0d830b8e..5786df09 100644 --- a/src/client.rs +++ b/src/client.rs @@ -30,12 +30,13 @@ pub async fn send_with_retry(build_request: F) -> anyhow::Result anyhow::Result, { - for attempt in 0..MAX_RETRIES { + // total attempts = MAX_RETRIES + 1 + for attempt in 0..=MAX_RETRIES { let 
req_result = build_request()?.send().await; let resp = match req_result { Ok(r) => r, - Err(e) if attempt < MAX_RETRIES - 1 && (e.is_timeout() || e.is_connect()) => { + Err(e) if attempt < MAX_RETRIES && (e.is_timeout() || e.is_connect()) => { let retry_after = compute_retry_delay(None, attempt); tracing::debug!(error = %e, attempt, retry_after, "Retrying on network error"); tokio::time::sleep(std::time::Duration::from_secs(retry_after)).await; @@ -58,18 +59,21 @@ where } // If we still have retries, sleep and continue - let header_value = resp - .headers() - .get("retry-after") - .and_then(|v| v.to_str().ok()); - let retry_after = compute_retry_delay(header_value, attempt); - - tracing::debug!(status = %status, attempt, retry_after, "Retrying on server error"); - tokio::time::sleep(std::time::Duration::from_secs(retry_after)).await; + if attempt < MAX_RETRIES { + let header_value = resp + .headers() + .get("retry-after") + .and_then(|v| v.to_str().ok()); + let retry_after = compute_retry_delay(header_value, attempt); + + tracing::debug!(status = %status, attempt, retry_after, "Retrying on server error"); + tokio::time::sleep(std::time::Duration::from_secs(retry_after)).await; + } else { + return Ok(resp); + } } - // Final attempt — return whatever we get - Ok(build_request()?.send().await?) + unreachable!("Loop should return on last attempt") } /// Compute the retry delay from a Retry-After header value and attempt number.