From 84a09b189c73e9a21ec7ec3ce87804f19375155f Mon Sep 17 00:00:00 2001
From: vincent
Date: Fri, 23 May 2025 19:18:09 -0400
Subject: [PATCH 01/17] remove server module inception

---
 src/server.rs | 298 +++++++++++++++++++++++++-------------------------
 1 file changed, 148 insertions(+), 150 deletions(-)

diff --git a/src/server.rs b/src/server.rs
index 6856557..14981d4 100644
--- a/src/server.rs
+++ b/src/server.rs
@@ -1,181 +1,179 @@
 #![allow(unused)]
-pub mod server {
-    use crate::constants::{self, TAG, VERSION};
-    use crate::error::{AppError, AppResult};
-    use crate::groups;
-    use crate::logging::android_log;
-    use crate::repos;
-    use crate::{log_debug, log_error, log_info};
-    use actix_web::{delete, get, patch, post, put};
-    use actix_web::{web, App, Error as ActixError, HttpResponse, HttpServer, Responder};
-    use anyhow::{anyhow, Context, Result};
-    use base64_url;
-    use futures::{future, lock};
-    use num_cpus;
-    use once_cell::sync::OnceCell;
-    use save_dweb_backend::backend::Backend;
-    use serde::{Deserialize, Serialize};
-    use serde_json::json;
-    use std::cmp;
-    use std::fs;
-    use std::net::Ipv4Addr;
-    use std::time::{Duration, Instant};
-    use std::path::Path;
-    use std::sync::Arc;
-    use std::{env, panic};
-    use thiserror::Error;
-    use tokio::sync::Mutex as TokioMutex;
-    use veilid_core::{
-        vld0_generate_keypair, CryptoKey, TypedKey, VeilidUpdate, CRYPTO_KIND_VLD0,
-        VALID_CRYPTO_KINDS,
-    };
-    use crate::actix_route_dumper::RouteDumper;
-    use crate::models::SnowbirdGroup;
-
-    #[derive(Error, Debug)]
-    pub enum BackendError {
-        #[error("Backend not initialized")]
-        NotInitialized,
-
-        #[error("Failed to initialize backend: {0}")]
-        InitializationError(#[from] std::io::Error),
-    }
-
-    pub static BACKEND: OnceCell<Arc<TokioMutex<Backend>>> = OnceCell::new();
-
-    pub async fn get_backend<'a>(
-    ) -> Result<impl std::ops::DerefMut<Target = Backend> + 'a, anyhow::Error> {
-        match BACKEND.get() {
-            Some(backend) => Ok(backend.lock().await),
-            None => Err(anyhow!("Backend not initialized")),
-        }
-    }
+use crate::constants::{self, TAG, VERSION};
+use crate::error::{AppError, AppResult};
+use crate::groups;
+use crate::logging::android_log;
+use crate::repos;
+use crate::{log_debug, log_error, log_info};
+use actix_web::{delete, get, patch, post, put};
+use actix_web::{web, App, Error as ActixError, HttpResponse, HttpServer, Responder};
+use anyhow::{anyhow, Context, Result};
+use base64_url;
+use futures::{future, lock};
+use num_cpus;
+use once_cell::sync::OnceCell;
+use save_dweb_backend::backend::Backend;
+use serde::{Deserialize, Serialize};
+use serde_json::json;
+use std::cmp;
+use std::fs;
+use std::net::Ipv4Addr;
+use std::time::{Duration, Instant};
+use std::path::Path;
+use std::sync::Arc;
+use std::{env, panic};
+use thiserror::Error;
+use tokio::sync::Mutex as TokioMutex;
+use veilid_core::{
+    vld0_generate_keypair, CryptoKey, TypedKey, VeilidUpdate, CRYPTO_KIND_VLD0,
+    VALID_CRYPTO_KINDS,
+};
+use crate::actix_route_dumper::RouteDumper;
+use crate::models::SnowbirdGroup;
+
+#[derive(Error, Debug)]
+pub enum BackendError {
+    #[error("Backend not initialized")]
+    NotInitialized,
+
+    #[error("Failed to initialize backend: {0}")]
+    InitializationError(#[from] std::io::Error),
+}
 
-    pub fn init_backend(backend_path: &Path) -> Arc<TokioMutex<Backend>> {
-        Arc::new(TokioMutex::new(
-            Backend::new(backend_path).expect("Failed to create Backend."),
-        ))
-    }
+pub static BACKEND: OnceCell<Arc<TokioMutex<Backend>>> = OnceCell::new();
 
-    #[get("/status")]
-    async fn status() -> impl Responder {
-        HttpResponse::Ok().json(serde_json::json!({
-            "status": "running",
-            "version": *VERSION
-        }))
-    }
+pub async fn get_backend<'a>(
+) -> Result<impl std::ops::DerefMut<Target = Backend> + 'a, anyhow::Error> {
+    match BACKEND.get() {
+        Some(backend) => Ok(backend.lock().await),
+        None => Err(anyhow!("Backend not initialized")),
+    }
+}
 
-    #[get("/health")]
-    async fn health() -> impl Responder {
-        HttpResponse::Ok().json(serde_json::json!({
-            "status": "OK"
-        }))
-    }
+pub fn init_backend(backend_path: &Path) -> Arc<TokioMutex<Backend>> {
+    Arc::new(TokioMutex::new(
+        Backend::new(backend_path).expect("Failed to create Backend."),
+    ))
+}
 
-    #[derive(Deserialize)]
-    struct JoinGroupRequest {
-        uri: String
-    }
+#[get("/status")]
+async fn status() -> impl Responder {
+    HttpResponse::Ok().json(serde_json::json!({
+        "status": "running",
+        "version": *VERSION
+    }))
+}
 
-    #[post("memberships")]
-    async fn join_group(body: web::Json<JoinGroupRequest>) -> AppResult<impl Responder> {
-        let join_request_data = body.into_inner();
-        let backend = get_backend().await?;
-        let boxed_group = backend.join_from_url(&join_request_data.uri).await?;
-        let snowbird_group: SnowbirdGroup = boxed_group.as_ref().into();
+#[get("/health")]
+async fn health() -> impl Responder {
+    HttpResponse::Ok().json(serde_json::json!({
+        "status": "OK"
+    }))
+}
 
-        Ok(HttpResponse::Ok().json(json!({ "group" : snowbird_group })))
-    }
+#[derive(Deserialize)]
+struct JoinGroupRequest {
+    uri: String
+}
 
-    fn actix_log(message: &str) {
-        log_debug!(TAG, "Actix log: {}", message);
-    }
+#[post("memberships")]
+async fn join_group(body: web::Json<JoinGroupRequest>) -> AppResult<impl Responder> {
+    let join_request_data = body.into_inner();
+    let backend = get_backend().await?;
+    let boxed_group = backend.join_from_url(&join_request_data.uri).await?;
+    let snowbird_group: SnowbirdGroup = boxed_group.as_ref().into();
 
-    fn log_perf(message: &str, duration: Duration) {
-        let total_ms = duration.as_millis();
-        let rounded_tenths = (total_ms as f64 / 100.0).round() / 10.0;
-        log_info!(TAG, "{} after {:.1} s", message, rounded_tenths);
-    }
+    Ok(HttpResponse::Ok().json(json!({ "group" : snowbird_group })))
+}
 
-    fn get_optimal_worker_count() -> usize {
-        let cpu_count = num_cpus::get();
-        //let worker_count = cmp::max(1, cmp::min(cpu_count / 2, 4));
-
-        log_debug!(TAG, "Detected {} CPUs", cpu_count);
+fn actix_log(message: &str) {
+    log_debug!(TAG, "Actix log: {}", message);
+}
 
-        // This whole thing was an attempt at optimization, but since
-        // we're only ever handling one request at a time let's keep
-        // things lightweight for now.
-        1
-    }
+fn log_perf(message: &str, duration: Duration) {
+    let total_ms = duration.as_millis();
+    let rounded_tenths = (total_ms as f64 / 100.0).round() / 10.0;
+    log_info!(TAG, "{} after {:.1} s", message, rounded_tenths);
+}
 
-    pub async fn start(backend_base_directory: &str, server_socket_path: &str) -> anyhow::Result<()> {
-        log_debug!(TAG, "start_server: Using socket path: {:?}", server_socket_path);
+fn get_optimal_worker_count() -> usize {
+    let cpu_count = num_cpus::get();
+    //let worker_count = cmp::max(1, cmp::min(cpu_count / 2, 4));
+
+    log_debug!(TAG, "Detected {} CPUs", cpu_count);
 
-        let worker_count = get_optimal_worker_count();
+    // This whole thing was an attempt at optimization, but since
+    // we're only ever handling one request at a time let's keep
+    // things lightweight for now.
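+    // For reference: with concurrent clients, the commented-out formula above
+    // would cap workers at min(cpu_count / 2, 4), e.g. 4 on an 8-core device.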
+ 1 +} - let start_instant = Instant::now(); - log_info!(TAG, "Starting server initialization..."); +pub async fn start(backend_base_directory: &str, server_socket_path: &str) -> anyhow::Result<()> { + log_debug!(TAG, "start_server: Using socket path: {:?}", server_socket_path); - let lan_address = Ipv4Addr::UNSPECIFIED; // 0.0.0.0 - let lan_port = 8080; + let worker_count = get_optimal_worker_count(); - panic::set_hook(Box::new(|panic_info| { - log_error!(TAG, "Panic occurred: {:?}", panic_info); - })); + let start_instant = Instant::now(); + log_info!(TAG, "Starting server initialization..."); - if env::var("HOME").is_err() { - env::set_var("HOME", backend_base_directory); - } + let lan_address = Ipv4Addr::UNSPECIFIED; // 0.0.0.0 + let lan_port = 8080; - let backend_path = Path::new(backend_base_directory); + panic::set_hook(Box::new(|panic_info| { + log_error!(TAG, "Panic occurred: {:?}", panic_info); + })); - BACKEND.get_or_init(|| init_backend(backend_path)); + if env::var("HOME").is_err() { + env::set_var("HOME", backend_base_directory); + } - { - let mut backend = get_backend().await?; + let backend_path = Path::new(backend_base_directory); - backend.start().await.context("Backend failed to start"); - } + BACKEND.get_or_init(|| init_backend(backend_path)); - log_perf("Backend started", start_instant.elapsed()); + { + let mut backend = get_backend().await?; - let web_server = HttpServer::new(move || { - let app_start = Instant::now(); - let app = App::new() - .wrap(RouteDumper::new(actix_log)) - .service(status) - .service(health) - .service( - web::scope("/api") - .service(join_group) - .service(groups::scope()) - ); - log_perf("Web server app created", app_start.elapsed()); - app - }) - .bind_uds(server_socket_path)? - .bind((lan_address, lan_port))? - .disable_signals() - .workers(worker_count); + backend.start().await.context("Backend failed to start"); + } - log_perf("Web server initialized", start_instant.elapsed()); - log_info!(TAG, "Starting web server..."); + log_perf("Backend started", start_instant.elapsed()); + + let web_server = HttpServer::new(move || { + let app_start = Instant::now(); + let app = App::new() + .wrap(RouteDumper::new(actix_log)) + .service(status) + .service(health) + .service( + web::scope("/api") + .service(join_group) + .service(groups::scope()) + ); + log_perf("Web server app created", app_start.elapsed()); + app + }) + .bind_uds(server_socket_path)? + .bind((lan_address, lan_port))? 
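+    // The embedding Android app presumably owns signal handling and shutdown,
+    // so Actix's default SIGINT/SIGTERM handlers are disabled here.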
+ .disable_signals() + .workers(worker_count); + + log_perf("Web server initialized", start_instant.elapsed()); + log_info!(TAG, "Starting web server..."); - let server_future = web_server.run(); - log_perf("Web server started", start_instant.elapsed()); - - server_future.await.context("Failed to start server") - } + let server_future = web_server.run(); + log_perf("Web server started", start_instant.elapsed()); - pub async fn stop() -> anyhow::Result<()> { - let mut backend = get_backend().await?; + server_future.await.context("Failed to start server") +} - match backend.stop().await { - Ok(_) => log_debug!(TAG, "Backend shut down successfully."), - Err(e) => log_error!(TAG, "Failed to shut down backend: {:?}", e), - } +pub async fn stop() -> anyhow::Result<()> { + let mut backend = get_backend().await?; - Ok(()) + match backend.stop().await { + Ok(_) => log_debug!(TAG, "Backend shut down successfully."), + Err(e) => log_error!(TAG, "Failed to shut down backend: {:?}", e), } + + Ok(()) } From 023c97a4e06ac2e90ebeaa7f3837afb6b65997ce Mon Sep 17 00:00:00 2001 From: vincent Date: Fri, 23 May 2025 19:25:48 -0400 Subject: [PATCH 02/17] clippy: remove unnecessary references --- src/repos.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/repos.rs b/src/repos.rs index f5e540b..eff38cb 100644 --- a/src/repos.rs +++ b/src/repos.rs @@ -3,14 +3,14 @@ use crate::error::{AppError, AppResult}; use crate::log_debug; use crate::media; use crate::models::{AsyncFrom, GroupPath, GroupRepoPath, SnowbirdRepo}; -use crate::server::server::get_backend; +use crate::server::get_backend; use crate::utils::create_veilid_cryptokey_from_base64; use actix_web::{get, post, web, HttpResponse, Responder, Scope}; -use save_dweb_backend::common::DHTEntity; use save_dweb_backend::group::Group; -// use save_dweb_backend::repo::Repo; +use save_dweb_backend::common::DHTEntity; use serde::Deserialize; use serde_json::json; +use anyhow::Result; pub fn scope() -> Scope { web::scope("/repos") @@ -35,7 +35,7 @@ async fn list_repos(path: web::Path) -> AppResult { log_debug!(TAG, "group_id = {}", group_id); // Fetch the backend and the group - let crypto_key = create_veilid_cryptokey_from_base64(&group_id)?; + let crypto_key = create_veilid_cryptokey_from_base64(group_id)?; let backend = get_backend().await?; let group = backend.get_group(&crypto_key).await?; log_debug!(TAG, "got group"); @@ -53,12 +53,12 @@ async fn get_repo(path: web::Path) -> AppResult { let repo_id = &path_params.repo_id; // Fetch the backend and the group - let crypto_key = create_veilid_cryptokey_from_base64(&group_id)?; + let crypto_key = create_veilid_cryptokey_from_base64(group_id)?; let backend = get_backend().await?; let group = backend.get_group(&crypto_key).await?; // Fetch the repo from the group - let repo_crypto_key = create_veilid_cryptokey_from_base64(&repo_id)?; + let repo_crypto_key = create_veilid_cryptokey_from_base64(repo_id)?; let repo = group.get_repo(&repo_crypto_key).await?; // Now, convert the owned Repo into SnowbirdRepo From 8118990566aa50e826088b3ac2c475c4f7634e3c Mon Sep 17 00:00:00 2001 From: vincent Date: Fri, 23 May 2025 19:26:20 -0400 Subject: [PATCH 03/17] clippy: remove unnecessary references --- src/media.rs | 32 +++++++++++++++----------------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/src/media.rs b/src/media.rs index 8355f33..5e3a75b 100644 --- a/src/media.rs +++ b/src/media.rs @@ -1,7 +1,7 @@ use crate::constants::TAG; use crate::error::{AppError, AppResult}; use 
crate::models::{GroupRepoMediaPath, GroupRepoPath}; -use crate::server::server::get_backend; +use crate::server::get_backend; use crate::utils::create_veilid_cryptokey_from_base64; use crate::log_info; use actix_web::{delete, get, post, web, HttpResponse, Responder, Scope, http::header, error::BlockingError}; @@ -47,12 +47,12 @@ async fn list_files(path: web::Path) -> AppResult let repo_id = &path_params.repo_id; // Fetch the backend and group - let crypto_key = create_veilid_cryptokey_from_base64(&group_id)?; + let crypto_key = create_veilid_cryptokey_from_base64(group_id)?; let backend = get_backend().await?; let group = backend.get_group(&crypto_key).await?; // Fetch the repo - let repo_crypto_key = create_veilid_cryptokey_from_base64(&repo_id)?; + let repo_crypto_key = create_veilid_cryptokey_from_base64(repo_id)?; let repo = group.get_repo(&repo_crypto_key).await?; let hash = repo.get_hash_from_dht().await?; @@ -87,12 +87,12 @@ async fn download_file(path: web::Path) -> AppResult) -> AppResult) -> AppResult) -> AppResult { +async fn delete_file(path: web::Path) -> AppResult { let path_params = path.into_inner(); let group_id = &path_params.group_id; let repo_id = &path_params.repo_id; let file_name = &path_params.file_name; // Fetch the backend and group - let crypto_key = create_veilid_cryptokey_from_base64(&group_id)?; + let crypto_key = create_veilid_cryptokey_from_base64(group_id)?; let backend = get_backend().await?; let group = backend.get_group(&crypto_key).await?; // Fetch the repo - let repo_crypto_key = create_veilid_cryptokey_from_base64(&repo_id)?; + let repo_crypto_key = create_veilid_cryptokey_from_base64(repo_id)?; let repo = group.get_repo(&repo_crypto_key).await?; // Delete the file and update the collection - let collection_hash = repo.delete_file(&file_name).await?; + let collection_hash = repo.delete_file(file_name).await?; Ok(HttpResponse::Ok().json(collection_hash)) } @@ -156,7 +154,7 @@ async fn upload_file( let file_name = &path_params.file_name; // Fetch the backend and group with proper error handling - let crypto_key = create_veilid_cryptokey_from_base64(&group_id) + let crypto_key = create_veilid_cryptokey_from_base64(group_id) .map_err(|e| anyhow::anyhow!("Invalid group id: {}", e))?; let backend = get_backend() .await @@ -167,7 +165,7 @@ async fn upload_file( .map_err(|e| anyhow::anyhow!("Failed to get group: {}", e))?; // Fetch the repo with proper error handling - let repo_crypto_key = create_veilid_cryptokey_from_base64(&repo_id) + let repo_crypto_key = create_veilid_cryptokey_from_base64(repo_id) .map_err(|e| anyhow::anyhow!("Invalid repo id: {}", e))?; let repo = group .get_repo(&repo_crypto_key) @@ -191,7 +189,7 @@ async fn upload_file( // Upload the file let updated_collection_hash = repo - .upload(&file_name, file_data) + .upload(file_name, file_data) .await .map_err(|e| anyhow::anyhow!("Failed to upload file: {}", e))?; From d1ca56b382fc50dd0d2bc4119a66c9b347e29f01 Mon Sep 17 00:00:00 2001 From: vincent Date: Fri, 23 May 2025 19:26:50 -0400 Subject: [PATCH 04/17] fix: correct macro reference for android_log_print in logging module --- src/logging.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/logging.rs b/src/logging.rs index 4db53e8..d8c3025 100644 --- a/src/logging.rs +++ b/src/logging.rs @@ -38,7 +38,7 @@ pub const LOG_LEVEL_ERROR: i32 = 6; #[macro_export] macro_rules! 
android_log_print {
     ($level:expr, $tag:expr, $($arg:tt)*) => {
-        crate::logging::android_log($level, $tag, &format!("[{}:{}] {}", file!(), line!(), format_args!($($arg)*)))
+        $crate::logging::android_log($level, $tag, &format!("[{}:{}] {}", file!(), line!(), format_args!($($arg)*)))
     }
 }

From 3aea8c06d62cbdf2e5d2dbd085a9da9cd7a31d1d Mon Sep 17 00:00:00 2001
From: vincent
Date: Fri, 23 May 2025 19:27:20 -0400
Subject: [PATCH 05/17] initialize all_files_vec with type annotation

---
 src/groups.rs | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/src/groups.rs b/src/groups.rs
index 3ce5310..3e0a516 100644
--- a/src/groups.rs
+++ b/src/groups.rs
@@ -1,14 +1,14 @@
 use actix_web::{web, delete, get, post, Responder, HttpResponse};
-use save_dweb_backend::common::DHTEntity;
 use serde_json::json;
 use crate::error::AppResult;
 use crate::log_debug;
-use crate::models::IntoSnowbirdGroupsWithNames;
-use crate::models::{RequestName, SnowbirdGroup, RequestUrl};
+use crate::models::{IntoSnowbirdGroupsWithNames, RequestName, RequestUrl, SnowbirdGroup};
 use crate::repos;
-use crate::constants::TAG;
+use crate::constants::{TAG};
+
+use crate::server::get_backend;
 use crate::utils::create_veilid_cryptokey_from_base64;
+use save_dweb_backend::common::DHTEntity;
 
 pub fn scope() -> actix_web::Scope {
     web::scope("/groups")
@@ -160,7 +160,7 @@ async fn refresh_group(group_id: web::Path<String>) -> AppResult<impl Responder>
         "all_files": json!(Vec::<String>::new()) // Initialize empty
     });
     let mut refreshed_files_vec = Vec::new();
-    let mut all_files_vec = Vec::new();
+    let mut all_files_vec: Vec<String> = Vec::new();
     // Get current repo hash and collection info
     match repo.get_hash_from_dht().await {

From d04b7780160150770f1a1aa2ad775559bc3ee8f0 Mon Sep 17 00:00:00 2001
From: vincent
Date: Fri, 23 May 2025 19:29:53 -0400
Subject: [PATCH 06/17] Enhance internet_ready function with timeout and retry logic

---
 src/lib.rs | 46 ++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 42 insertions(+), 4 deletions(-)

diff --git a/src/lib.rs b/src/lib.rs
index 67ddc57..518cf31 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -52,17 +52,55 @@ mod tests {
         files: Vec<String>,
     }
 
-    // Helper: Wait for public internet readiness
+    // Helper: Wait for public internet readiness with timeout and retries
     async fn wait_for_public_internet_ready(backend: &Backend) -> anyhow::Result<()> {
         let mut rx = backend.subscribe_updates().await.ok_or_else(|| anyhow::anyhow!("No update receiver"))?;
+
+        // Use a shorter timeout for tests (10 seconds)
+        let timeout = if cfg!(test) {
+            Duration::from_secs(10)
+        } else {
+            Duration::from_secs(30)
+        };
+
+        log::info!("Waiting for public internet to be ready (timeout: {:?})", timeout);
+
+        // Try up to 3 times with exponential backoff
+        let mut retry_count = 0;
+        let max_retries = 3;
+
+        while retry_count < max_retries {
+            match tokio::time::timeout(timeout, async {
         while let Ok(update) = rx.recv().await {
-            if let VeilidUpdate::Attachment(attachment_state) = update {
+            match &update {
+                VeilidUpdate::Attachment(attachment_state) => {
+                    log::debug!("Veilid attachment state: {:?}", attachment_state);
                 if attachment_state.public_internet_ready {
-                    break;
+                        log::info!("Public internet is ready!");
+                        return Ok(());
+                }
+                }
+                _ => log::trace!("Received Veilid update: {:?}", update),
             }
         }
+        Err(anyhow::anyhow!("Update channel closed before network was ready"))
+            }).await {
+                Ok(result) => return result,
+                Err(_) => {
+                    retry_count += 1;
+                    if retry_count < max_retries {
+                        let backoff = 
Duration::from_secs(2u64.pow(retry_count as u32)); + log::warn!("Timeout waiting for public internet (attempt {}/{})", retry_count, max_retries); + log::info!("Retrying in {:?}...", backoff); + tokio::time::sleep(backoff).await; + // Resubscribe to get a fresh update channel + rx = backend.subscribe_updates().await.ok_or_else(|| anyhow::anyhow!("No update receiver"))?; + } } } } - Ok(()) + + Err(anyhow::anyhow!("Failed to establish public internet connection after {} attempts", max_retries)) } #[actix_web::test] From 56d3debeeee7bc9d6cabeb7a6469ca3f4abedec7 Mon Sep 17 00:00:00 2001 From: vincent Date: Fri, 23 May 2025 19:30:36 -0400 Subject: [PATCH 07/17] add delays after joining and stopping backends --- src/lib.rs | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 518cf31..6210975 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -28,7 +28,7 @@ mod tests { use save_dweb_backend::{common::DHTEntity, constants::TEST_GROUP_NAME}; use serde::{Deserialize, Serialize}; use serde_json::json; - use server::server::{get_backend, init_backend, status, health, BACKEND}; + use server::{get_backend, init_backend, status, health, BACKEND}; use tmpdir::TmpDir; use base64_url::base64; use base64_url::base64::Engine; @@ -71,15 +71,15 @@ mod tests { while retry_count < max_retries { match tokio::time::timeout(timeout, async { - while let Ok(update) = rx.recv().await { + while let Ok(update) = rx.recv().await { match &update { VeilidUpdate::Attachment(attachment_state) => { log::debug!("Veilid attachment state: {:?}", attachment_state); - if attachment_state.public_internet_ready { + if attachment_state.public_internet_ready { log::info!("Public internet is ready!"); return Ok(()); - } - } + } + } _ => log::trace!("Received Veilid update: {:?}", update), } } @@ -438,6 +438,9 @@ mod tests { backend.join_from_url(join_url.as_str()).await?; } + // Add delay to ensure proper synchronization after joining + tokio::time::sleep(Duration::from_secs(5)).await; + let get_file_req = test::TestRequest::get() .uri(&format!( "/api/groups/{}/repos/{}/media/{}", @@ -458,6 +461,8 @@ mod tests { // Clean up backend2.stop().await?; + tokio::time::sleep(Duration::from_secs(5)).await; + { let backend = get_backend().await?; backend.stop().await.expect("Backend failed to stop"); @@ -467,7 +472,7 @@ mod tests { veilid_api2.shutdown().await; Ok(()) - } + } #[actix_web::test] #[serial] From e0603de28c970c4f3c3592c5e4f01d246f560301 Mon Sep 17 00:00:00 2001 From: vincent Date: Fri, 23 May 2025 19:37:41 -0400 Subject: [PATCH 08/17] add CI workflow for linting and testing with Clippy and Rust --- .github/workflows/lint_and_test.yml | 31 +++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 .github/workflows/lint_and_test.yml diff --git a/.github/workflows/lint_and_test.yml b/.github/workflows/lint_and_test.yml new file mode 100644 index 0000000..09c20d4 --- /dev/null +++ b/.github/workflows/lint_and_test.yml @@ -0,0 +1,31 @@ +name: CI + +on: [push, pull_request] + +jobs: + lint_and_test: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest] + rust: [stable] + + steps: + - name: Set up Rust toolchain + uses: hecrj/setup-rust-action@v2 + with: + rust-version: ${{ matrix.rust }} + + - name: Check out the code + uses: actions/checkout@v4 + + - name: Install Clippy + run: rustup component add clippy + + - name: Run Clippy + run: cargo clippy --all-targets --all-features -- -D warnings + + - name: Run tests + env: + RUST_MIN_STACK: 
8388608 + run: cargo test --verbose -- --test-threads=1 --nocapture \ No newline at end of file From d180d0f9c79386b52da6fc89afd618facdb71cf3 Mon Sep 17 00:00:00 2001 From: vincent Date: Fri, 23 May 2025 20:17:18 -0400 Subject: [PATCH 09/17] Update README.md to include API endpoints documentation --- README.md | 54 +++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 53 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index e056ae6..2dac979 100644 --- a/README.md +++ b/README.md @@ -11,4 +11,56 @@ Bindings to the save-dweb-backend for the Save Android app. - Have `save-android` set up in the parent folder - Set up the `ANDROID_NDK_HOME` variable - `./build-android.sh` -- You can now recompile the android app. \ No newline at end of file +- You can now recompile the android app. + +# API Endpoints + +available HTTP API endpoints. + +## general + +* `GET /status` - Returns the server status and version. + +* `GET /health` - Returns the server health status. + +* `POST /api/memberships` - Joins a group. + +## Groups + +base path: `/api/groups` + +* `GET /` - Lists all groups. + +* `POST /` - Creates a new group. + +* `POST /join_from_url` - Joins a group using a URL. + +* `GET /{group_id}` - Retrieves a specific group by its ID. + +* `DELETE /{group_id}` - Deletes a group by its ID. + +* `POST /{group_id}/refresh` - Refreshes a group by its ID. + +## Repositories + +base path: `/api/groups/{group_id}/repos` + +* `GET /` - Lists all repositories within a group. + +* `POST /` - Creates a new repository within a group. + +* `GET /{repo_id}` - Retrieves a specific repository within a group. + +## Media + +base path: `/api/groups/{group_id}/repos/{repo_id}/media` + +* `GET /` - Lists all files in a repository. + +* `POST /{file_name}` - Uploads a file to a repository. + +* `GET /{file_name}` - Downloads a specific file from a repository. + +* `DELETE /{file_name}` - Deletes a specific file from a repository. + + From 44ab31bcdb1617da08f6afb94a08a84d3cf6dd41 Mon Sep 17 00:00:00 2001 From: vincent Date: Fri, 23 May 2025 21:20:46 -0400 Subject: [PATCH 10/17] Refactor test cleanup process by introducing a helper function Update test cases to utilize the new cleanup function for stopping backends and ensure proper synchronization after operations. 
--- src/lib.rs | 140 +++++++++++++++++++++++++++-------------------------- 1 file changed, 72 insertions(+), 68 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 6210975..ad5306f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -103,6 +103,19 @@ mod tests { Err(anyhow::anyhow!("Failed to establish public internet connection after {} attempts", max_retries)) } + // Helper function to properly clean up test resources + async fn cleanup_test_resources(backend: &Backend) -> Result<()> { + log::info!("Cleaning up test resources..."); + + // Stop the backend, which will also handle VeilidAPI shutdown + backend.stop().await?; + + // Add a small delay to ensure everything is cleaned up + tokio::time::sleep(Duration::from_millis(500)).await; + + Ok(()) + } + #[actix_web::test] #[serial] async fn basic_test() -> Result<()> { @@ -291,7 +304,7 @@ mod tests { // Clean up: Stop the backend { let backend = get_backend().await?; - backend.stop().await.expect("Backend failed to stop"); + cleanup_test_resources(&backend).await?; } Ok(()) @@ -324,7 +337,7 @@ mod tests { .await?; let backend2 = Backend::from_dependencies( &path.to_path_buf(), - veilid_api2.clone(), + veilid_api2, update_rx2, store2, ) @@ -373,10 +386,11 @@ mod tests { assert_eq!(resp.repos.len(), 1, "Should have 1 repo after joining"); - backend2.stop().await?; + // Clean up both backends using the helper function + cleanup_test_resources(&backend2).await?; { let backend = get_backend().await?; - backend.stop().await.expect("Backend failed to stop"); + cleanup_test_resources(&backend).await?; } Ok(()) @@ -396,7 +410,7 @@ mod tests { .await?; let backend2 = Backend::from_dependencies( &path.to_path_buf(), - veilid_api2.clone(), + veilid_api2, update_rx2, store2, ) @@ -438,38 +452,44 @@ mod tests { backend.join_from_url(join_url.as_str()).await?; } - // Add delay to ensure proper synchronization after joining - tokio::time::sleep(Duration::from_secs(5)).await; + // Wait for replication to complete + tokio::time::sleep(Duration::from_secs(2)).await; + + // Test HTTP endpoints after replication + // 1. Verify group exists and has correct name + let groups_req = test::TestRequest::get().uri("/api/groups").to_request(); + let groups_resp: GroupsResponse = test::call_and_read_body_json(&app, groups_req).await; + assert_eq!(groups_resp.groups.len(), 1, "Should have one group after joining"); + assert_eq!(groups_resp.groups[0].name, Some(TEST_GROUP_NAME.to_string()), + "Group should have correct name"); + + // 2. Verify repo exists and has correct name + let repos_req = test::TestRequest::get() + .uri(&format!("/api/groups/{}/repos", group.id())) + .to_request(); + let repos_resp: ReposResponse = test::call_and_read_body_json(&app, repos_req).await; + assert_eq!(repos_resp.repos.len(), 1, "Should have one repo after joining"); + assert_eq!(repos_resp.repos[0].name, TEST_GROUP_NAME, "Repo should have correct name"); - let get_file_req = test::TestRequest::get() + // 3. 
Verify file exists and has correct content + let file_req = test::TestRequest::get() .uri(&format!( "/api/groups/{}/repos/{}/media/{}", - group.id().to_string(), - repo.id().to_string(), - file_name + group.id(), repo.id(), file_name )) .to_request(); - let get_file_resp = test::call_service(&app, get_file_req).await; - assert!(get_file_resp.status().is_success(), "File download failed"); - - let got_file_data = test::read_body(get_file_resp).await; - assert_eq!( - got_file_data.to_vec().as_slice(), - file_content, - "Downloaded back file content" - ); - - // Clean up - backend2.stop().await?; - tokio::time::sleep(Duration::from_secs(5)).await; - + let file_resp = test::call_service(&app, file_req).await; + assert!(file_resp.status().is_success(), "File should be accessible after replication"); + let got_content = test::read_body(file_resp).await; + assert_eq!(got_content.to_vec(), file_content.to_vec(), + "File content should match after replication"); + + // Clean up both backends using the helper function + cleanup_test_resources(&backend2).await?; { let backend = get_backend().await?; - backend.stop().await.expect("Backend failed to stop"); + cleanup_test_resources(&backend).await?; } - // Add delay to allow tasks to complete - tokio::time::sleep(Duration::from_secs(2)).await; - veilid_api2.shutdown().await; Ok(()) } @@ -484,11 +504,10 @@ mod tests { // Initialize the app with basic setup let path = TmpDir::new("test_refresh_nonexistent").await?; BACKEND.get_or_init(|| init_backend(path.to_path_buf().as_path())); - let veilid_api = { + { let backend = get_backend().await?; backend.start().await.expect("Backend failed to start"); - backend.get_veilid_api().await.unwrap() - }; + } let app = test::init_service( App::new() @@ -509,11 +528,8 @@ mod tests { // Clean up { let backend = get_backend().await?; - backend.stop().await.expect("Backend failed to stop"); + cleanup_test_resources(&backend).await?; } - // Add delay to allow tasks to complete - tokio::time::sleep(Duration::from_secs(2)).await; - veilid_api.shutdown().await; Ok(()) } @@ -528,11 +544,10 @@ mod tests { // Initialize the app with basic setup let path = TmpDir::new("test_refresh_empty").await?; BACKEND.get_or_init(|| init_backend(path.to_path_buf().as_path())); - let veilid_api = { + { let backend = get_backend().await?; backend.start().await.expect("Backend failed to start"); - backend.get_veilid_api().await.unwrap() - }; + } // Create an empty group let empty_group = { @@ -561,11 +576,8 @@ mod tests { // Clean up { let backend = get_backend().await?; - backend.stop().await.expect("Backend failed to stop"); + cleanup_test_resources(&backend).await?; } - // Add delay to allow tasks to complete - tokio::time::sleep(Duration::from_secs(2)).await; - veilid_api.shutdown().await; Ok(()) } @@ -582,14 +594,13 @@ mod tests { BACKEND.get_or_init(|| init_backend(path.to_path_buf().as_path())); // Start backend and wait for public internet readiness - let veilid_api = { + { let backend = get_backend().await?; backend.start().await.expect("Backend failed to start"); log::info!("Waiting for public internet readiness..."); wait_for_public_internet_ready(&backend).await?; log::info!("Public internet is ready"); - backend.get_veilid_api().await.unwrap() - }; + } // Create a group with a repo and upload a dummy file let (group, repo, dummy_file_name, dummy_file_content) = { @@ -674,11 +685,8 @@ mod tests { log::info!("Cleaning up test resources..."); { let backend = get_backend().await?; - backend.stop().await.expect("Backend failed to 
stop"); + cleanup_test_resources(&backend).await?; } - // Add delay to allow tasks to complete - tokio::time::sleep(Duration::from_secs(2)).await; - veilid_api.shutdown().await; Ok(()) } @@ -693,11 +701,10 @@ mod tests { // Initialize the app with basic setup let path = TmpDir::new("test_refresh_with_file").await?; BACKEND.get_or_init(|| init_backend(path.to_path_buf().as_path())); - let veilid_api = { + { let backend = get_backend().await?; backend.start().await.expect("Backend failed to start"); - backend.get_veilid_api().await.unwrap() - }; + } // Create a group with a repo and upload a file let (group, repo) = { @@ -754,11 +761,8 @@ mod tests { // Clean up { let backend = get_backend().await?; - backend.stop().await.expect("Backend failed to stop"); + cleanup_test_resources(&backend).await?; } - // Add delay to allow tasks to complete - tokio::time::sleep(Duration::from_secs(2)).await; - veilid_api.shutdown().await; Ok(()) } @@ -782,14 +786,14 @@ mod tests { .await?; let backend2 = Backend::from_dependencies( &path.to_path_buf(), - veilid_api2.clone(), + veilid_api2, update_rx2, store2, ) .await .unwrap(); - // Create group and repo in backend2 (without an explicit start or wait_for_public_internet_ready) + // Create group and repo in backend2 let mut group = backend2.create_group().await?; group.set_name(TEST_GROUP_NAME).await?; let repo = group.create_repo().await?; @@ -801,10 +805,10 @@ mod tests { repo.upload(file_name, file_content.to_vec()).await?; log::info!("Uploaded test file to creator's repo"); - // Wait for DHT propagation (after upload, before global BACKEND is initialized) + // Wait for DHT propagation tokio::time::sleep(Duration::from_secs(2)).await; - // Initialize and start the global BACKEND (joiner) (with a wait_for_public_internet_ready) + // Initialize and start the global BACKEND (joiner) BACKEND.get_or_init(|| init_backend(path.to_path_buf().as_path())); { let backend = get_backend().await?; @@ -821,7 +825,7 @@ mod tests { log::info!("Successfully joined group"); } - // Wait for replication (after joining, before refresh endpoint is called) + // Wait for replication tokio::time::sleep(Duration::from_secs(2)).await; // Initialize app for API testing @@ -833,12 +837,13 @@ mod tests { ) .await; - // Test refresh endpoint (after joining and waiting) + // Test refresh endpoint log::info!("Testing refresh endpoint for joined group"); let refresh_req = test::TestRequest::post() .uri(&format!("/api/groups/{}/refresh", group.id())) .to_request(); let refresh_resp = test::call_service(&app, refresh_req).await; + // Verify response status and content assert!(refresh_resp.status().is_success(), "Refresh should succeed"); let refresh_data: serde_json::Value = test::read_body_json(refresh_resp).await; @@ -893,19 +898,18 @@ mod tests { assert_eq!(all_files_second.len(), 1, "Should still have one file in all_files on second refresh"); assert_eq!(all_files_second[0].as_str().unwrap(), file_name, "all_files should still contain the uploaded file on second refresh"); - // Clean up (stop backend2, stop global BACKEND, shutdown veilid_api2) + // Clean up (stop backend2, stop global BACKEND) log::info!("Cleaning up test resources..."); - backend2.stop().await?; + cleanup_test_resources(&backend2).await?; { let backend = get_backend().await?; backend.stop().await.expect("Backend failed to stop"); } - tokio::time::sleep(Duration::from_secs(2)).await; - veilid_api2.shutdown().await; + + tokio::time::sleep(Duration::from_secs(1)).await; Ok(()) } - #[actix_web::test] #[serial] async fn 
test_health_endpoint() -> Result<()> { From 1201badadbcb8afe1751cb2672c7a6a91ed80f60 Mon Sep 17 00:00:00 2001 From: vincent Date: Fri, 23 May 2025 21:22:54 -0400 Subject: [PATCH 11/17] Update CI workflow to trigger only on pull requests --- .github/workflows/lint_and_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/lint_and_test.yml b/.github/workflows/lint_and_test.yml index 09c20d4..3ce9db4 100644 --- a/.github/workflows/lint_and_test.yml +++ b/.github/workflows/lint_and_test.yml @@ -1,6 +1,6 @@ name: CI -on: [push, pull_request] +on: [pull_request] jobs: lint_and_test: From 721ecac499dac3102bd495d4ae010ca747e361dd Mon Sep 17 00:00:00 2001 From: vincent Date: Fri, 23 May 2025 21:27:58 -0400 Subject: [PATCH 12/17] clippy fixes --- src/lib.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index ad5306f..f443bd9 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -32,7 +32,6 @@ mod tests { use tmpdir::TmpDir; use base64_url::base64; use base64_url::base64::Engine; - use env_logger; use save_dweb_backend::backend::Backend; use veilid_core::VeilidUpdate; use serial_test::serial; @@ -436,7 +435,7 @@ mod tests { let file_name = "example.txt"; let file_content = b"Test content for file upload"; - repo.upload(&file_name, file_content.to_vec()).await?; + repo.upload(file_name, file_content.to_vec()).await?; tokio::time::sleep(Duration::from_secs(2)).await; From ae1dd35abe14a0eb20f5a03809f32b1cc2c5b203 Mon Sep 17 00:00:00 2001 From: vincent Date: Fri, 23 May 2025 21:30:22 -0400 Subject: [PATCH 13/17] match route signatures Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- src/media.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/media.rs b/src/media.rs index 5e3a75b..e9bed45 100644 --- a/src/media.rs +++ b/src/media.rs @@ -122,7 +122,7 @@ async fn download_file(path: web::Path) -> AppResult) -> AppResult { +async fn delete_file(path: web::Path) -> AppResult { let path_params = path.into_inner(); let group_id = &path_params.group_id; let repo_id = &path_params.repo_id; From 8bd85f6259e04c987bac78e6bf0d5d9ae09f5322 Mon Sep 17 00:00:00 2001 From: vincent Date: Sat, 24 May 2025 13:43:27 -0400 Subject: [PATCH 14/17] Refactor CI workflow to separate linting and testing jobs, and run individual tests in parallel --- .github/workflows/lint_and_test.yml | 42 ++++++++++++++++++++++------- 1 file changed, 32 insertions(+), 10 deletions(-) diff --git a/.github/workflows/lint_and_test.yml b/.github/workflows/lint_and_test.yml index 3ce9db4..b93999d 100644 --- a/.github/workflows/lint_and_test.yml +++ b/.github/workflows/lint_and_test.yml @@ -3,18 +3,13 @@ name: CI on: [pull_request] jobs: - lint_and_test: - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-latest] - rust: [stable] - + lint: + runs-on: ubuntu-latest steps: - name: Set up Rust toolchain uses: hecrj/setup-rust-action@v2 with: - rust-version: ${{ matrix.rust }} + rust-version: stable - name: Check out the code uses: actions/checkout@v4 @@ -25,7 +20,34 @@ jobs: - name: Run Clippy run: cargo clippy --all-targets --all-features -- -D warnings - - name: Run tests + test: + needs: lint + runs-on: ubuntu-latest + strategy: + matrix: + test_name: [ + "tests::basic_test", + "tests::test_health_endpoint", + "tests::test_join_group", + "tests::test_refresh_empty_group", + "tests::test_refresh_group_with_file", + "tests::test_refresh_group_with_single_repo", + "tests::test_refresh_joined_group", + 
"tests::test_refresh_nonexistent_group", + "tests::test_replicate_group", + "tests::test_upload_list_delete" + ] + + steps: + - name: Set up Rust toolchain + uses: hecrj/setup-rust-action@v2 + with: + rust-version: stable + + - name: Check out the code + uses: actions/checkout@v4 + + - name: Run individual test env: RUST_MIN_STACK: 8388608 - run: cargo test --verbose -- --test-threads=1 --nocapture \ No newline at end of file + run: cargo test --verbose -- ${{ matrix.test_name }} --test-threads=1 --nocapture \ No newline at end of file From a762c5142f5ba549f4fae12d11e8934b480740be Mon Sep 17 00:00:00 2001 From: vincent Date: Sat, 24 May 2025 14:33:52 -0400 Subject: [PATCH 15/17] update test cases for group replication and refresh functionality Updated test names and paths for clarity, improved comments for better understanding, and ensured proper synchronization during group creation, file upload, and refresh operations. Enhanced assertions to verify expected outcomes after each refresh. --- src/lib.rs | 189 +++++++++++++++++++++++++---------------------------- 1 file changed, 89 insertions(+), 100 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index f443bd9..4a2f2a6 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -399,8 +399,9 @@ mod tests { #[serial] async fn test_replicate_group() -> Result<()> { // Initialize the app - let path = TmpDir::new("test_api_repo_file_operations").await?; - + let path = TmpDir::new("test_replicate_group").await?; + + // Create second backend (creator) first let store2 = iroh_blobs::store::fs::Store::load(path.to_path_buf().join("iroh2")).await?; let (veilid_api2, update_rx2) = save_dweb_backend::common::init_veilid( path.to_path_buf().join("test2").as_path(), @@ -415,45 +416,44 @@ mod tests { ) .await .unwrap(); - + + // Initialize main backend (joiner) BACKEND.get_or_init(|| init_backend(path.to_path_buf().as_path())); { let backend = get_backend().await?; backend.start().await.expect("Backend failed to start"); } - + + // Create group and repo in backend2 (creator) let mut group = backend2.create_group().await?; - let join_url = group.get_url(); - group.set_name(TEST_GROUP_NAME).await?; - let repo = group.create_repo().await?; repo.set_name(TEST_GROUP_NAME).await?; - - // Step 3: Upload a file to the repository + + // Upload a file to the repository let file_name = "example.txt"; let file_content = b"Test content for file upload"; - repo.upload(file_name, file_content.to_vec()).await?; - + tokio::time::sleep(Duration::from_secs(2)).await; - + let app = test::init_service( App::new() .service(status) .service(web::scope("/api").service(groups::scope())), ) .await; - + + // Join the group using the main backend { let backend = get_backend().await?; backend.join_from_url(join_url.as_str()).await?; } - + // Wait for replication to complete tokio::time::sleep(Duration::from_secs(2)).await; - + // Test HTTP endpoints after replication // 1. Verify group exists and has correct name let groups_req = test::TestRequest::get().uri("/api/groups").to_request(); @@ -461,7 +461,7 @@ mod tests { assert_eq!(groups_resp.groups.len(), 1, "Should have one group after joining"); assert_eq!(groups_resp.groups[0].name, Some(TEST_GROUP_NAME.to_string()), "Group should have correct name"); - + // 2. 
Verify repo exists and has correct name let repos_req = test::TestRequest::get() .uri(&format!("/api/groups/{}/repos", group.id())) @@ -469,7 +469,7 @@ mod tests { let repos_resp: ReposResponse = test::call_and_read_body_json(&app, repos_req).await; assert_eq!(repos_resp.repos.len(), 1, "Should have one repo after joining"); assert_eq!(repos_resp.repos[0].name, TEST_GROUP_NAME, "Repo should have correct name"); - + // 3. Verify file exists and has correct content let file_req = test::TestRequest::get() .uri(&format!( @@ -482,16 +482,16 @@ mod tests { let got_content = test::read_body(file_resp).await; assert_eq!(got_content.to_vec(), file_content.to_vec(), "File content should match after replication"); - + // Clean up both backends using the helper function cleanup_test_resources(&backend2).await?; { let backend = get_backend().await?; cleanup_test_resources(&backend).await?; } - + Ok(()) - } + } #[actix_web::test] #[serial] @@ -769,14 +769,14 @@ mod tests { #[actix_web::test] #[serial] async fn test_refresh_joined_group() -> Result<()> { - // Initialize logging + // Initialize logging let _ = env_logger::try_init(); log::info!("Testing refresh of joined group"); - // Initialize the app with basic setup - let path = TmpDir::new("test_refresh_joined").await?; + // Initialize the app + let path = TmpDir::new("test_refresh_joined_group").await?; - // Initialize backend2 (creator) first + // Create second backend (creator) first let store2 = iroh_blobs::store::fs::Store::load(path.to_path_buf().join("iroh2")).await?; let (veilid_api2, update_rx2) = save_dweb_backend::common::init_veilid( path.to_path_buf().join("test2").as_path(), @@ -792,121 +792,110 @@ mod tests { .await .unwrap(); - // Create group and repo in backend2 - let mut group = backend2.create_group().await?; - group.set_name(TEST_GROUP_NAME).await?; - let repo = group.create_repo().await?; - repo.set_name("Test Repo").await?; - - // Upload a file (using backend2) to ensure repo has a collection/hash - let file_name = "test.txt"; - let file_content = b"Test content for joined group"; - repo.upload(file_name, file_content.to_vec()).await?; - log::info!("Uploaded test file to creator's repo"); - - // Wait for DHT propagation - tokio::time::sleep(Duration::from_secs(2)).await; - - // Initialize and start the global BACKEND (joiner) + // Initialize main backend (joiner) BACKEND.get_or_init(|| init_backend(path.to_path_buf().as_path())); { let backend = get_backend().await?; backend.start().await.expect("Backend failed to start"); - log::info!("Waiting for public internet readiness for global BACKEND..."); - wait_for_public_internet_ready(&backend).await?; - log::info!("Public internet is ready for global BACKEND"); } - // Join the group (using the global BACKEND) - { - let backend = get_backend().await?; - backend.join_from_url(group.get_url().as_str()).await?; - log::info!("Successfully joined group"); - } + // Create group and repo in backend2 (creator) + let mut group = backend2.create_group().await?; + let join_url = group.get_url(); + group.set_name(TEST_GROUP_NAME).await?; + let repo = group.create_repo().await?; + repo.set_name(TEST_GROUP_NAME).await?; + + // Upload a file to the repository + let file_name = "example.txt"; + let file_content = b"Test content for file upload"; + repo.upload(file_name, file_content.to_vec()).await?; - // Wait for replication tokio::time::sleep(Duration::from_secs(2)).await; - // Initialize app for API testing let app = test::init_service( App::new() .service(status) - .service(health) 
.service(web::scope("/api").service(groups::scope())), ) .await; - // Test refresh endpoint - log::info!("Testing refresh endpoint for joined group"); + // Join the group using the main backend + { + let backend = get_backend().await?; + backend.join_from_url(join_url.as_str()).await?; + } + + // Wait for replication to complete + tokio::time::sleep(Duration::from_secs(2)).await; + + // Test first refresh - should fetch files from network let refresh_req = test::TestRequest::post() .uri(&format!("/api/groups/{}/refresh", group.id())) .to_request(); let refresh_resp = test::call_service(&app, refresh_req).await; + assert!(refresh_resp.status().is_success(), "First refresh should succeed"); - // Verify response status and content - assert!(refresh_resp.status().is_success(), "Refresh should succeed"); let refresh_data: serde_json::Value = test::read_body_json(refresh_resp).await; - log::info!("Refresh response: {:?}", refresh_data); - assert_eq!(refresh_data["status"], "success", "Response should indicate success"); + assert_eq!(refresh_data["status"], "success", "First refresh status should be success"); + let repos = refresh_data["repos"].as_array().expect("repos should be an array"); - assert_eq!(repos.len(), 1, "Should have one repo"); + assert_eq!(repos.len(), 1, "Should have one repo after joining"); + let repo_data = &repos[0]; - assert!(repo_data["repo_hash"].is_string(), "repo should have a hash"); - assert_eq!(repo_data["name"], "Test Repo", "repo name should match"); + assert_eq!(repo_data["name"], TEST_GROUP_NAME, "Repo should have correct name"); - // Verify files from the FIRST refresh - let refreshed_files_first = repo_data["refreshed_files"].as_array() - .expect("refreshed_files should be an array for first refresh"); - assert_eq!(refreshed_files_first.len(), 1, "One file should be refreshed on initial sync"); - assert_eq!(refreshed_files_first[0].as_str().unwrap(), file_name, "The correct file should be in refreshed_files on initial sync"); + // First refresh should have refreshed files + let refreshed_files = repo_data["refreshed_files"].as_array() + .expect("refreshed_files should be an array"); + assert_eq!(refreshed_files.len(), 1, "Should have refreshed 1 file on first refresh"); + assert_eq!(refreshed_files[0].as_str().unwrap(), file_name, + "Should have refreshed the correct file"); - let all_files_first = repo_data["all_files"].as_array().expect("all_files should be an array for first refresh"); - assert_eq!(all_files_first.len(), 1, "Should have one file in all_files on first refresh"); - assert_eq!(all_files_first[0].as_str().unwrap(), file_name, "all_files should contain the uploaded file on first refresh"); + let all_files = repo_data["all_files"].as_array().expect("all_files should be an array"); + assert_eq!(all_files.len(), 1, "Should have one file in all_files"); + assert_eq!(all_files[0].as_str().unwrap(), file_name, + "all_files should contain the uploaded file"); - // Verify file is accessible (after first refresh) - let get_file_req_first = test::TestRequest::get() + // Verify file is accessible after refresh + let get_file_req = test::TestRequest::get() .uri(&format!( "/api/groups/{}/repos/{}/media/{}", group.id(), repo.id(), file_name )) .to_request(); - let get_file_resp_first = test::call_service(&app, get_file_req_first).await; - assert!(get_file_resp_first.status().is_success(), "File should be accessible after first refresh"); - let got_content_first = test::read_body(get_file_resp_first).await; - assert_eq!(got_content_first.to_vec(), 
file_content.to_vec(), "File content should match after first refresh"); - - // ---- SECOND REFRESH ---- - log::info!("Testing second refresh endpoint for joined group (should be no-op)"); - let refresh_req_second = test::TestRequest::post() + let get_file_resp = test::call_service(&app, get_file_req).await; + assert!(get_file_resp.status().is_success(), "File should be accessible after refresh"); + let got_content = test::read_body(get_file_resp).await; + assert_eq!(got_content.to_vec(), file_content.to_vec(), + "File content should match after refresh"); + + // Test second refresh - should be no-op since all files are present + let refresh_req2 = test::TestRequest::post() .uri(&format!("/api/groups/{}/refresh", group.id())) .to_request(); - let refresh_resp_second = test::call_service(&app, refresh_req_second).await; - assert!(refresh_resp_second.status().is_success(), "Second refresh should succeed"); - let refresh_data_second: serde_json::Value = test::read_body_json(refresh_resp_second).await; - assert_eq!(refresh_data_second["status"], "success", "Second refresh response should indicate success"); - let repos_second = refresh_data_second["repos"].as_array().expect("repos should be an array for second refresh"); - assert_eq!(repos_second.len(), 1, "Should have one repo in second refresh"); - let repo_data_second = &repos_second[0]; - - let refreshed_files_second = repo_data_second["refreshed_files"].as_array() - .expect("refreshed_files should be an array for second refresh"); - assert!(refreshed_files_second.is_empty(), "No files should be refreshed on second sync as all are present"); - - let all_files_second = repo_data_second["all_files"].as_array().expect("all_files should be an array for second refresh"); - assert_eq!(all_files_second.len(), 1, "Should still have one file in all_files on second refresh"); - assert_eq!(all_files_second[0].as_str().unwrap(), file_name, "all_files should still contain the uploaded file on second refresh"); - - // Clean up (stop backend2, stop global BACKEND) - log::info!("Cleaning up test resources..."); + let refresh_resp2 = test::call_service(&app, refresh_req2).await; + assert!(refresh_resp2.status().is_success(), "Second refresh should succeed"); + + let refresh_data2: serde_json::Value = test::read_body_json(refresh_resp2).await; + assert_eq!(refresh_data2["status"], "success", "Second refresh status should be success"); + + let repos2 = refresh_data2["repos"].as_array().expect("repos should be an array"); + assert_eq!(repos2.len(), 1, "Should still have one repo"); + + let repo_data2 = &repos2[0]; + let refreshed_files2 = repo_data2["refreshed_files"].as_array() + .expect("refreshed_files should be an array"); + assert!(refreshed_files2.is_empty(), + "No files should be refreshed on second call since all are present"); + + // Clean up both backends using the helper function cleanup_test_resources(&backend2).await?; { let backend = get_backend().await?; - backend.stop().await.expect("Backend failed to stop"); + cleanup_test_resources(&backend).await?; } - tokio::time::sleep(Duration::from_secs(1)).await; - Ok(()) } #[actix_web::test] From 112ea523cbc9e04ced2184ca5a4cd9725a69ef91 Mon Sep 17 00:00:00 2001 From: vincent Date: Sat, 24 May 2025 17:01:13 -0400 Subject: [PATCH 16/17] Increase timeout duration and retry count in internet readiness checks --- src/lib.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 4a2f2a6..2485f13 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -55,18 +55,17 @@ mod 
tests { async fn wait_for_public_internet_ready(backend: &Backend) -> anyhow::Result<()> { let mut rx = backend.subscribe_updates().await.ok_or_else(|| anyhow::anyhow!("No update receiver"))?; - // Use a shorter timeout for tests (10 seconds) let timeout = if cfg!(test) { - Duration::from_secs(10) + Duration::from_secs(15) } else { Duration::from_secs(30) }; log::info!("Waiting for public internet to be ready (timeout: {:?})", timeout); - // Try up to 3 times with exponential backoff + // Try up to 6 times with exponential backoff let mut retry_count = 0; - let max_retries = 3; + let max_retries = 6; while retry_count < max_retries { match tokio::time::timeout(timeout, async { From aa07fb44b7c9a68ddf1fd8d4ed4526e751d728a9 Mon Sep 17 00:00:00 2001 From: vincent Date: Sun, 25 May 2025 11:34:44 -0400 Subject: [PATCH 17/17] Update README with API documentation --- API.md | 491 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ README.md | 39 ++--- 2 files changed, 502 insertions(+), 28 deletions(-) create mode 100644 API.md diff --git a/API.md b/API.md new file mode 100644 index 0000000..6c5b69d --- /dev/null +++ b/API.md @@ -0,0 +1,491 @@ +# Save-Rust API Documentation + +This document provides detailed information about the Save-Rust API endpoints, including request/response schemas and error handling. + +## Table of Contents +- [General Endpoints](#general-endpoints) +- [Groups Endpoints](#groups-endpoints) +- [Repositories Endpoints](#repositories-endpoints) +- [Media Endpoints](#media-endpoints) + +## General Endpoints + +### GET /status +Returns the server status and version information. + +Response: +```json +{ + "status": "running", + "version": "string" // Current version of the server +} +``` + +Error Response (500 Internal Server Error): +```json +{ + "status": "error", + "error": "Something went wrong: [detailed error message]" +} +``` + +### GET /health +Returns the server health status. + +Response: +```json +{ + "status": "OK" +} +``` + +Error Response (500 Internal Server Error): +```json +{ + "status": "error", + "error": "Something went wrong: [detailed error message]" +} +``` + +### POST /api/memberships +Joins a group using a membership URL. + +Request Body: +```json +{ + "group_url": "string" // URL containing group information +} +``` + +Response: +```json +{ + "status_message": "string" // Success or error message +} +``` + +Error Response (400 Bad Request): +```json +{ + "status": "error", + "error": "Invalid group URL: [detailed error message]" +} +``` + +Error Response (500 Internal Server Error): +```json +{ + "status": "error", + "error": "Failed to join group: [detailed error message]" +} +``` + +## Groups Endpoints + +Base path: `/api/groups` + +### GET / +Lists all groups. + +Response: +```json +{ + "groups": [ + { + "key": "string", // Base64 encoded group ID + "name": "string", // Optional group name + "created_at": "string" // ISO 8601 timestamp + } + ] +} +``` + +Error Response (500 Internal Server Error): +```json +{ + "status": "error", + "error": "Failed to list groups: [detailed error message]" +} +``` + +### POST / +Creates a new group. 
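+
+As a quick illustration of the request and response schemas below, here is a
+minimal Rust sketch (not part of the server; it assumes the server is
+reachable at `127.0.0.1:8080` and that the hypothetical client uses the
+`reqwest` crate with its `json` feature, plus `tokio` and `serde_json`):
+
+```rust
+use serde_json::json;
+
+#[tokio::main]
+async fn main() -> Result<(), reqwest::Error> {
+    let client = reqwest::Client::new();
+    // POST /api/groups with the desired group name in the JSON body.
+    let resp = client
+        .post("http://127.0.0.1:8080/api/groups")
+        .json(&json!({ "name": "my-group" }))
+        .send()
+        .await?;
+    // On success, the body carries the new group's key, name, and created_at.
+    let group: serde_json::Value = resp.json().await?;
+    println!("created group with key {}", group["key"]);
+    Ok(())
+}
+```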
+ +Request Body: +```json +{ + "name": "string" // Name for the new group +} +``` + +Response: +```json +{ + "key": "string", // Base64 encoded group ID + "name": "string", // Group name + "created_at": "string" // ISO 8601 timestamp +} +``` + +Error Response (400 Bad Request): +```json +{ + "status": "error", + "error": "Invalid group name: [detailed error message]" +} +``` + +Error Response (500 Internal Server Error): +```json +{ + "status": "error", + "error": "Failed to create group: [detailed error message]" +} +``` + +### POST /join_from_url +Joins a group using a URL. + +Request Body: +```json +{ + "group_url": "string" // URL containing group information +} +``` + +Response: +```json +{ + "key": "string", // Base64 encoded group ID + "name": "string", // Group name + "created_at": "string" // ISO 8601 timestamp +} +``` + +Error Response (400 Bad Request): +```json +{ + "status": "error", + "error": "Invalid group URL: [detailed error message]" +} +``` + +Error Response (500 Internal Server Error): +```json +{ + "status": "error", + "error": "Failed to join group: [detailed error message]" +} +``` + +### GET /{group_id} +Retrieves a specific group by its ID. + +Response: +```json +{ + "key": "string", // Base64 encoded group ID + "name": "string", // Group name + "created_at": "string" // ISO 8601 timestamp +} +``` + +Error Response (400 Bad Request): +```json +{ + "status": "error", + "error": "Invalid group ID: [detailed error message]" +} +``` + +Error Response (404 Not Found): +```json +{ + "status": "error", + "error": "Group not found: [detailed error message]" +} +``` + +### DELETE /{group_id} +Deletes a group by its ID. + +Response: +```json +{ + "status": "success" +} +``` + +Error Response (400 Bad Request): +```json +{ + "status": "error", + "error": "Invalid group ID: [detailed error message]" +} +``` + +Error Response (404 Not Found): +```json +{ + "status": "error", + "error": "Group not found: [detailed error message]" +} +``` + +### POST /{group_id}/refresh +Refreshes a group by its ID. + +Response: +```json +{ + "status": "success", + "repos": [ + { + "name": "string", // Repository name + "can_write": boolean, // Whether the user can write to this repo + "repo_hash": "string", // Hash of the repository + "refreshed_files": [ // List of files that were refreshed + "string" // File names + ], + "all_files": [ // List of all files in the repository + "string" // File names + ] + } + ] +} +``` + +Error Response (400 Bad Request): +```json +{ + "status": "error", + "error": "Invalid group ID: [detailed error message]" +} +``` + +Error Response (404 Not Found): +```json +{ + "status": "error", + "error": "Group not found: [detailed error message]" +} +``` + +## Repositories Endpoints + +Base path: `/api/groups/{group_id}/repos` + +### GET / +Lists all repositories within a group. + +Response: +```json +{ + "repos": [ + { + "key": "string", // Base64 encoded repository ID + "name": "string", // Repository name + "created_at": "string" // ISO 8601 timestamp + } + ] +} +``` + +Error Response (400 Bad Request): +```json +{ + "status": "error", + "error": "Invalid group ID: [detailed error message]" +} +``` + +Error Response (404 Not Found): +```json +{ + "status": "error", + "error": "Group not found: [detailed error message]" +} +``` + +### POST / +Creates a new repository within a group. 
+
+### POST /
+Creates a new repository within a group.
+
+Request Body:
+```json
+{
+  "name": "string" // Name for the new repository
+}
+```
+
+Response:
+```json
+{
+  "key": "string", // Base64-encoded repository ID
+  "name": "string", // Repository name
+  "created_at": "string" // ISO 8601 timestamp
+}
+```
+
+Error Response (400 Bad Request):
+```json
+{
+  "status": "error",
+  "error": "Invalid group ID or repository name: [detailed error message]"
+}
+```
+
+Error Response (404 Not Found):
+```json
+{
+  "status": "error",
+  "error": "Group not found: [detailed error message]"
+}
+```
+
+### GET /{repo_id}
+Retrieves a specific repository within a group.
+
+Response:
+```json
+{
+  "key": "string", // Base64-encoded repository ID
+  "name": "string", // Repository name
+  "created_at": "string" // ISO 8601 timestamp
+}
+```
+
+Error Response (400 Bad Request):
+```json
+{
+  "status": "error",
+  "error": "Invalid group ID or repository ID: [detailed error message]"
+}
+```
+
+Error Response (404 Not Found):
+```json
+{
+  "status": "error",
+  "error": "Group or repository not found: [detailed error message]"
+}
+```
+
+## Media Endpoints
+
+Base path: `/api/groups/{group_id}/repos/{repo_id}/media`
+
+### GET /
+Lists all files in a repository.
+
+Response:
+```json
+{
+  "files": [
+    {
+      "name": "string", // File name
+      "size": number, // File size in bytes
+      "created_at": "string" // ISO 8601 timestamp
+    }
+  ]
+}
+```
+
+Error Response (400 Bad Request):
+```json
+{
+  "status": "error",
+  "error": "Invalid group ID or repository ID: [detailed error message]"
+}
+```
+
+Error Response (404 Not Found):
+```json
+{
+  "status": "error",
+  "error": "Group or repository not found: [detailed error message]"
+}
+```
+
+### POST /{file_name}
+Uploads a file to a repository.
+
+Request Body: Binary file content
+
+Response:
+```json
+{
+  "name": "string", // File name
+  "updated_collection_hash": "string" // Hash of the updated collection
+}
+```
+
+Error Response (400 Bad Request):
+```json
+{
+  "status": "error",
+  "error": "Invalid group ID, repository ID, or file content: [detailed error message]"
+}
+```
+
+Error Response (404 Not Found):
+```json
+{
+  "status": "error",
+  "error": "Group or repository not found: [detailed error message]"
+}
+```
+
+Error Response (413 Payload Too Large):
+```json
+{
+  "status": "error",
+  "error": "File too large: [detailed error message]"
+}
+```
+
+### GET /{file_name}
+Downloads a specific file from a repository.
+
+Response: Binary file content with the appropriate Content-Type header
+
+Error Response (400 Bad Request):
+```json
+{
+  "status": "error",
+  "error": "Invalid group ID, repository ID, or file name: [detailed error message]"
+}
+```
+
+Error Response (404 Not Found):
+```json
+{
+  "status": "error",
+  "error": "Group, repository, or file not found: [detailed error message]"
+}
+```
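+
+A sketch of uploading a file and fetching it back, under the same illustrative assumptions as the earlier examples (`GROUP_ID` and `REPO_ID` are placeholders for the Base64-encoded keys returned by the groups and repositories endpoints):
+
+```rust
+const BASE_URL: &str = "http://127.0.0.1:8080"; // hypothetical; see note above
+
+#[tokio::main]
+async fn main() -> Result<(), reqwest::Error> {
+    let client = reqwest::Client::new();
+    // Placeholder IDs; substitute real keys from earlier calls.
+    let media = format!("{}/api/groups/GROUP_ID/repos/REPO_ID/media", BASE_URL);
+
+    // POST /{file_name} sends the raw bytes as the request body.
+    client
+        .post(format!("{}/hello.txt", media))
+        .body("hello, dweb")
+        .send()
+        .await?
+        .error_for_status()?;
+
+    // GET /{file_name} returns the file's bytes.
+    let bytes = client
+        .get(format!("{}/hello.txt", media))
+        .send()
+        .await?
+        .bytes()
+        .await?;
+    println!("downloaded {} bytes", bytes.len());
+    Ok(())
+}
+```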
+
+### DELETE /{file_name}
+Deletes a specific file from a repository.
+
+Response:
+```json
+{
+  "collection_hash": "string" // Hash of the updated collection after deletion
+}
+```
+
+Error Response (400 Bad Request):
+```json
+{
+  "status": "error",
+  "error": "Invalid group ID, repository ID, or file name: [detailed error message]"
+}
+```
+
+Error Response (404 Not Found):
+```json
+{
+  "status": "error",
+  "error": "Group, repository, or file not found: [detailed error message]"
+}
+```
\ No newline at end of file
diff --git a/README.md b/README.md
index 2dac979..206d26f 100644
--- a/README.md
+++ b/README.md
@@ -13,54 +13,37 @@ Bindings to the save-dweb-backend for the Save Android app.
 - `./build-android.sh`
 - You can now recompile the android app.
 
-# API Endpoints
+# API Documentation
 
-available HTTP API endpoints.
+The Save-Rust API provides HTTP endpoints for managing groups, repositories, and media files. For detailed API documentation, including request/response schemas and error handling, please see [API.md](API.md).
 
-## general
+## Available Endpoints
 
+### General
 * `GET /status` - Returns the server status and version.
-
 * `GET /health` - Returns the server health status.
-
 * `POST /api/memberships` - Joins a group.
 
-## Groups
-
-base path: `/api/groups`
-
+### Groups
+Base path: `/api/groups`
 * `GET /` - Lists all groups.
-
 * `POST /` - Creates a new group.
-
 * `POST /join_from_url` - Joins a group using a URL.
-
 * `GET /{group_id}` - Retrieves a specific group by its ID.
-
 * `DELETE /{group_id}` - Deletes a group by its ID.
-
 * `POST /{group_id}/refresh` - Refreshes a group by its ID.
 
-## Repositories
-
-base path: `/api/groups/{group_id}/repos`
-
+### Repositories
+Base path: `/api/groups/{group_id}/repos`
 * `GET /` - Lists all repositories within a group.
-
 * `POST /` - Creates a new repository within a group.
-
 * `GET /{repo_id}` - Retrieves a specific repository within a group.
 
-## Media
-
-base path: `/api/groups/{group_id}/repos/{repo_id}/media`
-
+### Media
+Base path: `/api/groups/{group_id}/repos/{repo_id}/media`
 * `GET /` - Lists all files in a repository.
-
 * `POST /{file_name}` - Uploads a file to a repository.
-
 * `GET /{file_name}` - Downloads a specific file from a repository.
-
 * `DELETE /{file_name}` - Deletes a specific file from a repository.
-
+
+For detailed information about request/response formats, error handling, and examples, please refer to the [API Documentation](API.md).
\ No newline at end of file