Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 16 additions & 0 deletions .git-msg.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
refactor(auth): split AuthState into auth_state.rs for unit testing

Tauri command bodies can't be unit-tested without spinning up a Tauri
runtime + WebView2 host (mock_runtime crashes with STATUS_ENTRYPOINT_NOT_FOUND
on Windows). Standard pattern: keep #[tauri::command] functions as thin
adapters and put all branchable logic in a separately-testable module.

- Move AuthState, AuthStatus, DeviceCodeView, AuthCommandError, and the
From<AuthError> impl into twitch_auth/auth_state.rs along with all
  unit tests (now reaching 91.79% coverage on that file).
- commands.rs is now ~50 lines of pure adapter delegation. Added to
codecov ignore list alongside lib.rs and main.rs (same rationale:
Tauri framework wiring without testable branches).
- mod.rs re-exports updated; no external API change.

123 lib tests still pass, clippy clean.
48 changes: 38 additions & 10 deletions apps/desktop/src-tauri/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -15,9 +15,7 @@ pub use host::parse_batch;
#[doc(hidden)]
pub use message::UnifiedMessage;

#[cfg(windows)]
use tauri::Manager;
use tauri::Runtime;
use tauri::{Manager, Runtime};
use tracing_subscriber::EnvFilter;

#[tauri::command]
Expand Down Expand Up @@ -46,25 +44,55 @@ pub fn run() {

tauri::Builder::default()
.plugin(tauri_plugin_shell::init())
.invoke_handler(tauri::generate_handler![get_platform])
.invoke_handler(tauri::generate_handler![
get_platform,
twitch_auth::commands::twitch_auth_status,
twitch_auth::commands::twitch_start_login,
twitch_auth::commands::twitch_complete_login,
twitch_auth::commands::twitch_cancel_login,
twitch_auth::commands::twitch_logout,
])
.setup(setup)
.run(tauri::generate_context!())
.expect("failed to run prismoid");
}

/// Tauri setup hook. On Windows, kicks off the sidecar supervisor which owns
/// the full lifecycle (spawn, bootstrap, drain, respawn-on-terminate). On
/// other platforms the supervisor is not wired up yet (ADR 18), so we log
/// and let the Tauri app launch without it so frontend work can proceed.
/// Tauri setup hook. Builds the shared `AuthManager` + wakeup notifier,
/// registers them as managed state for the auth UI commands, and (on
/// Windows) hands clones to the sidecar supervisor so a successful
/// sign-in wakes it from `waiting_for_auth` immediately. Non-Windows
/// targets log a warning and let the frontend boot without the sidecar
/// (ADR 18).
#[allow(clippy::unnecessary_wraps)]
fn setup<R: Runtime>(app: &mut tauri::App<R>) -> Result<(), Box<dyn std::error::Error>> {
use std::sync::Arc;
use tokio::sync::Notify;
use twitch_auth::{AuthManager, AuthState, KeychainStore, TWITCH_CLIENT_ID};

let http_client = match reqwest::Client::builder()
.redirect(reqwest::redirect::Policy::none())
.build()
{
Ok(client) => client,
Err(err) => {
tracing::error!(
error = %err,
"failed to build reqwest client; skipping auth manager and sidecar"
);
return Ok(());
}
};
let auth = Arc::new(AuthManager::builder(TWITCH_CLIENT_ID).build(KeychainStore, http_client));
let wakeup = Arc::new(Notify::new());
app.manage(AuthState::new(auth.clone(), wakeup.clone()));

#[cfg(windows)]
{
sidecar_supervisor::spawn(app.app_handle().clone());
sidecar_supervisor::spawn(app.app_handle().clone(), auth, wakeup);
}
#[cfg(not(windows))]
{
let _ = app;
let _ = (auth, wakeup);
tracing::warn!(
"sidecar lifecycle is Windows-only for now; launching frontend without sidecar"
);
Expand Down
56 changes: 28 additions & 28 deletions apps/desktop/src-tauri/src/sidecar_supervisor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,9 @@ use crate::message::UnifiedMessage;
#[cfg(windows)]
use crate::ringbuf::{RawHandle, RingBufReader, WaitOutcome, DEFAULT_CAPACITY};
#[cfg(windows)]
use crate::twitch_auth::{AuthError, AuthManager, KeychainStore, TWITCH_CLIENT_ID};
use crate::twitch_auth::{AuthError, AuthManager, TWITCH_CLIENT_ID};
#[cfg(windows)]
use tokio::sync::Notify;

/// Supervisor timings. Defaults are production values; tests can override.
#[derive(Debug, Clone)]
Expand Down Expand Up @@ -97,36 +99,32 @@ pub struct SidecarStatus {
}

/// Kicks off the supervisor. Returns immediately; the supervisor runs on
/// a Tauri async task until the app exits.
/// a Tauri async task until the app exits. The caller passes in the
/// shared [`AuthManager`] (also held by Tauri managed state for the
/// auth UI commands) and a `wakeup` notifier the supervisor awaits
/// while idle in `waiting_for_auth` so a successful sign-in starts the
/// sidecar within milliseconds instead of waiting out the 30 s poll.
#[cfg(windows)]
pub fn spawn<R: Runtime>(app: AppHandle<R>) {
pub fn spawn<R: Runtime>(app: AppHandle<R>, auth: Arc<AuthManager>, wakeup: Arc<Notify>) {
let cfg = SupervisorConfig::default();
tauri::async_runtime::spawn(async move {
supervise(app, cfg).await;
supervise(app, cfg, auth, wakeup).await;
});
}

#[cfg(windows)]
async fn supervise<R: Runtime>(app: AppHandle<R>, cfg: SupervisorConfig) {
// `client_id` is a compile-time const (RFC 8252 public client; not a
// secret). The broadcaster/user identifiers ride inside the persisted
// [`TwitchTokens`] itself (populated from the DCF response, stable
// across refresh) so the supervisor never needs env vars or user
// input for them. Tokens live in the OS keychain, seeded via
async fn supervise<R: Runtime>(
app: AppHandle<R>,
cfg: SupervisorConfig,
auth: Arc<AuthManager>,
wakeup: Arc<Notify>,
) {
// `client_id` lives in the shared AuthManager; broadcaster/user
// identifiers ride inside the persisted [`TwitchTokens`] itself
// (populated from the DCF response, stable across refresh).
// Tokens live in the OS keychain, seeded via the SignIn overlay or
// `cargo run --bin prismoid_dcf` and rotated automatically below
// (ADR 29).
let http_client = match reqwest::Client::builder()
.redirect(reqwest::redirect::Policy::none())
.build()
{
Ok(c) => c,
Err(e) => {
tracing::error!(error = %e, "failed to build reqwest client; supervisor idling");
return;
}
};
let auth = AuthManager::builder(TWITCH_CLIENT_ID).build(KeychainStore, http_client);

let mut attempt: u32 = 0;
let mut backoff = cfg.initial_backoff;

Expand All @@ -140,14 +138,16 @@ async fn supervise<R: Runtime>(app: AppHandle<R>, cfg: SupervisorConfig) {
Ok(t) => t,
Err(AuthError::NoTokens) | Err(AuthError::RefreshTokenInvalid) => {
tracing::warn!(
"no valid Twitch tokens in keychain; run `cargo run --bin prismoid_dcf` to seed"
"no valid Twitch tokens in keychain; click Sign in with Twitch in the app"
);
emit_status(&app, "waiting_for_auth", attempt, None);
// Poll the keychain every 30 s so the user can seed
// mid-run without a restart. Not a respawn-pressure
// scenario, so we stay on a fixed interval rather than
// the exponential ladder.
tokio::time::sleep(Duration::from_secs(30)).await;
// Wait on the shared notifier (fired by
// `twitch_complete_login` / `twitch_logout`) with a 30 s
// floor so a process that boots before the keychain
// service still recovers without a restart. Not a
// respawn-pressure scenario, so we stay on a fixed
// interval rather than the exponential ladder.
let _ = tokio::time::timeout(Duration::from_secs(30), wakeup.notified()).await;
continue;
}
Err(e) => {
Expand Down
Loading
Loading