diff --git a/OpenOats/Sources/OpenOats/App/MeetingDetectionController.swift b/OpenOats/Sources/OpenOats/App/MeetingDetectionController.swift index c519ad90..c922dd3d 100644 --- a/OpenOats/Sources/OpenOats/App/MeetingDetectionController.swift +++ b/OpenOats/Sources/OpenOats/App/MeetingDetectionController.swift @@ -1,9 +1,6 @@ import AppKit import Foundation import Observation -import os - -private let logger = Logger(subsystem: "com.openoats.app", category: "MeetingDetection") /// One-shot events emitted by the detection controller for consumption by the coordinator. enum DetectionEvent: Sendable { @@ -173,7 +170,7 @@ final class MeetingDetectionController { installSleepObserver() if settings.detectionLogEnabled { - logger.info("Detection system started") + Log.meetingDetection.info("Detection system started") } } @@ -208,7 +205,7 @@ final class MeetingDetectionController { detectedApp = nil lastUtteranceAt = nil - logger.info("Detection system stopped") + Log.meetingDetection.info("Detection system stopped") } // MARK: - Sleep Observer @@ -222,7 +219,7 @@ final class MeetingDetectionController { Task { @MainActor [weak self] in guard let self else { return } if self.activeSettings?.detectionLogEnabled == true { - logger.info("System sleep detected, yielding event") + Log.meetingDetection.info("System sleep detected, yielding event") } self.eventContinuation.yield(.systemSleep) } @@ -248,7 +245,7 @@ final class MeetingDetectionController { let elapsed = Date().timeIntervalSince(lastUtterance) if elapsed >= Double(timeoutMinutes) * 60.0 { if self.activeSettings?.detectionLogEnabled == true { - logger.info("Silence timeout (\(timeoutMinutes)m), stopping") + Log.meetingDetection.info("Silence timeout (\(timeoutMinutes, privacy: .public)m), stopping") } self.eventContinuation.yield(.silenceTimeout) break @@ -292,7 +289,7 @@ final class MeetingDetectionController { if !isRunning { if self.activeSettings?.detectionLogEnabled == true { - logger.info("Meeting app exited 
(\(bundleID, privacy: .public)), yielding event") + Log.meetingDetection.info("Meeting app exited (\(bundleID, privacy: .public)), yielding event") } self.eventContinuation.yield(.meetingAppExited) break @@ -340,13 +337,13 @@ final class MeetingDetectionController { } if activeSettings?.detectionLogEnabled == true { - logger.info("Detected: \(app?.name ?? "unknown", privacy: .public)") + Log.meetingDetection.info("Detected: \(app?.name ?? "unknown", privacy: .public)") } let posted = await notificationService?.postMeetingDetected(appName: app?.name) ?? false if !posted { if activeSettings?.detectionLogEnabled == true { - logger.debug("Failed to post notification (permission denied?)") + Log.meetingDetection.debug("Failed to post notification (permission denied?)") } } } @@ -385,7 +382,7 @@ final class MeetingDetectionController { } if activeSettings?.detectionLogEnabled == true { - logger.debug("User dismissed as not a meeting") + Log.meetingDetection.debug("User dismissed as not a meeting") } } @@ -402,7 +399,7 @@ final class MeetingDetectionController { } if activeSettings?.detectionLogEnabled == true { - logger.debug("User chose to ignore this app permanently") + Log.meetingDetection.debug("User chose to ignore this app permanently") } } @@ -410,7 +407,7 @@ final class MeetingDetectionController { eventContinuation.yield(.dismissed) if activeSettings?.detectionLogEnabled == true { - logger.debug("User dismissed notification") + Log.meetingDetection.debug("User dismissed notification") } } @@ -418,7 +415,7 @@ final class MeetingDetectionController { eventContinuation.yield(.timeout) if activeSettings?.detectionLogEnabled == true { - logger.debug("Notification timed out") + Log.meetingDetection.debug("Notification timed out") } } } diff --git a/OpenOats/Sources/OpenOats/Audio/AudioRecorder.swift b/OpenOats/Sources/OpenOats/Audio/AudioRecorder.swift index 74845b7d..73b38e77 100644 --- a/OpenOats/Sources/OpenOats/Audio/AudioRecorder.swift +++ 
b/OpenOats/Sources/OpenOats/Audio/AudioRecorder.swift @@ -1,4 +1,5 @@ @preconcurrency import AVFoundation +import os /// Records mic and system audio to temporary CAF files during a session, /// then merges and encodes them into a single M4A (AAC) file on finalization. @@ -67,14 +68,14 @@ final class AudioRecorder: @unchecked Sendable { guard let monoFormat = AVAudioFormat( standardFormatWithSampleRate: buffer.format.sampleRate, channels: 1 ) else { - diagLog("[RECORDER] mic file SKIP: cannot create mono format at \(buffer.format.sampleRate)Hz") + Log.recorder.error("Mic file SKIP: cannot create mono format at \(buffer.format.sampleRate, privacy: .public)Hz") return } do { micFile = try AVAudioFile(forWriting: url, settings: monoFormat.settings) - diagLog("[RECORDER] mic file created: \(url.lastPathComponent) mono at \(buffer.format.sampleRate)Hz") + Log.recorder.info("Mic file created: \(url.lastPathComponent, privacy: .private(mask: .hash)) mono at \(buffer.format.sampleRate, privacy: .public)Hz") } catch { - diagLog("[RECORDER] mic file creation FAILED: \(error)") + Log.recorder.error("Mic file creation failed: \(error, privacy: .public)") return } } @@ -156,19 +157,19 @@ final class AudioRecorder: @unchecked Sendable { } } } else { - diagLog("[RECORDER] mic write SKIP: unsupported buffer format \(buffer.format.commonFormat.rawValue)") + Log.recorder.error("Mic write SKIP: unsupported buffer format \(buffer.format.commonFormat.rawValue, privacy: .public)") return } micWriteCount += 1 if micWriteCount <= 5 || micWriteCount % 100 == 0 { let peak = Self.peakLevel(monoBuf) - diagLog("[RECORDER] mic write #\(micWriteCount): frames=\(frames) peak=\(peak)") + Log.recorder.debug("Mic write #\(self.micWriteCount, privacy: .public): frames=\(frames, privacy: .public) peak=\(peak, privacy: .public)") } do { try micFile?.write(from: monoBuf) } catch { - diagLog("[RECORDER] mic write ERROR: \(error)") + Log.recorder.error("Mic write error: \(error, privacy: .public)") } } } 
@@ -185,7 +186,7 @@ final class AudioRecorder: @unchecked Sendable { interleaved: buffer.format.isInterleaved ) } catch { - diagLog("[RECORDER] sys file creation FAILED: \(error)") + Log.recorder.error("Sys file creation failed: \(error, privacy: .public)") return } } @@ -204,7 +205,7 @@ final class AudioRecorder: @unchecked Sendable { do { try sysFile?.write(from: buffer) } catch { - diagLog("[RECORDER] sys write ERROR: \(error)") + Log.recorder.error("Sys write error: \(error, privacy: .public)") } } } @@ -310,7 +311,7 @@ final class AudioRecorder: @unchecked Sendable { }() guard micReader != nil || sysReader != nil else { - diagLog("[RECORDER] No audio data recorded") + Log.recorder.info("No audio data recorded") return } @@ -318,12 +319,12 @@ final class AudioRecorder: @unchecked Sendable { guard let targetFormat = AVAudioFormat(standardFormatWithSampleRate: targetRate, channels: 1) else { return } if let mic = micReader { - diagLog("[RECORDER] mic temp: \(mic.length) frames, format=\(mic.processingFormat)") + Log.recorder.info("Mic temp: \(mic.length, privacy: .public) frames, format=\(mic.processingFormat, privacy: .public)") } if let sys = sysReader { - diagLog("[RECORDER] sys temp: \(sys.length) frames, format=\(sys.processingFormat)") + Log.recorder.info("Sys temp: \(sys.length, privacy: .public) frames, format=\(sys.processingFormat, privacy: .public)") if let eff = sysEffectiveRate { - diagLog("[RECORDER] sys effective sample rate: \(eff) Hz (declared: \(sys.processingFormat.sampleRate) Hz)") + Log.recorder.info("Sys effective sample rate: \(eff, privacy: .public) Hz (declared: \(sys.processingFormat.sampleRate, privacy: .public) Hz)") } } @@ -334,7 +335,7 @@ final class AudioRecorder: @unchecked Sendable { let effectiveRate = sysEffectiveRate, abs(effectiveRate - sysReader.processingFormat.sampleRate) > 1000 { - diagLog("[RECORDER] sys rate mismatch: effective=\(effectiveRate) vs declared=\(sysReader.processingFormat.sampleRate), resampling from 
effective rate") + Log.recorder.info("Sys rate mismatch: effective=\(effectiveRate, privacy: .public) vs declared=\(sysReader.processingFormat.sampleRate, privacy: .public), resampling from effective rate") sysSamples = Self.readAllMono( file: sysReader, targetRate: targetRate, @@ -347,7 +348,7 @@ final class AudioRecorder: @unchecked Sendable { let micPeak = micSamples.reduce(Float(0)) { max($0, abs($1)) } let sysPeak = sysSamples.reduce(Float(0)) { max($0, abs($1)) } - diagLog("[RECORDER] after readAllMono: micSamples=\(micSamples.count) micPeak=\(micPeak) sysSamples=\(sysSamples.count) sysPeak=\(sysPeak)") + Log.recorder.info("After readAllMono: micSamples=\(micSamples.count, privacy: .public) micPeak=\(micPeak, privacy: .public) sysSamples=\(sysSamples.count, privacy: .public) sysPeak=\(sysPeak, privacy: .public)") let length = max(micSamples.count, sysSamples.count) guard length > 0 else { return } @@ -364,7 +365,7 @@ final class AudioRecorder: @unchecked Sendable { commonFormat: .pcmFormatFloat32, interleaved: false ) else { - diagLog("[RECORDER] Failed to create output file") + Log.recorder.error("Failed to create output file") return } @@ -387,7 +388,7 @@ final class AudioRecorder: @unchecked Sendable { offset += count } - diagLog("[RECORDER] Saved \(outputURL.lastPathComponent) (\(length) frames)") + Log.recorder.info("Saved \(outputURL.lastPathComponent, privacy: .private(mask: .hash)) (\(length, privacy: .public) frames)") } private static func readAllMono( diff --git a/OpenOats/Sources/OpenOats/Audio/MicCapture.swift b/OpenOats/Sources/OpenOats/Audio/MicCapture.swift index c01451f2..902f4e71 100644 --- a/OpenOats/Sources/OpenOats/Audio/MicCapture.swift +++ b/OpenOats/Sources/OpenOats/Audio/MicCapture.swift @@ -4,7 +4,6 @@ import CoreAudio import Foundation import os -private let micLog = Logger(subsystem: "com.openoats", category: "MicCapture") /// Captures microphone audio via AVAudioEngine and streams PCM buffers. 
final class MicCapture: @unchecked Sendable { @@ -55,21 +54,21 @@ final class MicCapture: @unchecked Sendable { errorHolder.value = nil self._hasCapturedFrames.value = false - diagLog("[MIC-1] bufferStream called, deviceID=\(String(describing: deviceID))") + Log.mic.info("bufferStream called, deviceID=\(String(describing: deviceID), privacy: .public)") let engine = self.makeFreshEngine() - diagLog("[MIC-1a] fresh engine created") + Log.mic.info("Fresh engine created") let inputNode = engine.inputNode - diagLog("[MIC-1b] input node ready") + Log.mic.info("Input node ready") // Enable voice processing (AEC + noise suppression) if requested if echoCancellation { do { try inputNode.setVoiceProcessingEnabled(true) - diagLog("[MIC-1c] voice processing (AEC) enabled") + Log.mic.info("Voice processing (AEC) enabled") } catch { - diagLog("[MIC-1c] failed to enable voice processing: \(error.localizedDescription)") + Log.mic.error("Failed to enable voice processing: \(error, privacy: .public)") } } @@ -78,7 +77,7 @@ final class MicCapture: @unchecked Sendable { if let id = deviceID { guard let inAU = inputNode.audioUnit else { let msg = "inputNode has no audio unit after prepare" - diagLog("[MIC-2-FAIL] \(msg)") + Log.mic.error("\(msg, privacy: .public)") errorHolder.value = msg continuation.finish() return @@ -92,10 +91,10 @@ final class MicCapture: @unchecked Sendable { &devID, UInt32(MemoryLayout.size) ) - diagLog("[MIC-2] setInputDevice status=\(inStatus) (0=ok)") + Log.mic.info("setInputDevice status=\(inStatus, privacy: .public) (0=ok)") resolvedDeviceID = id } else { - diagLog("[MIC-2] no deviceID, using system default") + Log.mic.info("No deviceID, using system default") resolvedDeviceID = Self.defaultInputDeviceID() } @@ -108,15 +107,15 @@ final class MicCapture: @unchecked Sendable { if let devID = resolvedDeviceID, let hwRate = Self.deviceNominalSampleRate(for: devID), hwRate > 0, hwRate != sampleRate { - diagLog("[MIC-3] hardware sr=\(hwRate) differs from 
inputNode sr=\(sampleRate), using hardware rate") + Log.mic.info("Hardware sr=\(hwRate, privacy: .public) differs from inputNode sr=\(sampleRate, privacy: .public), using hardware rate") sampleRate = hwRate } - diagLog("[MIC-3] inputNode format: sr=\(format.sampleRate) ch=\(format.channelCount) interleaved=\(format.isInterleaved) commonFormat=\(format.commonFormat.rawValue), effective sr=\(sampleRate)") + Log.mic.info("inputNode format: sr=\(format.sampleRate, privacy: .public) ch=\(format.channelCount, privacy: .public) interleaved=\(format.isInterleaved, privacy: .public) commonFormat=\(format.commonFormat.rawValue, privacy: .public), effective sr=\(sampleRate, privacy: .public)") guard sampleRate > 0 && format.channelCount > 0 else { let msg = "Invalid audio format: sr=\(sampleRate) ch=\(format.channelCount)" - diagLog("[MIC-3-FAIL] \(msg)") + Log.mic.error("\(msg, privacy: .public)") errorHolder.value = msg continuation.finish() return @@ -130,14 +129,14 @@ final class MicCapture: @unchecked Sendable { tapFormat = f } else if sampleRate != format.sampleRate, let f = AVAudioFormat(standardFormatWithSampleRate: format.sampleRate, channels: format.channelCount) { - diagLog("[MIC-4] hardware-rate format failed, using node rate \(format.sampleRate)") + Log.mic.info("Hardware-rate format failed, using node rate \(format.sampleRate, privacy: .public)") tapFormat = f } else { - diagLog("[MIC-4] standard formats failed, using native input format") + Log.mic.info("Standard formats failed, using native input format") tapFormat = format } - diagLog("[MIC-4] tapFormat: sr=\(tapFormat.sampleRate) ch=\(tapFormat.channelCount)") + Log.mic.info("tapFormat: sr=\(tapFormat.sampleRate, privacy: .public) ch=\(tapFormat.channelCount, privacy: .public)") let muted = self._muted var tapCallCount = 0 @@ -148,7 +147,7 @@ final class MicCapture: @unchecked Sendable { level.value = min(rms * 25, 1.0) if tapCallCount <= 5 || tapCallCount % 100 == 0 { - diagLog("[MIC-6] tap 
#\(tapCallCount): frames=\(buffer.frameLength) rms=\(rms) level=\(level.value)") + Log.mic.debug("tap #\(tapCallCount, privacy: .public): frames=\(buffer.frameLength, privacy: .public) rms=\(rms, privacy: .public) level=\(level.value, privacy: .public)") } guard !muted.value else { return } @@ -156,21 +155,21 @@ final class MicCapture: @unchecked Sendable { } self.hasTapInstalled = true - diagLog("[MIC-5] tap installed, preparing engine...") + Log.mic.info("Tap installed, preparing engine") continuation.onTermination = { _ in - diagLog("[MIC-TERM] stream terminated") + Log.mic.info("Stream terminated") // Audio hardware teardown handled by stop() — not here, // so finishStream() can drain without premature engine shutdown. } do { - diagLog("[MIC-7] engine prepared, starting...") + Log.mic.info("Engine prepared, starting") try engine.start() - diagLog("[MIC-8] engine started successfully, isRunning=\(engine.isRunning)") + Log.mic.info("Engine started successfully, isRunning=\(engine.isRunning, privacy: .public)") } catch { let msg = "Mic failed: \(error.localizedDescription)" - print("[MIC-8-FAIL] \(msg)") + Log.mic.error("Mic failed: \(error, privacy: .public)") errorHolder.value = msg self.hasTapInstalled = false continuation.finish() diff --git a/OpenOats/Sources/OpenOats/Intelligence/BatchTextCleaner.swift b/OpenOats/Sources/OpenOats/Intelligence/BatchTextCleaner.swift index d86cec11..7a357077 100644 --- a/OpenOats/Sources/OpenOats/Intelligence/BatchTextCleaner.swift +++ b/OpenOats/Sources/OpenOats/Intelligence/BatchTextCleaner.swift @@ -1,6 +1,5 @@ import Foundation import Observation -import os /// Batch text cleaner that sends transcript chunks to an LLM /// to remove filler words and fix punctuation, preserving meaning. 
@@ -31,7 +30,6 @@ final class BatchTextCleaner { set { withMutation(keyPath: \.error) { _error = newValue } } } - private nonisolated static let logger = Logger(subsystem: "com.openoats.app", category: "BatchTextCleaner") private let client = OpenRouterClient() private var currentTask: Task<[SessionRecord], Never>? @@ -254,7 +252,7 @@ final class BatchTextCleaner { return parseResponse(response, originalRecords: records) } catch { - logger.error("Cleanup chunk failed: \(error.localizedDescription)") + Log.batchTextCleaner.error("Cleanup chunk failed: \(error, privacy: .public)") return nil } } diff --git a/OpenOats/Sources/OpenOats/Intelligence/MarkdownMeetingWriter.swift b/OpenOats/Sources/OpenOats/Intelligence/MarkdownMeetingWriter.swift index 64aae0bc..d2274593 100644 --- a/OpenOats/Sources/OpenOats/Intelligence/MarkdownMeetingWriter.swift +++ b/OpenOats/Sources/OpenOats/Intelligence/MarkdownMeetingWriter.swift @@ -1,7 +1,4 @@ import Foundation -import os - -private let writerLogger = Logger(subsystem: "com.openoats.app", category: "MarkdownMeetingWriter") /// Produces spec-compliant openoats/v1 Markdown files from session data. /// @@ -49,7 +46,7 @@ enum MarkdownMeetingWriter { outputDirectory: URL ) -> URL? 
{ guard !records.isEmpty else { - writerLogger.warning("MarkdownMeetingWriter: no records, skipping write") + Log.markdownMeetingWriter.warning("MarkdownMeetingWriter: no records, skipping write") return nil } @@ -70,10 +67,10 @@ enum MarkdownMeetingWriter { do { try content.write(to: fileURL, atomically: true, encoding: .utf8) try fm.setAttributes([.posixPermissions: 0o600], ofItemAtPath: fileURL.path) - writerLogger.info("Wrote meeting markdown: \(fileURL.lastPathComponent, privacy: .public)") + Log.markdownMeetingWriter.info("Wrote meeting markdown: \(fileURL.lastPathComponent, privacy: .public)") return fileURL } catch { - writerLogger.error("Failed to write markdown: \(error.localizedDescription, privacy: .public)") + Log.markdownMeetingWriter.error("Failed to write markdown: \(error, privacy: .public)") return nil } } diff --git a/OpenOats/Sources/OpenOats/Meeting/WebhookService.swift b/OpenOats/Sources/OpenOats/Meeting/WebhookService.swift index 9bfac952..1d1894a9 100644 --- a/OpenOats/Sources/OpenOats/Meeting/WebhookService.swift +++ b/OpenOats/Sources/OpenOats/Meeting/WebhookService.swift @@ -1,12 +1,9 @@ import Foundation import CryptoKit -import os.log /// Sends a POST request to a user-configured webhook URL when a meeting ends. /// Uses only data that already exists at session finalization time. enum WebhookService { - private static let logger = Logger(subsystem: "com.openoats.app", category: "Webhook") - struct Payload: Codable { let sessionID: String let startedAt: Date @@ -65,7 +62,7 @@ enum WebhookService { encoder.dateEncodingStrategy = .iso8601 guard let body = try? encoder.encode(payload) else { - logger.error("Webhook: failed to encode payload") + Log.webhook.error("Webhook: failed to encode payload") return } @@ -85,13 +82,13 @@ enum WebhookService { do { let (_, response) = try await URLSession.shared.data(for: request) if let http = response as? 
HTTPURLResponse, (200..<300).contains(http.statusCode) { - logger.info("Webhook delivered (attempt \(attempt + 1), status \(http.statusCode))") + Log.webhook.info("Webhook delivered (attempt \(attempt + 1, privacy: .public), status \(http.statusCode, privacy: .public))") return } let statusCode = (response as? HTTPURLResponse)?.statusCode ?? -1 - logger.warning("Webhook attempt \(attempt + 1) returned status \(statusCode)") + Log.webhook.warning("Webhook attempt \(attempt + 1, privacy: .public) returned status \(statusCode, privacy: .public)") } catch { - logger.warning("Webhook attempt \(attempt + 1) failed: \(error.localizedDescription)") + Log.webhook.warning("Webhook attempt \(attempt + 1, privacy: .public) failed: \(error, privacy: .public)") } if attempt < 2 { @@ -100,7 +97,7 @@ enum WebhookService { } } - logger.error("Webhook delivery failed after 3 attempts to \(url.absoluteString)") + Log.webhook.error("Webhook delivery failed after 3 attempts to \(url.absoluteString, privacy: .private)") } private static func hmacSHA256(data: Data, key: String) -> String { diff --git a/OpenOats/Sources/OpenOats/Models/TranscriptStore.swift b/OpenOats/Sources/OpenOats/Models/TranscriptStore.swift index 390e554a..b1495d4e 100644 --- a/OpenOats/Sources/OpenOats/Models/TranscriptStore.swift +++ b/OpenOats/Sources/OpenOats/Models/TranscriptStore.swift @@ -1,5 +1,6 @@ import Foundation import Observation +import os @Observable @MainActor @@ -203,11 +204,12 @@ final class TranscriptStore { guard similarity >= acousticEchoSimilarityThreshold || containsOther else { continue } - diagLog( - "[TRANSCRIPT-ECHO] dropped mic utterance as system-audio echo " + - "dt=\(String(format: "%.2f", timeDelta)) " + - "similarity=\(String(format: "%.2f", similarity)) " + - "you='\(utterance.text.prefix(80))' them='\(candidate.text.prefix(80))'" + let dtFormatted = String(format: "%.2f", timeDelta) + let simFormatted = String(format: "%.2f", similarity) + let youSnippet = 
String(utterance.text.prefix(80)) + let themSnippet = String(candidate.text.prefix(80)) + Log.transcript.info( + "Dropped mic utterance as system-audio echo dt=\(dtFormatted, privacy: .public) similarity=\(simFormatted, privacy: .public) you='\(youSnippet, privacy: .private)' them='\(themSnippet, privacy: .private)'" ) return true } diff --git a/OpenOats/Sources/OpenOats/Storage/GranolaImporter.swift b/OpenOats/Sources/OpenOats/Storage/GranolaImporter.swift index 66dfcccb..43ceb1b7 100644 --- a/OpenOats/Sources/OpenOats/Storage/GranolaImporter.swift +++ b/OpenOats/Sources/OpenOats/Storage/GranolaImporter.swift @@ -1,7 +1,4 @@ import Foundation -import os - -private let log = Logger(subsystem: "com.openoats.app", category: "GranolaImporter") // MARK: - Granola API Models @@ -141,7 +138,7 @@ actor GranolaImporter { onProgress(.fetching(progress: "Fetching note list from Granola...")) let notes = try await fetchAllNotes(apiKey: apiKey) - log.info("Fetched \(notes.count) notes from Granola") + Log.granolaImporter.info("Fetched \(notes.count, privacy: .public) notes from Granola") if notes.isEmpty { onProgress(.completed(imported: 0, skipped: 0)) @@ -174,7 +171,7 @@ actor GranolaImporter { try await importSingleNote(fullNote, sessionRepository: sessionRepository) imported += 1 } catch { - log.error("Failed to import note \(noteSummary.id): \(error.localizedDescription, privacy: .public)") + Log.granolaImporter.error("Failed to import note \(noteSummary.id, privacy: .public): \(error, privacy: .public)") // Continue with remaining notes } @@ -266,7 +263,7 @@ actor GranolaImporter { await sessionRepository.saveNotes(sessionID: sessionID, notes: generatedNotes) } - log.info("Imported Granola note \(note.id) as session \(sessionID)") + Log.granolaImporter.debug("Imported Granola note \(note.id, privacy: .public) as session \(sessionID, privacy: .public)") } } diff --git a/OpenOats/Sources/OpenOats/Storage/SessionRepository.swift 
b/OpenOats/Sources/OpenOats/Storage/SessionRepository.swift index 07f5ffc8..5ab40adf 100644 --- a/OpenOats/Sources/OpenOats/Storage/SessionRepository.swift +++ b/OpenOats/Sources/OpenOats/Storage/SessionRepository.swift @@ -1,7 +1,4 @@ import Foundation -import os - -private let repoLog = Logger(subsystem: "com.openoats.app", category: "SessionRepository") // MARK: - Supporting Types @@ -434,7 +431,7 @@ actor SessionRepository { } try fm.moveItem(at: tempURL, to: finalURL) } catch { - repoLog.error("Failed to write final transcript: \(error.localizedDescription, privacy: .public)") + Log.sessionRepository.error("Failed to write final transcript: \(error, privacy: .public)") } // Mirror to notesFolderPath @@ -999,7 +996,7 @@ actor SessionRepository { try data.write(to: url, options: .atomic) try? FileManager.default.setAttributes([.posixPermissions: 0o600], ofItemAtPath: url.path) } catch { - repoLog.error("Failed to write session.json: \(error.localizedDescription, privacy: .public)") + Log.sessionRepository.error("Failed to write session.json: \(error, privacy: .public)") } } @@ -1020,7 +1017,7 @@ actor SessionRepository { } private func reportWriteError(_ message: String) { - repoLog.error("\(message, privacy: .public)") + Log.sessionRepository.error("\(message, privacy: .public)") guard !hasReportedWriteError else { return } hasReportedWriteError = true onWriteError?(message) @@ -1166,7 +1163,7 @@ actor SessionRepository { try? fm.removeItem(at: micLegacy) try? fm.removeItem(at: sysLegacy) try? 
fm.removeItem(at: item.appendingPathComponent("batch-meta.json")) - repoLog.info("Cleaned up orphaned batch audio in \(name, privacy: .public)") + Log.sessionRepository.info("Cleaned up orphaned batch audio in \(name, privacy: .public)") } } } diff --git a/OpenOats/Sources/OpenOats/Transcription/AcousticEchoFilter.swift b/OpenOats/Sources/OpenOats/Transcription/AcousticEchoFilter.swift index af9ba9d0..46c238de 100644 --- a/OpenOats/Sources/OpenOats/Transcription/AcousticEchoFilter.swift +++ b/OpenOats/Sources/OpenOats/Transcription/AcousticEchoFilter.swift @@ -1,4 +1,5 @@ import Foundation +import os /// Shared acoustic echo suppression logic. /// Detects when mic (YOU) utterances are echoes of system (THEM) audio based on @@ -37,11 +38,12 @@ enum AcousticEchoFilter { normalizedThem.contains(normalizedYou) if similarity >= similarityThreshold || containsOther { - diagLog( - "[ECHO-FILTER] suppressed mic record as echo " + - "dt=\(String(format: "%.2f", timeDelta)) " + - "sim=\(String(format: "%.2f", similarity)) " + - "mic='\(micRecord.text.prefix(80))' sys='\(sysRecord.text.prefix(80))'" + let dtFormatted = String(format: "%.2f", timeDelta) + let simFormatted = String(format: "%.2f", similarity) + let micSnippet = String(micRecord.text.prefix(80)) + let sysSnippet = String(sysRecord.text.prefix(80)) + Log.echo.info( + "Suppressed mic record as echo dt=\(dtFormatted, privacy: .public) sim=\(simFormatted, privacy: .public) mic='\(micSnippet, privacy: .private)' sys='\(sysSnippet, privacy: .private)'" ) return true } diff --git a/OpenOats/Sources/OpenOats/Transcription/BatchAudioTranscriber.swift b/OpenOats/Sources/OpenOats/Transcription/BatchAudioTranscriber.swift index c61187a9..f3c3435c 100644 --- a/OpenOats/Sources/OpenOats/Transcription/BatchAudioTranscriber.swift +++ b/OpenOats/Sources/OpenOats/Transcription/BatchAudioTranscriber.swift @@ -1,8 +1,5 @@ @preconcurrency import AVFoundation import FluidAudio -import os - -private let batchLog = Logger(subsystem: 
"com.openoats.app", category: "BatchTranscription") /// Offline two-pass transcription engine that processes recorded CAF files /// using a higher-quality model after a meeting ends. @@ -49,10 +46,10 @@ actor BatchAudioTranscriber { ) } catch is CancellationError { await self.setStatus(.cancelled) - batchLog.info("Batch transcription cancelled for \(sessionID)") + Log.batchTranscription.info("Batch transcription cancelled for \(sessionID, privacy: .public)") } catch { await self.setStatus(.failed(error.localizedDescription)) - batchLog.error("Batch transcription failed: \(error.localizedDescription)") + Log.batchTranscription.error("Batch transcription failed: \(error, privacy: .public)") } } currentTask = task @@ -94,11 +91,11 @@ actor BatchAudioTranscriber { } catch is CancellationError { await self.setStatus(.cancelled) await self.setIsImporting(false) - batchLog.info("Audio import cancelled for \(sessionID)") + Log.batchTranscription.info("Audio import cancelled for \(sessionID, privacy: .public)") } catch { await self.setStatus(.failed(error.localizedDescription)) await self.setIsImporting(false) - batchLog.error("Audio import failed: \(error.localizedDescription)") + Log.batchTranscription.error("Audio import failed: \(error, privacy: .public)") } } currentTask = task @@ -112,13 +109,13 @@ actor BatchAudioTranscriber { locale: Locale, sessionRepository: SessionRepository ) async throws { - batchLog.info("Starting audio import for \(sessionID) from \(url.lastPathComponent)") + Log.batchTranscription.info("Starting audio import for \(sessionID, privacy: .public) from \(url.lastPathComponent, privacy: .public)") status = .loading(model: model.displayName) // Prepare backend and VAD let backend = model.makeBackend() try await backend.prepare { statusMsg in - batchLog.info("Backend: \(statusMsg)") + Log.batchTranscription.debug("Backend: \(statusMsg, privacy: .public)") } try Task.checkCancellation() @@ -157,7 +154,7 @@ actor BatchAudioTranscriber { try 
Task.checkCancellation() guard !records.isEmpty else { - batchLog.warning("Audio import produced no records for \(sessionID)") + Log.batchTranscription.warning("Audio import produced no records for \(sessionID, privacy: .public)") status = .failed("No speech detected in the audio file") isImporting = false return @@ -181,7 +178,7 @@ actor BatchAudioTranscriber { status = .completed(sessionID: sessionID) isImporting = false - batchLog.info("Audio import completed for \(sessionID): \(records.count) records") + Log.batchTranscription.info("Audio import completed for \(sessionID, privacy: .public): \(records.count, privacy: .public) records") } // MARK: - Private @@ -203,13 +200,13 @@ actor BatchAudioTranscriber { enableDiarization: Bool, diarizationVariant: DiarizationVariant ) async throws { - batchLog.info("Starting batch transcription for \(sessionID) with \(model.rawValue)") + Log.batchTranscription.info("Starting batch transcription for \(sessionID, privacy: .public) with \(model.rawValue, privacy: .public)") status = .loading(model: model.displayName) // Load batch metadata let urls = await sessionRepository.batchAudioURLs(sessionID: sessionID) guard urls.mic != nil || urls.sys != nil else { - batchLog.warning("No batch audio found for \(sessionID)") + Log.batchTranscription.warning("No batch audio found for \(sessionID, privacy: .public)") status = .failed("No audio files found") return } @@ -220,7 +217,7 @@ actor BatchAudioTranscriber { // Create and prepare backend let backend = model.makeBackend() try await backend.prepare { statusMsg in - batchLog.info("Backend: \(statusMsg)") + Log.batchTranscription.debug("Backend: \(statusMsg, privacy: .public)") } try Task.checkCancellation() @@ -252,7 +249,7 @@ actor BatchAudioTranscriber { progressScale: 1.0 / Double(totalFiles) ) filesProcessed += 1 - batchLog.info("Mic transcription: \(micRecords.count) records") + Log.batchTranscription.debug("Mic transcription: \(micRecords.count, privacy: .public) records") } try 
Task.checkCancellation() @@ -261,7 +258,7 @@ actor BatchAudioTranscriber { // Optionally run diarization on the full system audio var batchDiarizer: DiarizationManager? if enableDiarization { - batchLog.info("Running LS-EEND diarization on system audio...") + Log.batchTranscription.info("Running LS-EEND diarization on system audio...") let dm = DiarizationManager() let variant = LSEENDVariant(rawValue: diarizationVariant.rawValue) ?? .dihard3 try await dm.load(variant: variant) @@ -271,7 +268,7 @@ actor BatchAudioTranscriber { try await dm.feedAudio(samples) await dm.finalize() batchDiarizer = dm - batchLog.info("Diarization complete") + Log.batchTranscription.info("Diarization complete") } sysRecords = try await transcribeFile( @@ -286,7 +283,7 @@ actor BatchAudioTranscriber { progressScale: 1.0 / Double(totalFiles), diarizationManager: batchDiarizer ) - batchLog.info("Sys transcription: \(sysRecords.count) records") + Log.batchTranscription.debug("Sys transcription: \(sysRecords.count, privacy: .public) records") } try Task.checkCancellation() @@ -299,7 +296,7 @@ actor BatchAudioTranscriber { allRecords.sort { $0.timestamp < $1.timestamp } guard !allRecords.isEmpty else { - batchLog.warning("Batch transcription produced no records for \(sessionID)") + Log.batchTranscription.warning("Batch transcription produced no records for \(sessionID, privacy: .public)") await sessionRepository.cleanupBatchAudio(sessionID: sessionID) status = .completed(sessionID: sessionID) return @@ -312,7 +309,7 @@ actor BatchAudioTranscriber { await sessionRepository.cleanupBatchAudio(sessionID: sessionID) status = .completed(sessionID: sessionID) - batchLog.info("Batch transcription completed for \(sessionID): \(allRecords.count) records") + Log.batchTranscription.info("Batch transcription completed for \(sessionID, privacy: .public): \(allRecords.count, privacy: .public) records") } // MARK: - File Transcription @@ -330,7 +327,7 @@ actor BatchAudioTranscriber { diarizationManager: 
DiarizationManager? = nil ) async throws -> [SessionRecord] { guard let audioFile = try? AVAudioFile(forReading: url) else { - batchLog.warning("Cannot open audio file: \(url.lastPathComponent)") + Log.batchTranscription.warning("Cannot open audio file: \(url.lastPathComponent, privacy: .public)") return [] } diff --git a/OpenOats/Sources/OpenOats/Transcription/DiarizationManager.swift b/OpenOats/Sources/OpenOats/Transcription/DiarizationManager.swift index 01d67985..7e09d4f1 100644 --- a/OpenOats/Sources/OpenOats/Transcription/DiarizationManager.swift +++ b/OpenOats/Sources/OpenOats/Transcription/DiarizationManager.swift @@ -1,8 +1,5 @@ import FluidAudio import Foundation -import os - -private let diarizationLog = Logger(subsystem: "com.openoats.app", category: "Diarization") /// Manages LS-EEND speaker diarization for system audio. /// Wraps the FluidAudio LSEENDDiarizer and provides speaker attribution @@ -13,10 +10,10 @@ actor DiarizationManager { /// Load the LS-EEND model for the given variant. Must be called before feedAudio/dominantSpeaker. func load(variant: LSEENDVariant = .dihard3) async throws { - diarizationLog.info("Loading LS-EEND model (variant: \(variant.rawValue))") + Log.diarization.info("Loading LS-EEND model (variant: \(variant.rawValue, privacy: .public))") try await diarizer.initialize(variant: variant) isInitialized = true - diarizationLog.info("LS-EEND model loaded") + Log.diarization.info("LS-EEND model loaded") } /// Feed audio samples to the diarizer. Samples should be at 16kHz mono Float32. @@ -77,7 +74,11 @@ actor DiarizationManager { /// Finalize the diarization session (flush tentative segments). func finalize() { guard isInitialized else { return } - _ = try? diarizer.finalizeSession() + do { + _ = try diarizer.finalizeSession() + } catch { + Log.diarization.error("Failed to finalize LS-EEND session: \(String(describing: error), privacy: .public)") + } } /// Reset the diarizer state for a new session.
diff --git a/OpenOats/Sources/OpenOats/Transcription/StreamingTranscriber.swift b/OpenOats/Sources/OpenOats/Transcription/StreamingTranscriber.swift index b225f017..7f4e11e7 100644 --- a/OpenOats/Sources/OpenOats/Transcription/StreamingTranscriber.swift +++ b/OpenOats/Sources/OpenOats/Transcription/StreamingTranscriber.swift @@ -11,7 +11,6 @@ final class StreamingTranscriber: @unchecked Sendable { private let speaker: Speaker private let onPartial: @Sendable (String) -> Void private let onFinal: @Sendable (String) -> Void - private let log = Logger(subsystem: "com.openoats", category: "StreamingTranscriber") /// Resampler from source format to 16kHz mono Float32. private var converter: AVAudioConverter? @@ -68,14 +67,14 @@ final class StreamingTranscriber: @unchecked Sendable { bufferCount += 1 if bufferCount <= 3 { let fmt = buffer.format - diagLog("[\(speaker.storageKey)] buffer #\(bufferCount): frames=\(buffer.frameLength) sr=\(fmt.sampleRate) ch=\(fmt.channelCount) interleaved=\(fmt.isInterleaved) common=\(fmt.commonFormat.rawValue)") + Log.streaming.debug("[\(self.speaker.storageKey, privacy: .public)] buffer #\(bufferCount, privacy: .public): frames=\(buffer.frameLength, privacy: .public) sr=\(fmt.sampleRate, privacy: .public) ch=\(fmt.channelCount, privacy: .public) interleaved=\(fmt.isInterleaved, privacy: .public) common=\(fmt.commonFormat.rawValue, privacy: .public)") } guard let samples = extractSamples(buffer) else { continue } if bufferCount <= 3 { let maxVal = samples.max() ?? 
0 - diagLog("[\(speaker.storageKey)] samples: count=\(samples.count) max=\(maxVal)") + Log.streaming.debug("[\(self.speaker.storageKey, privacy: .public)] samples: count=\(samples.count, privacy: .public) max=\(maxVal, privacy: .public)") } vadBuffer.append(contentsOf: samples) @@ -110,7 +109,7 @@ final class StreamingTranscriber: @unchecked Sendable { isSpeaking = true startedSpeech = true speechSamples = recentChunks.suffix(Self.prerollChunkCount).flatMap { $0 } - diagLog("[\(self.speaker.storageKey)] speech start") + Log.streaming.debug("[\(self.speaker.storageKey, privacy: .public)] speech start") } case .speechEnd: @@ -131,7 +130,7 @@ final class StreamingTranscriber: @unchecked Sendable { if endedSpeech { isSpeaking = false isRunningPartial = false - diagLog("[\(self.speaker.storageKey)] speech end, samples=\(speechSamples.count)") + Log.streaming.debug("[\(self.speaker.storageKey, privacy: .public)] speech end, samples=\(speechSamples.count, privacy: .public)") if speechSamples.count > Self.minimumSpeechSamples { let segment = speechSamples speechSamples.removeAll(keepingCapacity: true) @@ -170,7 +169,7 @@ final class StreamingTranscriber: @unchecked Sendable { } } } catch { - log.error("VAD error: \(error.localizedDescription)") + Log.streaming.error("VAD error: \(error, privacy: .public)") } } } @@ -188,13 +187,13 @@ final class StreamingTranscriber: @unchecked Sendable { do { let text = try await backend.transcribe(samples, locale: locale, previousContext: previousContext) guard !text.isEmpty else { return } - log.info("[\(self.speaker.storageKey)] transcribed: \(text.prefix(80))") + Log.streaming.debug("[\(self.speaker.storageKey, privacy: .public)] transcribed: \(text.prefix(80), privacy: .private)") // Store trailing words for cross-segment context let words = text.split(separator: " ") previousContext = words.suffix(Self.contextWordCount).joined(separator: " ") onFinal(text) } catch { - log.error("ASR error: \(error.localizedDescription)") + 
Log.streaming.error("ASR error: \(error, privacy: .public)") } } @@ -269,7 +268,7 @@ final class StreamingTranscriber: @unchecked Sendable { } if let error { - log.error("Resample error: \(error.localizedDescription)") + Log.streaming.error("Resample error: \(error, privacy: .public)") return nil } diff --git a/OpenOats/Sources/OpenOats/Transcription/TranscriptionEngine.swift b/OpenOats/Sources/OpenOats/Transcription/TranscriptionEngine.swift index f749f183..e48da036 100644 --- a/OpenOats/Sources/OpenOats/Transcription/TranscriptionEngine.swift +++ b/OpenOats/Sources/OpenOats/Transcription/TranscriptionEngine.swift @@ -4,19 +4,6 @@ import FluidAudio import Observation import os -/// Simple file logger for diagnostics — writes to /tmp/openoats.log -func diagLog(_ msg: String) { - let line = "\(Date()): \(msg)\n" - let path = "/tmp/openoats.log" - if let fh = FileHandle(forWritingAtPath: path) { - fh.seekToEndOfFile() - fh.write(line.data(using: .utf8)!) - fh.closeFile() - } else { - FileManager.default.createFile(atPath: path, contents: line.data(using: .utf8)) - } -} - enum TranscriptionEngineError: LocalizedError { case transcriberNotInitialized @@ -188,7 +175,7 @@ final class TranscriptionEngine { inputDeviceID: AudioDeviceID = 0, transcriptionModel: TranscriptionModel ) async { - diagLog("[ENGINE-0] start() called, isRunning=\(isRunning)") + Log.transcription.info("start() called, isRunning=\(self.isRunning, privacy: .public)") guard !isRunning else { return } lastError = nil refreshModelAvailability() @@ -232,7 +219,7 @@ final class TranscriptionEngine { downloadTotalBytes = transcriptionModel.estimatedDownloadBytes downloadDetail = DownloadProgressDetail(fraction: 0, sizeText: nil, speedText: nil, etaText: nil) } - diagLog("[ENGINE-1] loading transcription model \(transcriptionModel.rawValue)...") + Log.transcription.info("Loading transcription model \(transcriptionModel.rawValue, privacy: .public)") do { let vocab = settings.transcriptionCustomVocabulary let 
mic = transcriptionModel.makeBackend(customVocabulary: vocab) @@ -262,19 +249,19 @@ } assetStatus = "Loading VAD model..." - diagLog("[ENGINE-1b] loading VAD model...") + Log.transcription.info("Loading VAD model") let vad = try await VadManager() self.vadManager = vad // Optionally load speaker diarization model if settings.enableDiarization { assetStatus = "Loading diarization model..." - diagLog("[ENGINE-1c] loading LS-EEND diarization model...") + Log.transcription.info("Loading LS-EEND diarization model") let dm = DiarizationManager() let variant = LSEENDVariant(rawValue: settings.diarizationVariant.rawValue) ?? .dihard3 try await dm.load(variant: variant) self.diarizationManager = dm - diagLog("[ENGINE-1c] diarization model loaded") + Log.transcription.info("Diarization model loaded") } else { self.diarizationManager = nil } @@ -286,10 +273,10 @@ final class TranscriptionEngine { downloadStartTime = nil downloadTotalBytes = nil assetStatus = "Models ready" - diagLog("[ENGINE-2] transcription model loaded") + Log.transcription.info("Transcription model loaded") } catch { let msg = "Failed to load models: \(error.localizedDescription)" - diagLog("[ENGINE-2-FAIL] \(msg)") + Log.transcription.error("\(msg, privacy: .public)") lastError = msg assetStatus = "Ready" isRunning = false @@ -299,7 +286,7 @@ final class TranscriptionEngine { downloadTotalBytes = nil // Clear corrupt cache so the next attempt triggers a fresh download settings.transcriptionModel.makeBackend().clearModelCache() - diagLog("[ENGINE-2-FAIL] cleared model cache for \(settings.transcriptionModel.rawValue)") + Log.transcription.info("Cleared model cache for \(self.settings.transcriptionModel.rawValue, privacy: .public)") needsModelDownload = true downloadConfirmed = false return @@ -311,7 +298,7 @@ final class TranscriptionEngine { userSelectedDeviceID = inputDeviceID guard let targetMicID = resolvedMicDeviceID(for: inputDeviceID) else {
let msg = unavailableMicMessage(for: inputDeviceID) - diagLog("[ENGINE-3-FAIL] \(msg)") + Log.transcription.error("Mic unavailable: \(msg, privacy: .public)") lastError = msg assetStatus = "Ready" isRunning = false @@ -324,10 +311,10 @@ final class TranscriptionEngine { // AEC must be disabled to prevent capture failures. let useAEC = false if settings.enableEchoCancellation { - diagLog("[ENGINE-3] AEC disabled — conflicts with system audio capture") + Log.transcription.info("AEC disabled - conflicts with system audio capture") } - diagLog("[ENGINE-3] starting mic capture, targetMicID=\(String(describing: targetMicID)), aec=\(useAEC)") + Log.transcription.info("Starting mic capture, targetMicID=\(targetMicID, privacy: .public), aec=\(useAEC, privacy: .public)") startMicStream( locale: locale, vadManager: vadManager, @@ -337,7 +324,7 @@ final class TranscriptionEngine { // Check for immediate mic capture failure if let micError = micCapture.captureError { - diagLog("[ENGINE-3-FAIL] mic capture error: \(micError)") + Log.transcription.error("Mic capture error: \(micError, privacy: .public)") lastError = micError } @@ -348,7 +335,7 @@ final class TranscriptionEngine { guard let self, self.isRunning else { return } if !self.micCapture.hasCapturedFrames && self.micCapture.captureError == nil { if useAEC { - diagLog("[ENGINE-HEALTH] no mic audio after 5s with AEC, retrying without") + Log.transcription.error("No mic audio after 5s with AEC, retrying without") self.micCapture.finishStream() await self.micTask?.value self.micTask = nil @@ -360,7 +347,7 @@ final class TranscriptionEngine { echoCancellation: false ) } else { - diagLog("[ENGINE-HEALTH] no mic audio after 5s") + Log.transcription.error("No mic audio after 5s") self.lastError = "Microphone is not producing audio. Check your input device in System Settings." 
} } @@ -370,7 +357,7 @@ final class TranscriptionEngine { await startSystemAudioStream(locale: locale, vadManager: vadManager) assetStatus = "Transcribing (\(micBackend?.displayName ?? transcriptionModel.displayName))" - diagLog("[ENGINE-6] all transcription tasks started") + Log.transcription.info("All transcription tasks started") // Install CoreAudio listeners for live device routing changes installDefaultDeviceListener() @@ -385,7 +372,7 @@ final class TranscriptionEngine { pendingMicDeviceID = inputDeviceID if micRestartTask != nil { - diagLog("[ENGINE-MIC-SWAP] queued restart for device \(inputDeviceID)") + Log.transcription.info("Queued mic restart for device \(inputDeviceID, privacy: .public)") return } @@ -596,17 +583,17 @@ final class TranscriptionEngine { guard let targetMicID = resolvedMicDeviceID(for: inputDeviceID) else { let msg = unavailableMicMessage(for: inputDeviceID) - diagLog("[ENGINE-MIC-SWAP-FAIL] \(msg)") + Log.transcription.error("Mic swap failed: \(msg, privacy: .public)") lastError = msg return } guard targetMicID != currentMicDeviceID else { - diagLog("[ENGINE-MIC-SWAP] same device \(targetMicID), skipping") + Log.transcription.debug("Mic swap skipped, same device \(targetMicID, privacy: .public)") return } - diagLog("[ENGINE-MIC-SWAP] switching mic from \(currentMicDeviceID) to \(targetMicID)") + Log.transcription.info("Switching mic from \(self.currentMicDeviceID, privacy: .public) to \(targetMicID, privacy: .public)") micCapture.finishStream() await micTask?.value @@ -625,7 +612,7 @@ final class TranscriptionEngine { currentMicDeviceID = targetMicID lastError = nil - diagLog("[ENGINE-MIC-SWAP] mic restarted on device \(targetMicID)") + Log.transcription.info("Mic restarted on device \(targetMicID, privacy: .public)") } private func restartSystemAudio() { @@ -633,7 +620,7 @@ final class TranscriptionEngine { pendingSystemAudioRestart = true if sysRestartTask != nil { - diagLog("[ENGINE-SYS-SWAP] queued restart") + 
Log.transcription.info("Queued system audio restart") return } @@ -651,7 +638,7 @@ private func performSystemAudioRestart() async { guard isRunning, let vadManager else { return } - diagLog("[ENGINE-SYS-SWAP] restarting system audio stream") + Log.transcription.info("Restarting system audio stream") systemCapture.finishStream() await sysTask?.value @@ -664,7 +651,7 @@ await systemCapture.stop() await startSystemAudioStream(locale: settings.locale, vadManager: vadManager) - diagLog("[ENGINE-SYS-SWAP] system audio stream restarted") + Log.transcription.info("System audio stream restarted") } private func startMicStream( @@ -708,16 +695,16 @@ locale: Locale, vadManager: VadManager ) async { - diagLog("[ENGINE-4] starting system audio capture...") + Log.transcription.info("Starting system audio capture") let sysStreams: SystemAudioCapture.CaptureStreams do { sysStreams = try await systemCapture.bufferStream() - diagLog("[ENGINE-5] system audio capture started OK") + Log.transcription.info("System audio capture started") clearSystemAudioErrorIfPresent() } catch { let msg = "Failed to start system audio: \(error.localizedDescription)" - diagLog("[ENGINE-5-FAIL] \(msg)") + Log.transcription.error("\(msg, privacy: .public)") lastError = msg return } @@ -804,7 +791,7 @@ ) -> StreamingTranscriber? { let backend = speaker == .you ?
micBackend : systemBackend guard let backend else { - diagLog("[ENGINE] makeTranscriber called without initialized backend for \(speaker.storageKey)") + Log.transcription.error("makeTranscriber called without initialized backend for \(speaker.storageKey, privacy: .public)") return nil } return StreamingTranscriber( diff --git a/OpenOats/Sources/OpenOats/Transcription/WhisperKitManager.swift b/OpenOats/Sources/OpenOats/Transcription/WhisperKitManager.swift index 415e2e8c..61aed5d0 100644 --- a/OpenOats/Sources/OpenOats/Transcription/WhisperKitManager.swift +++ b/OpenOats/Sources/OpenOats/Transcription/WhisperKitManager.swift @@ -1,6 +1,5 @@ import Foundation import WhisperKit -import os /// Wraps WhisperKit for use as a transcription backend. /// Handles model download, initialization, and transcription of Float32 audio samples. @@ -26,7 +25,6 @@ final class WhisperKitManager: @unchecked Sendable { private let variant: Variant private var pipe: WhisperKit? - private let log = Logger(subsystem: "com.openoats", category: "WhisperKitManager") init(variant: Variant) { self.variant = variant diff --git a/OpenOats/Sources/OpenOats/Utils/Logging.swift b/OpenOats/Sources/OpenOats/Utils/Logging.swift new file mode 100644 index 00000000..74e1bb78 --- /dev/null +++ b/OpenOats/Sources/OpenOats/Utils/Logging.swift @@ -0,0 +1,24 @@ +import Foundation +import os + +enum Log { + static let mic = Logger(subsystem: subsystem, category: "MicCapture") + static let recorder = Logger(subsystem: subsystem, category: "AudioRecorder") + static let transcription = Logger(subsystem: subsystem, category: "TranscriptionEngine") + static let streaming = Logger(subsystem: subsystem, category: "StreamingTranscriber") + static let transcript = Logger(subsystem: subsystem, category: "TranscriptStore") + static let echo = Logger(subsystem: subsystem, category: "AcousticEchoFilter") + static let batchTranscription = Logger(subsystem: subsystem, category: "BatchTranscription") + static let 
batchTextCleaner = Logger(subsystem: subsystem, category: "BatchTextCleaner") + static let diarization = Logger(subsystem: subsystem, category: "Diarization") + static let granolaImporter = Logger(subsystem: subsystem, category: "GranolaImporter") + static let markdownMeetingWriter = Logger(subsystem: subsystem, category: "MarkdownMeetingWriter") + static let meetingDetection = Logger(subsystem: subsystem, category: "MeetingDetection") + static let sessionRepository = Logger(subsystem: subsystem, category: "SessionRepository") + static let webhook = Logger(subsystem: subsystem, category: "Webhook") + static let whisperkit = Logger(subsystem: subsystem, category: "WhisperKitManager") + + private static let subsystem = Bundle(for: BundleToken.self).bundleIdentifier ?? "com.openoats.app" +} + +private final class BundleToken {}