From dbcdae26ccb3db4e81f941928ae078ae83aaa105 Mon Sep 17 00:00:00 2001 From: Nick Cooke Date: Thu, 11 Dec 2025 15:37:47 -0500 Subject: [PATCH 01/10] fix: Restore Xcode 16 testing for AI QS --- .github/workflows/firebaseai.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/firebaseai.yml b/.github/workflows/firebaseai.yml index 95b82c6ea..76ea8dc86 100644 --- a/.github/workflows/firebaseai.yml +++ b/.github/workflows/firebaseai.yml @@ -36,6 +36,11 @@ jobs: platform: iOS device: iPhone 17 Pro ios_version: "26.1" + - os: macos-15 + xcode: "16.4" + platform: iOS + device: iPhone 16 + ios_version: "18.4" runs-on: ${{ matrix.os }} env: SETUP: firebaseai From 5493ceb4e392260007933c0760ffe27c0d0a7837 Mon Sep 17 00:00:00 2001 From: Nick Cooke Date: Thu, 11 Dec 2025 16:25:30 -0500 Subject: [PATCH 02/10] bump CK dep --- firebaseai/FirebaseAIExample.xcodeproj/project.pbxproj | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/firebaseai/FirebaseAIExample.xcodeproj/project.pbxproj b/firebaseai/FirebaseAIExample.xcodeproj/project.pbxproj index aabc3b33a..03d90e6ce 100644 --- a/firebaseai/FirebaseAIExample.xcodeproj/project.pbxproj +++ b/firebaseai/FirebaseAIExample.xcodeproj/project.pbxproj @@ -364,7 +364,7 @@ repositoryURL = "https://github.com/peterfriese/ConversationKit"; requirement = { kind = exactVersion; - version = 0.0.3; + version = 0.0.4; }; }; 88779D912EC8AC460080D023 /* XCRemoteSwiftPackageReference "firebase-ios-sdk" */ = { From 9312ddf3ac2252bd81ef32576bc06f7c7562acc6 Mon Sep 17 00:00:00 2001 From: Nick Cooke Date: Thu, 11 Dec 2025 16:49:45 -0500 Subject: [PATCH 03/10] fix: add backwards compatible API for Xcode 16 --- .../FirebaseAIExample/Shared/Audio/AudioController.swift | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/firebaseai/FirebaseAIExample/Shared/Audio/AudioController.swift b/firebaseai/FirebaseAIExample/Shared/Audio/AudioController.swift index c966d5472..abdd43008 100644 --- a/firebaseai/FirebaseAIExample/Shared/Audio/AudioController.swift +++ b/firebaseai/FirebaseAIExample/Shared/Audio/AudioController.swift @@ -15,6 +15,12 @@ import AVFoundation import OSLog +#if compiler(<6.2) + extension AVAudioSession.CategoryOptions { + static let allowBluetoothHFP = AVAudioSession.CategoryOptions.allowBluetooth + } +#endif + /// Controls audio playback and recording. actor AudioController { private var logger = Logger(subsystem: Bundle.main.bundleIdentifier!, category: "generative-ai") From 8f1fa7e6181d5f16275896aa0fa8df13210e9f14 Mon Sep 17 00:00:00 2001 From: Nick Cooke Date: Thu, 11 Dec 2025 16:50:52 -0500 Subject: [PATCH 04/10] style --- .../FirebaseAIExample/Shared/Audio/AudioController.swift | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/firebaseai/FirebaseAIExample/Shared/Audio/AudioController.swift b/firebaseai/FirebaseAIExample/Shared/Audio/AudioController.swift index abdd43008..fb78c7d19 100644 --- a/firebaseai/FirebaseAIExample/Shared/Audio/AudioController.swift +++ b/firebaseai/FirebaseAIExample/Shared/Audio/AudioController.swift @@ -16,9 +16,9 @@ import AVFoundation import OSLog #if compiler(<6.2) - extension AVAudioSession.CategoryOptions { - static let allowBluetoothHFP = AVAudioSession.CategoryOptions.allowBluetooth - } +extension AVAudioSession.CategoryOptions { + static let allowBluetoothHFP = AVAudioSession.CategoryOptions.allowBluetooth +} #endif /// Controls audio playback and recording. 
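For context on PATCH 03 and PATCH 04 above: the `#if compiler(<6.2)` block appears to exist because the newer SDK shipped with Xcode 26 (Swift 6.2) exposes the Bluetooth HFP category option under the name `allowBluetoothHFP`, while the Xcode 16 toolchain (Swift 6.1) only knows the older `allowBluetooth` spelling. Aliasing the new name onto the old constant lets call sites use `.allowBluetoothHFP` unconditionally on both toolchains. Below is a minimal, self-contained sketch of that pattern; the `configureVoiceChatSession` helper is illustrative only and is not part of the quickstart.

import AVFoundation

// Shim from PATCH 03/04: on compilers older than Swift 6.2 (Xcode 16.x), map the
// newer `.allowBluetoothHFP` name onto the existing `.allowBluetooth` option so
// the same source compiles against both SDKs.
#if compiler(<6.2)
  extension AVAudioSession.CategoryOptions {
    static let allowBluetoothHFP = AVAudioSession.CategoryOptions.allowBluetooth
  }
#endif

// Illustrative helper (an assumption for this sketch, not code from the patches):
// roughly the same session setup AudioController performs, now buildable under
// both Xcode 16 and Xcode 26.
func configureVoiceChatSession() throws {
  let session = AVAudioSession.sharedInstance()
  try session.setCategory(
    .playAndRecord,
    mode: .voiceChat,
    options: [.defaultToSpeaker, .allowBluetoothHFP, .duckOthers]
  )
  try session.setActive(true)
}
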
From 6ee8c0a5b4b8a01147f7de90775a90cfd23ef056 Mon Sep 17 00:00:00 2001 From: Nick Cooke Date: Thu, 11 Dec 2025 16:55:02 -0500 Subject: [PATCH 05/10] address warnings --- .../Shared/Audio/AudioController.swift | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/firebaseai/FirebaseAIExample/Shared/Audio/AudioController.swift b/firebaseai/FirebaseAIExample/Shared/Audio/AudioController.swift index fb78c7d19..d2f0d5ec0 100644 --- a/firebaseai/FirebaseAIExample/Shared/Audio/AudioController.swift +++ b/firebaseai/FirebaseAIExample/Shared/Audio/AudioController.swift @@ -107,12 +107,12 @@ actor AudioController { /// Queues audio for playback. public func playAudio(audio: Data) async throws { - try await audioPlayer?.play(audio) + try audioPlayer?.play(audio) } /// Interrupts and clears the currently pending audio playback queue. public func interrupt() async { - await audioPlayer?.interrupt() + audioPlayer?.interrupt() } private func stopListeningAndPlayback() async { @@ -130,8 +130,8 @@ actor AudioController { logger.error("Failed to disable voice processing: \(error.localizedDescription)") } } - await microphone?.stop() - await audioPlayer?.stop() + microphone?.stop() + audioPlayer?.stop() } /// Start audio processing functionality. @@ -163,10 +163,10 @@ actor AudioController { } private func setupMicrophone(_ engine: AVAudioEngine) async throws { - let microphone = await Microphone(engine: engine) + let microphone = Microphone(engine: engine) self.microphone = microphone - await microphone.start() + microphone.start() let micFormat = engine.inputNode.outputFormat(forBus: 0) guard let converter = AVAudioConverter(from: micFormat, to: modelInputFormat) else { @@ -174,15 +174,15 @@ actor AudioController { } listenTask = Task { - for await audio in await microphone.audio { - try microphoneDataQueue.yield(await converter.convertBuffer(audio)) + for await audio in microphone.audio { + try microphoneDataQueue.yield(converter.convertBuffer(audio)) } } } private func setupAudioPlayback(_ engine: AVAudioEngine) async throws { let playbackFormat = engine.outputNode.outputFormat(forBus: 0) - audioPlayer = try await AudioPlayer( + audioPlayer = try AudioPlayer( engine: engine, inputFormat: modelOutputFormat, outputFormat: playbackFormat From 0783527ad4644d6b81819afe55c922f7a18845ea Mon Sep 17 00:00:00 2001 From: Nick Cooke Date: Thu, 11 Dec 2025 16:56:27 -0500 Subject: [PATCH 06/10] style --- .../Shared/Audio/AudioController.swift | 413 +++++++++--------- 1 file changed, 207 insertions(+), 206 deletions(-) diff --git a/firebaseai/FirebaseAIExample/Shared/Audio/AudioController.swift b/firebaseai/FirebaseAIExample/Shared/Audio/AudioController.swift index d2f0d5ec0..976b75a86 100644 --- a/firebaseai/FirebaseAIExample/Shared/Audio/AudioController.swift +++ b/firebaseai/FirebaseAIExample/Shared/Audio/AudioController.swift @@ -16,235 +16,236 @@ import AVFoundation import OSLog #if compiler(<6.2) -extension AVAudioSession.CategoryOptions { - static let allowBluetoothHFP = AVAudioSession.CategoryOptions.allowBluetooth -} + extension AVAudioSession.CategoryOptions { + static let allowBluetoothHFP = AVAudioSession.CategoryOptions.allowBluetooth + } #endif /// Controls audio playback and recording. actor AudioController { - private var logger = Logger(subsystem: Bundle.main.bundleIdentifier!, category: "generative-ai") - - /// Data processed from the microphone. 
- private let microphoneData: AsyncStream - private let microphoneDataQueue: AsyncStream.Continuation - private var audioPlayer: AudioPlayer? - private var audioEngine: AVAudioEngine? - private var microphone: Microphone? - private var listenTask: Task? - private var routeTask: Task? - - /// Port types that are considered "headphones" for our use-case. - /// - /// More specifically, airpods are considered bluetooth ports instead of headphones, so - /// this array is necessary. - private let headphonePortTypes: [AVAudioSession.Port] = [ - .headphones, - .bluetoothA2DP, - .bluetoothLE, - .bluetoothHFP, - ] - - private let modelInputFormat: AVAudioFormat - private let modelOutputFormat: AVAudioFormat - - private var stopped = false - - public init() async throws { - let session = AVAudioSession.sharedInstance() - try session.setCategory( - .playAndRecord, - mode: .voiceChat, - options: [.defaultToSpeaker, .allowBluetoothHFP, .duckOthers, - .interruptSpokenAudioAndMixWithOthers, .allowBluetoothA2DP] - ) - try session.setPreferredIOBufferDuration(0.01) - try session.setActive(true) - - guard let modelInputFormat = AVAudioFormat( - commonFormat: .pcmFormatInt16, - sampleRate: 16000, - channels: 1, - interleaved: false - ) else { - throw ApplicationError("Failed to create model input format") + private var logger = Logger(subsystem: Bundle.main.bundleIdentifier!, category: "generative-ai") + + /// Data processed from the microphone. + private let microphoneData: AsyncStream + private let microphoneDataQueue: AsyncStream.Continuation + private var audioPlayer: AudioPlayer? + private var audioEngine: AVAudioEngine? + private var microphone: Microphone? + private var listenTask: Task? + private var routeTask: Task? + + /// Port types that are considered "headphones" for our use-case. + /// + /// More specifically, airpods are considered bluetooth ports instead of headphones, so + /// this array is necessary. + private let headphonePortTypes: [AVAudioSession.Port] = [ + .headphones, + .bluetoothA2DP, + .bluetoothLE, + .bluetoothHFP, + ] + + private let modelInputFormat: AVAudioFormat + private let modelOutputFormat: AVAudioFormat + + private var stopped = false + + public init() async throws { + let session = AVAudioSession.sharedInstance() + try session.setCategory( + .playAndRecord, + mode: .voiceChat, + options: [.defaultToSpeaker, .allowBluetoothHFP, .duckOthers, + .interruptSpokenAudioAndMixWithOthers, .allowBluetoothA2DP] + ) + try session.setPreferredIOBufferDuration(0.01) + try session.setActive(true) + + guard let modelInputFormat = AVAudioFormat( + commonFormat: .pcmFormatInt16, + sampleRate: 16000, + channels: 1, + interleaved: false + ) else { + throw ApplicationError("Failed to create model input format") + } + + guard let modelOutputFormat = AVAudioFormat( + commonFormat: .pcmFormatInt16, + sampleRate: 24000, + channels: 1, + interleaved: true + ) else { + throw ApplicationError("Failed to create model output format") + } + + self.modelInputFormat = modelInputFormat + self.modelOutputFormat = modelOutputFormat + + let (processedData, dataQueue) = AsyncStream.makeStream() + microphoneData = processedData + microphoneDataQueue = dataQueue + + listenForRouteChange() } - guard let modelOutputFormat = AVAudioFormat( - commonFormat: .pcmFormatInt16, - sampleRate: 24000, - channels: 1, - interleaved: true - ) else { - throw ApplicationError("Failed to create model output format") + /// Kicks off audio processing, and returns a stream of recorded microphone audio data. 
+ public func listenToMic() async throws -> AsyncStream { + try await spawnAudioProcessingThread() + return microphoneData } - self.modelInputFormat = modelInputFormat - self.modelOutputFormat = modelOutputFormat - - let (processedData, dataQueue) = AsyncStream.makeStream() - microphoneData = processedData - microphoneDataQueue = dataQueue - - listenForRouteChange() - } - - /// Kicks off audio processing, and returns a stream of recorded microphone audio data. - public func listenToMic() async throws -> AsyncStream { - try await spawnAudioProcessingThread() - return microphoneData - } - - /// Permanently stop all audio processing. - /// - /// To start again, create a new instance of ``AudioController``. - public func stop() async { - stopped = true - await stopListeningAndPlayback() - microphoneDataQueue.finish() - routeTask?.cancel() - } - - /// Queues audio for playback. - public func playAudio(audio: Data) async throws { - try audioPlayer?.play(audio) - } - - /// Interrupts and clears the currently pending audio playback queue. - public func interrupt() async { - audioPlayer?.interrupt() - } - - private func stopListeningAndPlayback() async { - listenTask?.cancel() - // audio engine needs to be stopped before disconnecting nodes - audioEngine?.pause() - audioEngine?.stop() - if let audioEngine { - do { - // the VP IO leaves behind artifacts, so we need to disable it to properly clean up - if audioEngine.inputNode.isVoiceProcessingEnabled { - try audioEngine.inputNode.setVoiceProcessingEnabled(false) - } - } catch { - logger.error("Failed to disable voice processing: \(error.localizedDescription)") - } + /// Permanently stop all audio processing. + /// + /// To start again, create a new instance of ``AudioController``. + public func stop() async { + stopped = true + await stopListeningAndPlayback() + microphoneDataQueue.finish() + routeTask?.cancel() } - microphone?.stop() - audioPlayer?.stop() - } - - /// Start audio processing functionality. - /// - /// Will stop any currently running audio processing. - /// - /// This function is also called whenever the input or output device change, - /// so it needs to be able to setup the audio processing without disrupting - /// the consumer of the microphone data. - private func spawnAudioProcessingThread() async throws { - if stopped { return } - - await stopListeningAndPlayback() - - // we need to start a new audio engine if the output device changed, so we might as well do it regardless - let audioEngine = AVAudioEngine() - self.audioEngine = audioEngine - - try await setupAudioPlayback(audioEngine) - try setupVoiceProcessing(audioEngine) - - do { - try audioEngine.start() - } catch { - throw ApplicationError("Failed to start audio engine: \(error.localizedDescription)") + + /// Queues audio for playback. + public func playAudio(audio: Data) async throws { + try audioPlayer?.play(audio) } - try await setupMicrophone(audioEngine) - } + /// Interrupts and clears the currently pending audio playback queue. 
+ public func interrupt() async { + audioPlayer?.interrupt() + } - private func setupMicrophone(_ engine: AVAudioEngine) async throws { - let microphone = Microphone(engine: engine) - self.microphone = microphone + private func stopListeningAndPlayback() async { + listenTask?.cancel() + // audio engine needs to be stopped before disconnecting nodes + audioEngine?.pause() + audioEngine?.stop() + if let audioEngine { + do { + // the VP IO leaves behind artifacts, so we need to disable it to properly clean up + if audioEngine.inputNode.isVoiceProcessingEnabled { + try audioEngine.inputNode.setVoiceProcessingEnabled(false) + } + } catch { + logger.error("Failed to disable voice processing: \(error.localizedDescription)") + } + } + microphone?.stop() + audioPlayer?.stop() + } - microphone.start() + /// Start audio processing functionality. + /// + /// Will stop any currently running audio processing. + /// + /// This function is also called whenever the input or output device change, + /// so it needs to be able to setup the audio processing without disrupting + /// the consumer of the microphone data. + private func spawnAudioProcessingThread() async throws { + if stopped { return } - let micFormat = engine.inputNode.outputFormat(forBus: 0) - guard let converter = AVAudioConverter(from: micFormat, to: modelInputFormat) else { - throw ApplicationError("Failed to create audio converter") - } + await stopListeningAndPlayback() - listenTask = Task { - for await audio in microphone.audio { - try microphoneDataQueue.yield(converter.convertBuffer(audio)) - } - } - } - - private func setupAudioPlayback(_ engine: AVAudioEngine) async throws { - let playbackFormat = engine.outputNode.outputFormat(forBus: 0) - audioPlayer = try AudioPlayer( - engine: engine, - inputFormat: modelOutputFormat, - outputFormat: playbackFormat - ) - } - - /// Sets up the voice processing I/O, if it needs to be setup. - private func setupVoiceProcessing(_ engine: AVAudioEngine) throws { - do { - let headphonesConnected = headphonesConnected() - let vpEnabled = engine.inputNode.isVoiceProcessingEnabled - - if !vpEnabled, !headphonesConnected { - try engine.inputNode.setVoiceProcessingEnabled(true) - } else if headphonesConnected, vpEnabled { - // bluetooth headphones have integrated AEC, so if we don't disable VP IO we get muted output - try engine.inputNode.setVoiceProcessingEnabled(false) - } - } catch { - throw ApplicationError("Failed to enable voice processing: \(error.localizedDescription)") + // we need to start a new audio engine if the output device changed, so we might as well do it regardless + let audioEngine = AVAudioEngine() + self.audioEngine = audioEngine + + try await setupAudioPlayback(audioEngine) + try setupVoiceProcessing(audioEngine) + + do { + try audioEngine.start() + } catch { + throw ApplicationError("Failed to start audio engine: \(error.localizedDescription)") + } + + try await setupMicrophone(audioEngine) } - } - - /// When the output device changes, ensure the audio playback and recording classes are properly restarted. 
- private func listenForRouteChange() { - routeTask?.cancel() - routeTask = Task { [weak self] in - for await notification in NotificationCenter.default.notifications( - named: AVAudioSession.routeChangeNotification - ) { - await self?.handleRouteChange(notification: notification) - } + + private func setupMicrophone(_ engine: AVAudioEngine) async throws { + let microphone = Microphone(engine: engine) + self.microphone = microphone + + microphone.start() + + let micFormat = engine.inputNode.outputFormat(forBus: 0) + guard let converter = AVAudioConverter(from: micFormat, to: modelInputFormat) else { + throw ApplicationError("Failed to create audio converter") + } + + listenTask = Task { + for await audio in microphone.audio { + try microphoneDataQueue.yield(converter.convertBuffer(audio)) + } + } } - } - private func handleRouteChange(notification: Notification) { - guard let userInfo = notification.userInfo, - let reasonValue = userInfo[AVAudioSessionRouteChangeReasonKey] as? UInt, - let reason = AVAudioSession.RouteChangeReason(rawValue: reasonValue) else { - return + private func setupAudioPlayback(_ engine: AVAudioEngine) async throws { + let playbackFormat = engine.outputNode.outputFormat(forBus: 0) + audioPlayer = try AudioPlayer( + engine: engine, + inputFormat: modelOutputFormat, + outputFormat: playbackFormat + ) } - switch reason { - case .newDeviceAvailable, .oldDeviceUnavailable: - Task { @MainActor in + /// Sets up the voice processing I/O, if it needs to be setup. + private func setupVoiceProcessing(_ engine: AVAudioEngine) throws { do { - try await spawnAudioProcessingThread() + let headphonesConnected = headphonesConnected() + let vpEnabled = engine.inputNode.isVoiceProcessingEnabled + + if !vpEnabled, !headphonesConnected { + try engine.inputNode.setVoiceProcessingEnabled(true) + } else if headphonesConnected, vpEnabled { + // bluetooth headphones have integrated AEC, so if we don't disable VP IO we get muted output + try engine.inputNode.setVoiceProcessingEnabled(false) + } } catch { - await logger - .error("Failed to spawn audio processing thread: \(String(describing: error))") + throw ApplicationError("Failed to enable voice processing: \(error.localizedDescription)") + } + } + + /// When the output device changes, ensure the audio playback and recording classes are properly restarted. + private func listenForRouteChange() { + routeTask?.cancel() + routeTask = Task { [weak self] in + for await notification in NotificationCenter.default.notifications( + named: AVAudioSession.routeChangeNotification + ) { + await self?.handleRouteChange(notification: notification) + } } - } - default: () } - } - - /// Checks if the current audio route is a a headphone. - /// - /// This includes airpods. - private func headphonesConnected() -> Bool { - return AVAudioSession.sharedInstance().currentRoute.outputs.contains { - headphonePortTypes.contains($0.portType) + + private func handleRouteChange(notification: Notification) { + guard let userInfo = notification.userInfo, + let reasonValue = userInfo[AVAudioSessionRouteChangeReasonKey] as? UInt, + let reason = AVAudioSession.RouteChangeReason(rawValue: reasonValue) + else { + return + } + + switch reason { + case .newDeviceAvailable, .oldDeviceUnavailable: + Task { @MainActor in + do { + try await spawnAudioProcessingThread() + } catch { + await logger + .error("Failed to spawn audio processing thread: \(String(describing: error))") + } + } + default: () + } + } + + /// Checks if the current audio route is a a headphone. 
+ /// + /// This includes airpods. + private func headphonesConnected() -> Bool { + return AVAudioSession.sharedInstance().currentRoute.outputs.contains { + headphonePortTypes.contains($0.portType) + } } - } } From 2d1a0ec4dde8b8c60035bcc2692499c00384a297 Mon Sep 17 00:00:00 2001 From: Nick Cooke Date: Thu, 11 Dec 2025 17:05:12 -0500 Subject: [PATCH 07/10] style --- .../FirebaseAIExample/Shared/Audio/AudioController.swift | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/firebaseai/FirebaseAIExample/Shared/Audio/AudioController.swift b/firebaseai/FirebaseAIExample/Shared/Audio/AudioController.swift index 976b75a86..0152b1d5f 100644 --- a/firebaseai/FirebaseAIExample/Shared/Audio/AudioController.swift +++ b/firebaseai/FirebaseAIExample/Shared/Audio/AudioController.swift @@ -220,8 +220,8 @@ actor AudioController { private func handleRouteChange(notification: Notification) { guard let userInfo = notification.userInfo, - let reasonValue = userInfo[AVAudioSessionRouteChangeReasonKey] as? UInt, - let reason = AVAudioSession.RouteChangeReason(rawValue: reasonValue) + let reasonValue = userInfo[AVAudioSessionRouteChangeReasonKey] as? UInt, + let reason = AVAudioSession.RouteChangeReason(rawValue: reasonValue) else { return } From 05fb805f9be1d31f4ceeea1007943dae06a96d46 Mon Sep 17 00:00:00 2001 From: Nick Cooke Date: Thu, 11 Dec 2025 17:09:44 -0500 Subject: [PATCH 08/10] Style --- .../FirebaseAIExample/ContentView.swift | 174 +++--- .../Features/Chat/Models/ChatMessage.swift | 191 +++--- .../Features/Chat/Screens/ChatScreen.swift | 85 +-- .../Chat/ViewModels/ChatViewModel.swift | 284 ++++----- .../Features/Chat/Views/BouncingDots.swift | 96 +-- .../Features/Chat/Views/MessageView.swift | 240 ++++---- .../Screens/FunctionCallingScreen.swift | 85 +-- .../Services/WeatherService.swift | 18 +- .../ViewModels/FunctionCallingViewModel.swift | 468 +++++++-------- .../GenerateContentFromTemplateScreen.swift | 142 ++--- ...GenerateContentFromTemplateViewModel.swift | 138 ++--- .../Grounding/Screens/GroundingScreen.swift | 85 +-- .../ViewModels/GroundingViewModel.swift | 266 ++++----- .../Views/GoogleSearchSuggestionView.swift | 101 ++-- .../Views/GroundedResponseView.swift | 102 ++-- .../Imagen/ImagenFromTemplateScreen.swift | 152 ++--- .../Imagen/ImagenFromTemplateViewModel.swift | 132 ++--- .../Features/Imagen/ImagenScreen.swift | 162 ++--- .../Features/Imagen/ImagenViewModel.swift | 146 ++--- .../Features/Live/Screens/LiveScreen.swift | 88 +-- .../Live/ViewModels/LiveViewModel.swift | 474 +++++++-------- .../Live/ViewModels/TypeWriterViewModel.swift | 122 ++-- .../Live/Views/AudioOutputToggle.swift | 30 +- .../Features/Live/Views/ConnectButton.swift | 166 +++--- .../Features/Live/Views/ModelAvatar.swift | 84 +-- .../Features/Live/Views/TranscriptView.swift | 44 +- .../Models/MultimodalAttachment.swift | 475 ++++++++------- .../Multimodal/Screens/MultimodalScreen.swift | 303 +++++----- .../ViewModels/MultimodalViewModel.swift | 356 +++++------ .../Views/AttachmentPreviewCard.swift | 272 ++++----- .../FirebaseAIExampleApp.swift | 61 +- .../Shared/ApplicationError.swift | 18 +- .../Shared/Audio/AudioBufferHelpers.swift | 118 ++-- .../Shared/Audio/AudioPlayer.swift | 108 ++-- .../Shared/Audio/Microphone.swift | 84 +-- .../Shared/Models/BackendOption.swift | 6 +- .../Shared/Models/Sample.swift | 557 +++++++++--------- .../Shared/Models/UseCase.swift | 16 +- .../Shared/Util/Color+Hex.swift | 44 +- .../Shared/Views/ErrorDetailsView.swift | 390 ++++++------ 
.../Shared/Views/FilterChipView.swift | 46 +- .../Shared/Views/InlineTip.swift | 60 +- .../Shared/Views/ProgressOverlay.swift | 34 +- .../Shared/Views/SampleCardView.swift | 180 +++--- 44 files changed, 3605 insertions(+), 3598 deletions(-) diff --git a/firebaseai/FirebaseAIExample/ContentView.swift b/firebaseai/FirebaseAIExample/ContentView.swift index 830630f38..390178581 100644 --- a/firebaseai/FirebaseAIExample/ContentView.swift +++ b/firebaseai/FirebaseAIExample/ContentView.swift @@ -12,112 +12,112 @@ // See the License for the specific language governing permissions and // limitations under the License. -import SwiftUI import FirebaseAILogic +import SwiftUI struct ContentView: View { - @State private var selectedBackend: BackendOption = .googleAI - @State private var selectedUseCase: UseCase = .all + @State private var selectedBackend: BackendOption = .googleAI + @State private var selectedUseCase: UseCase = .all - var filteredSamples: [Sample] { - if selectedUseCase == .all { - return Sample.samples - } else { - return Sample.samples.filter { $0.useCases.contains(selectedUseCase) } + var filteredSamples: [Sample] { + if selectedUseCase == .all { + return Sample.samples + } else { + return Sample.samples.filter { $0.useCases.contains(selectedUseCase) } + } } - } - let columns = [ - GridItem(.adaptive(minimum: 150)), - ] + let columns = [ + GridItem(.adaptive(minimum: 150)), + ] - var body: some View { - NavigationStack { - ScrollView { - VStack(alignment: .leading, spacing: 20) { - // Backend Configuration - VStack(alignment: .leading) { - Text("Backend Configuration") - .font(.system(size: 20, weight: .bold)) - .padding(.horizontal) + var body: some View { + NavigationStack { + ScrollView { + VStack(alignment: .leading, spacing: 20) { + // Backend Configuration + VStack(alignment: .leading) { + Text("Backend Configuration") + .font(.system(size: 20, weight: .bold)) + .padding(.horizontal) - Picker("Backend", selection: $selectedBackend) { - ForEach(BackendOption.allCases) { option in - Text(option.rawValue) - .tag(option) - } - } - .pickerStyle(SegmentedPickerStyle()) - .padding(.horizontal) - } + Picker("Backend", selection: $selectedBackend) { + ForEach(BackendOption.allCases) { option in + Text(option.rawValue) + .tag(option) + } + } + .pickerStyle(SegmentedPickerStyle()) + .padding(.horizontal) + } - // Use Case Filter - VStack(alignment: .leading) { - Text("Filter by use case") - .font(.system(size: 20, weight: .bold)) - .padding(.horizontal) + // Use Case Filter + VStack(alignment: .leading) { + Text("Filter by use case") + .font(.system(size: 20, weight: .bold)) + .padding(.horizontal) - ScrollView(.horizontal, showsIndicators: false) { - HStack(spacing: 10) { - ForEach(UseCase.allCases) { useCase in - FilterChipView(useCase: useCase, isSelected: selectedUseCase == useCase) { - selectedUseCase = useCase - } - } - } - .padding(.horizontal) - } - } + ScrollView(.horizontal, showsIndicators: false) { + HStack(spacing: 10) { + ForEach(UseCase.allCases) { useCase in + FilterChipView(useCase: useCase, isSelected: selectedUseCase == useCase) { + selectedUseCase = useCase + } + } + } + .padding(.horizontal) + } + } - // Samples - VStack(alignment: .leading) { - Text("Samples") - .font(.system(size: 20, weight: .bold)) - .padding(.horizontal) + // Samples + VStack(alignment: .leading) { + Text("Samples") + .font(.system(size: 20, weight: .bold)) + .padding(.horizontal) - LazyVGrid(columns: columns, spacing: 20) { - ForEach(filteredSamples) { sample in - NavigationLink(destination: 
destinationView(for: sample)) { - SampleCardView(sample: sample) + LazyVGrid(columns: columns, spacing: 20) { + ForEach(filteredSamples) { sample in + NavigationLink(destination: destinationView(for: sample)) { + SampleCardView(sample: sample) + } + .buttonStyle(PlainButtonStyle()) + } + } + .padding(.horizontal) + } } - .buttonStyle(PlainButtonStyle()) - } + .padding(.vertical) } - .padding(.horizontal) - } + .background(Color(.systemGroupedBackground)) + .navigationTitle("Firebase AI Logic") } - .padding(.vertical) - } - .background(Color(.systemGroupedBackground)) - .navigationTitle("Firebase AI Logic") } - } - @ViewBuilder - private func destinationView(for sample: Sample) -> some View { - switch sample.navRoute { - case "ChatScreen": - ChatScreen(backendType: selectedBackend, sample: sample) - case "ImagenScreen": - ImagenScreen(backendType: selectedBackend, sample: sample) - case "ImagenFromTemplateScreen": - ImagenFromTemplateScreen(backendType: selectedBackend, sample: sample) - case "GenerateContentFromTemplateScreen": - GenerateContentFromTemplateScreen(backendType: selectedBackend, sample: sample) - case "MultimodalScreen": - MultimodalScreen(backendType: selectedBackend, sample: sample) - case "FunctionCallingScreen": - FunctionCallingScreen(backendType: selectedBackend, sample: sample) - case "GroundingScreen": - GroundingScreen(backendType: selectedBackend, sample: sample) - case "LiveScreen": - LiveScreen(backendType: selectedBackend, sample: sample) - default: - EmptyView() + @ViewBuilder + private func destinationView(for sample: Sample) -> some View { + switch sample.navRoute { + case "ChatScreen": + ChatScreen(backendType: selectedBackend, sample: sample) + case "ImagenScreen": + ImagenScreen(backendType: selectedBackend, sample: sample) + case "ImagenFromTemplateScreen": + ImagenFromTemplateScreen(backendType: selectedBackend, sample: sample) + case "GenerateContentFromTemplateScreen": + GenerateContentFromTemplateScreen(backendType: selectedBackend, sample: sample) + case "MultimodalScreen": + MultimodalScreen(backendType: selectedBackend, sample: sample) + case "FunctionCallingScreen": + FunctionCallingScreen(backendType: selectedBackend, sample: sample) + case "GroundingScreen": + GroundingScreen(backendType: selectedBackend, sample: sample) + case "LiveScreen": + LiveScreen(backendType: selectedBackend, sample: sample) + default: + EmptyView() + } } - } } #Preview { - ContentView() + ContentView() } diff --git a/firebaseai/FirebaseAIExample/Features/Chat/Models/ChatMessage.swift b/firebaseai/FirebaseAIExample/Features/Chat/Models/ChatMessage.swift index b181ca19d..05a5e67c1 100644 --- a/firebaseai/FirebaseAIExample/Features/Chat/Models/ChatMessage.swift +++ b/firebaseai/FirebaseAIExample/Features/Chat/Models/ChatMessage.swift @@ -13,121 +13,122 @@ // limitations under the License. #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif -import Foundation import ConversationKit +import Foundation import UIKit public struct ChatMessage: Message { - public let id: UUID = .init() - public var content: String? - public let participant: Participant - public let error: (any Error)? - public var pending = false - public var groundingMetadata: GroundingMetadata? - public var attachments: [MultimodalAttachment] = [] - public var image: UIImage? - // required by the Message protocol, but not used in this app - public var imageURL: String? - - public init(content: String? = nil, imageURL: String? 
= nil, participant: Participant, - error: (any Error)? = nil, pending: Bool = false, - attachments: [MultimodalAttachment] = [], image: UIImage? = nil) { - self.content = content - self.imageURL = imageURL - self.participant = participant - self.error = error - self.pending = pending - self.attachments = attachments - self.image = image - } - - // Protocol-required initializer - public init(content: String?, imageURL: String? = nil, participant: Participant) { - self.content = content - self.imageURL = imageURL - self.participant = participant - error = nil - } -} + public let id: UUID = .init() + public var content: String? + public let participant: Participant + public let error: (any Error)? + public var pending = false + public var groundingMetadata: GroundingMetadata? + public var attachments: [MultimodalAttachment] = [] + public var image: UIImage? + // required by the Message protocol, but not used in this app + public var imageURL: String? + + public init(content: String? = nil, imageURL: String? = nil, participant: Participant, + error: (any Error)? = nil, pending: Bool = false, + attachments: [MultimodalAttachment] = [], image: UIImage? = nil) + { + self.content = content + self.imageURL = imageURL + self.participant = participant + self.error = error + self.pending = pending + self.attachments = attachments + self.image = image + } -extension ChatMessage { - public static func pending(participant: Participant) -> ChatMessage { - Self(content: "", participant: participant, pending: true) - } + // Protocol-required initializer + public init(content: String?, imageURL: String? = nil, participant: Participant) { + self.content = content + self.imageURL = imageURL + self.participant = participant + error = nil + } } -// Implement Equatable and Hashable for ChatMessage (ignore error) -extension ChatMessage { - public static func == (lhs: ChatMessage, rhs: ChatMessage) -> Bool { - lhs.id == rhs.id && - lhs.content == rhs.content && - lhs.participant == rhs.participant && - lhs.image == rhs.image && - lhs.attachments == rhs.attachments - // intentionally ignore `error` - } - - public func hash(into hasher: inout Hasher) { - hasher.combine(id) - hasher.combine(content) - hasher.combine(participant) - hasher.combine(image) - hasher.combine(attachments) - // intentionally ignore `error` - } +public extension ChatMessage { + static func pending(participant: Participant) -> ChatMessage { + Self(content: "", participant: participant, pending: true) + } } +// Implement Equatable and Hashable for ChatMessage (ignore error) public extension ChatMessage { - static var samples: [ChatMessage] = [ - .init(content: "Hello. What can I do for you today?", participant: .other), - .init(content: "Show me a simple loop in Swift.", participant: .user), - .init(content: """ - Sure, here is a simple loop in Swift: - - # Example 1 - ``` - for i in 1...5 { - print("Hello, world!") + static func == (lhs: ChatMessage, rhs: ChatMessage) -> Bool { + lhs.id == rhs.id && + lhs.content == rhs.content && + lhs.participant == rhs.participant && + lhs.image == rhs.image && + lhs.attachments == rhs.attachments + // intentionally ignore `error` } - ``` - - This loop will print the string "Hello, world!" five times. The for loop iterates over a range of numbers, - in this case the numbers from 1 to 5. The variable i is assigned each number in the range, and the code inside the loop is executed. 
- **Here is another example of a simple loop in Swift:** - ```swift - var sum = 0 - for i in 1...100 { - sum += i + func hash(into hasher: inout Hasher) { + hasher.combine(id) + hasher.combine(content) + hasher.combine(participant) + hasher.combine(image) + hasher.combine(attachments) + // intentionally ignore `error` } - print("The sum of the numbers from 1 to 100 is \\(sum).") - ``` - - This loop calculates the sum of the numbers from 1 to 100. The variable sum is initialized to 0, and then the for loop iterates over the range of numbers from 1 to 100. The variable i is assigned each number in the range, and the value of i is added to the sum variable. After the loop has finished executing, the value of sum is printed to the console. - """, participant: .other), - ] +} - static var sample = samples[0] +public extension ChatMessage { + static var samples: [ChatMessage] = [ + .init(content: "Hello. What can I do for you today?", participant: .other), + .init(content: "Show me a simple loop in Swift.", participant: .user), + .init(content: """ + Sure, here is a simple loop in Swift: + + # Example 1 + ``` + for i in 1...5 { + print("Hello, world!") + } + ``` + + This loop will print the string "Hello, world!" five times. The for loop iterates over a range of numbers, + in this case the numbers from 1 to 5. The variable i is assigned each number in the range, and the code inside the loop is executed. + + **Here is another example of a simple loop in Swift:** + ```swift + var sum = 0 + for i in 1...100 { + sum += i + } + print("The sum of the numbers from 1 to 100 is \\(sum).") + ``` + + This loop calculates the sum of the numbers from 1 to 100. The variable sum is initialized to 0, and then the for loop iterates over the range of numbers from 1 to 100. The variable i is assigned each number in the range, and the value of i is added to the sum variable. After the loop has finished executing, the value of sum is printed to the console. + """, participant: .other), + ] + + static var sample = samples[0] } public extension ChatMessage { - static func from(_ modelContent: ModelContent) -> ChatMessage? { - // TODO: add non-text parts to message when multi-model support is added - let text = modelContent.parts.compactMap { ($0 as? TextPart)?.text }.joined() - guard !text.isEmpty else { - return nil - } + static func from(_ modelContent: ModelContent) -> ChatMessage? { + // TODO: add non-text parts to message when multi-model support is added + let text = modelContent.parts.compactMap { ($0 as? TextPart)?.text }.joined() + guard !text.isEmpty else { + return nil + } - let participant: Participant = (modelContent.role == "user") ? .user : .other + let participant: Participant = (modelContent.role == "user") ? .user : .other - return ChatMessage(content: text, participant: participant) - } + return ChatMessage(content: text, participant: participant) + } - static func from(_ modelContents: [ModelContent]) -> [ChatMessage] { - return modelContents.compactMap { from($0) } - } + static func from(_ modelContents: [ModelContent]) -> [ChatMessage] { + return modelContents.compactMap { from($0) } + } } diff --git a/firebaseai/FirebaseAIExample/Features/Chat/Screens/ChatScreen.swift b/firebaseai/FirebaseAIExample/Features/Chat/Screens/ChatScreen.swift index ac405951f..870f90b7f 100644 --- a/firebaseai/FirebaseAIExample/Features/Chat/Screens/ChatScreen.swift +++ b/firebaseai/FirebaseAIExample/Features/Chat/Screens/ChatScreen.swift @@ -13,59 +13,60 @@ // limitations under the License. 
#if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif -import SwiftUI import ConversationKit +import SwiftUI struct ChatScreen: View { - let backendType: BackendOption - @StateObject var viewModel: ChatViewModel + let backendType: BackendOption + @StateObject var viewModel: ChatViewModel - init(backendType: BackendOption, sample: Sample? = nil) { - self.backendType = backendType - _viewModel = - StateObject(wrappedValue: ChatViewModel(backendType: backendType, - sample: sample)) - } + init(backendType: BackendOption, sample: Sample? = nil) { + self.backendType = backendType + _viewModel = + StateObject(wrappedValue: ChatViewModel(backendType: backendType, + sample: sample)) + } - var body: some View { - NavigationStack { - ConversationView(messages: $viewModel.messages, - userPrompt: viewModel.initialPrompt) { message in - MessageView(message: message) - } - .disableAttachments() - .onSendMessage { message in - await viewModel.sendMessage(message.content ?? "", streaming: true) - } - .onError { error in - viewModel.presentErrorDetails = true - } - .sheet(isPresented: $viewModel.presentErrorDetails) { - if let error = viewModel.error { - ErrorDetailsView(error: error) + var body: some View { + NavigationStack { + ConversationView(messages: $viewModel.messages, + userPrompt: viewModel.initialPrompt) + { message in + MessageView(message: message) + } + .disableAttachments() + .onSendMessage { message in + await viewModel.sendMessage(message.content ?? "", streaming: true) + } + .onError { _ in + viewModel.presentErrorDetails = true + } + .sheet(isPresented: $viewModel.presentErrorDetails) { + if let error = viewModel.error { + ErrorDetailsView(error: error) + } + } + .toolbar { + ToolbarItem(placement: .primaryAction) { + Button(action: newChat) { + Image(systemName: "square.and.pencil") + } + } + } + .navigationTitle(viewModel.title) + .navigationBarTitleDisplayMode(.inline) } - } - .toolbar { - ToolbarItem(placement: .primaryAction) { - Button(action: newChat) { - Image(systemName: "square.and.pencil") - } - } - } - .navigationTitle(viewModel.title) - .navigationBarTitleDisplayMode(.inline) } - } - private func newChat() { - viewModel.startNewChat() - } + private func newChat() { + viewModel.startNewChat() + } } #Preview { - ChatScreen(backendType: .googleAI) + ChatScreen(backendType: .googleAI) } diff --git a/firebaseai/FirebaseAIExample/Features/Chat/ViewModels/ChatViewModel.swift b/firebaseai/FirebaseAIExample/Features/Chat/ViewModels/ChatViewModel.swift index 02d2beebe..f6e1e6c60 100644 --- a/firebaseai/FirebaseAIExample/Features/Chat/ViewModels/ChatViewModel.swift +++ b/firebaseai/FirebaseAIExample/Features/Chat/ViewModels/ChatViewModel.swift @@ -13,177 +13,177 @@ // limitations under the License. #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif -import Foundation -import UIKit import Combine import ConversationKit +import Foundation +import UIKit @MainActor class ChatViewModel: ObservableObject { - /// This array holds both the user's and the system's chat messages - @Published var messages = [ChatMessage]() - - /// Indicates we're waiting for the model to finish - @Published var busy = false + /// This array holds both the user's and the system's chat messages + @Published var messages = [ChatMessage]() - @Published var error: Error? 
- var hasError: Bool { - return error != nil - } + /// Indicates we're waiting for the model to finish + @Published var busy = false - @Published var presentErrorDetails: Bool = false + @Published var error: Error? + var hasError: Bool { + return error != nil + } - @Published var initialPrompt: String = "" - @Published var title: String = "" + @Published var presentErrorDetails: Bool = false - private var model: GenerativeModel - private var chat: Chat + @Published var initialPrompt: String = "" + @Published var title: String = "" - private var chatTask: Task? + private var model: GenerativeModel + private var chat: Chat - private var sample: Sample? - private var backendType: BackendOption + private var chatTask: Task? - init(backendType: BackendOption, sample: Sample? = nil) { - self.sample = sample - self.backendType = backendType + private var sample: Sample? + private var backendType: BackendOption - let firebaseService = backendType == .googleAI - ? FirebaseAI.firebaseAI(backend: .googleAI()) - : FirebaseAI.firebaseAI(backend: .vertexAI()) + init(backendType: BackendOption, sample: Sample? = nil) { + self.sample = sample + self.backendType = backendType - model = firebaseService.generativeModel( - modelName: sample?.modelName ?? "gemini-2.5-flash", - generationConfig: sample?.generationConfig, - systemInstruction: sample?.systemInstruction - ) + let firebaseService = backendType == .googleAI + ? FirebaseAI.firebaseAI(backend: .googleAI()) + : FirebaseAI.firebaseAI(backend: .vertexAI()) - if let chatHistory = sample?.chatHistory, !chatHistory.isEmpty { - messages = ChatMessage.from(chatHistory) - chat = model.startChat(history: chatHistory) - } else { - chat = model.startChat() - } + model = firebaseService.generativeModel( + modelName: sample?.modelName ?? "gemini-2.5-flash", + generationConfig: sample?.generationConfig, + systemInstruction: sample?.systemInstruction + ) - initialPrompt = sample?.initialPrompt ?? "" - title = sample?.title ?? "" - } + if let chatHistory = sample?.chatHistory, !chatHistory.isEmpty { + messages = ChatMessage.from(chatHistory) + chat = model.startChat(history: chatHistory) + } else { + chat = model.startChat() + } - func sendMessage(_ text: String, streaming: Bool = true) async { - error = nil - if streaming { - await internalSendMessageStreaming(text) - } else { - await internalSendMessage(text) + initialPrompt = sample?.initialPrompt ?? "" + title = sample?.title ?? "" } - } - - func startNewChat() { - stop() - error = nil - chat = model.startChat() - messages.removeAll() - initialPrompt = "" - } - - func stop() { - chatTask?.cancel() - error = nil - } - - private func internalSendMessageStreaming(_ text: String) async { - chatTask?.cancel() - - chatTask = Task { - busy = true - defer { - busy = false - } - - // first, add the user's message to the chat - let userMessage = ChatMessage(content: text, participant: .user) - messages.append(userMessage) - - // add a pending message while we're waiting for a response from the backend - let systemMessage = ChatMessage.pending(participant: .other) - messages.append(systemMessage) - - do { - let responseStream = try chat.sendMessageStream(text) - for try await chunk in responseStream { - messages[messages.count - 1].pending = false - if let text = chunk.text { - messages[messages.count - 1] - .content = (messages[messages.count - 1].content ?? 
"") + text - } - - if let inlineDataPart = chunk.inlineDataParts.first { - if let uiImage = UIImage(data: inlineDataPart.data) { - messages[messages.count - 1].image = uiImage - } else { - print("Failed to convert inline data to UIImage") - } - } + + func sendMessage(_ text: String, streaming: Bool = true) async { + error = nil + if streaming { + await internalSendMessageStreaming(text) + } else { + await internalSendMessage(text) } - } catch { - self.error = error - print(error.localizedDescription) - let errorMessage = ChatMessage(content: "An error occurred. Please try again.", - participant: .other, - error: error, - pending: false) - messages[messages.count - 1] = errorMessage - } } - } - private func internalSendMessage(_ text: String) async { - chatTask?.cancel() - - chatTask = Task { - busy = true - defer { - busy = false - } + func startNewChat() { + stop() + error = nil + chat = model.startChat() + messages.removeAll() + initialPrompt = "" + } - // first, add the user's message to the chat - let userMessage = ChatMessage(content: text, participant: .user) - messages.append(userMessage) + func stop() { + chatTask?.cancel() + error = nil + } - // add a pending message while we're waiting for a response from the backend - let systemMessage = ChatMessage.pending(participant: .other) - messages.append(systemMessage) + private func internalSendMessageStreaming(_ text: String) async { + chatTask?.cancel() - do { - var response: GenerateContentResponse? - response = try await chat.sendMessage(text) + chatTask = Task { + busy = true + defer { + busy = false + } - if let responseText = response?.text { - // replace pending message with backend response - messages[messages.count - 1].content = responseText - messages[messages.count - 1].pending = false + // first, add the user's message to the chat + let userMessage = ChatMessage(content: text, participant: .user) + messages.append(userMessage) + + // add a pending message while we're waiting for a response from the backend + let systemMessage = ChatMessage.pending(participant: .other) + messages.append(systemMessage) + + do { + let responseStream = try chat.sendMessageStream(text) + for try await chunk in responseStream { + messages[messages.count - 1].pending = false + if let text = chunk.text { + messages[messages.count - 1] + .content = (messages[messages.count - 1].content ?? "") + text + } + + if let inlineDataPart = chunk.inlineDataParts.first { + if let uiImage = UIImage(data: inlineDataPart.data) { + messages[messages.count - 1].image = uiImage + } else { + print("Failed to convert inline data to UIImage") + } + } + } + } catch { + self.error = error + print(error.localizedDescription) + let errorMessage = ChatMessage(content: "An error occurred. 
Please try again.", + participant: .other, + error: error, + pending: false) + messages[messages.count - 1] = errorMessage + } } + } + + private func internalSendMessage(_ text: String) async { + chatTask?.cancel() - if let inlineDataPart = response?.inlineDataParts.first { - if let uiImage = UIImage(data: inlineDataPart.data) { - messages[messages.count - 1].image = uiImage - } else { - print("Failed to convert inline data to UIImage") - } + chatTask = Task { + busy = true + defer { + busy = false + } + + // first, add the user's message to the chat + let userMessage = ChatMessage(content: text, participant: .user) + messages.append(userMessage) + + // add a pending message while we're waiting for a response from the backend + let systemMessage = ChatMessage.pending(participant: .other) + messages.append(systemMessage) + + do { + var response: GenerateContentResponse? + response = try await chat.sendMessage(text) + + if let responseText = response?.text { + // replace pending message with backend response + messages[messages.count - 1].content = responseText + messages[messages.count - 1].pending = false + } + + if let inlineDataPart = response?.inlineDataParts.first { + if let uiImage = UIImage(data: inlineDataPart.data) { + messages[messages.count - 1].image = uiImage + } else { + print("Failed to convert inline data to UIImage") + } + } + } catch { + self.error = error + print(error.localizedDescription) + let errorMessage = ChatMessage(content: "An error occurred. Please try again.", + participant: .other, + error: error, + pending: false) + messages[messages.count - 1] = errorMessage + } } - } catch { - self.error = error - print(error.localizedDescription) - let errorMessage = ChatMessage(content: "An error occurred. Please try again.", - participant: .other, - error: error, - pending: false) - messages[messages.count - 1] = errorMessage - } } - } } diff --git a/firebaseai/FirebaseAIExample/Features/Chat/Views/BouncingDots.swift b/firebaseai/FirebaseAIExample/Features/Chat/Views/BouncingDots.swift index 6895e6723..ea4d080ae 100644 --- a/firebaseai/FirebaseAIExample/Features/Chat/Views/BouncingDots.swift +++ b/firebaseai/FirebaseAIExample/Features/Chat/Views/BouncingDots.swift @@ -15,63 +15,63 @@ import SwiftUI struct BouncingDots: View { - @State - private var dot1YOffset: CGFloat = 0.0 + @State + private var dot1YOffset: CGFloat = 0.0 - @State - private var dot2YOffset: CGFloat = 0.0 + @State + private var dot2YOffset: CGFloat = 0.0 - @State - private var dot3YOffset: CGFloat = 0.0 + @State + private var dot3YOffset: CGFloat = 0.0 - let animation = Animation.easeInOut(duration: 0.8) - .repeatForever(autoreverses: true) + let animation = Animation.easeInOut(duration: 0.8) + .repeatForever(autoreverses: true) - var body: some View { - HStack(spacing: 8) { - Circle() - .fill(Color.white) - .frame(width: 10, height: 10) - .offset(y: dot1YOffset) - .onAppear { - withAnimation(self.animation.delay(0.0)) { - self.dot1YOffset = -5 - } - } - Circle() - .fill(Color.white) - .frame(width: 10, height: 10) - .offset(y: dot2YOffset) - .onAppear { - withAnimation(self.animation.delay(0.2)) { - self.dot2YOffset = -5 - } + var body: some View { + HStack(spacing: 8) { + Circle() + .fill(Color.white) + .frame(width: 10, height: 10) + .offset(y: dot1YOffset) + .onAppear { + withAnimation(self.animation.delay(0.0)) { + self.dot1YOffset = -5 + } + } + Circle() + .fill(Color.white) + .frame(width: 10, height: 10) + .offset(y: dot2YOffset) + .onAppear { + withAnimation(self.animation.delay(0.2)) { + 
self.dot2YOffset = -5 + } + } + Circle() + .fill(Color.white) + .frame(width: 10, height: 10) + .offset(y: dot3YOffset) + .onAppear { + withAnimation(self.animation.delay(0.4)) { + self.dot3YOffset = -5 + } + } } - Circle() - .fill(Color.white) - .frame(width: 10, height: 10) - .offset(y: dot3YOffset) .onAppear { - withAnimation(self.animation.delay(0.4)) { - self.dot3YOffset = -5 - } - } - } - .onAppear { - let baseOffset: CGFloat = -2 + let baseOffset: CGFloat = -2 - self.dot1YOffset = baseOffset - self.dot2YOffset = baseOffset - self.dot3YOffset = baseOffset + self.dot1YOffset = baseOffset + self.dot2YOffset = baseOffset + self.dot3YOffset = baseOffset + } } - } } struct BouncingDots_Previews: PreviewProvider { - static var previews: some View { - BouncingDots() - .frame(width: 200, height: 50) - .background(.blue) - .roundedCorner(10, corners: [.allCorners]) - } + static var previews: some View { + BouncingDots() + .frame(width: 200, height: 50) + .background(.blue) + .roundedCorner(10, corners: [.allCorners]) + } } diff --git a/firebaseai/FirebaseAIExample/Features/Chat/Views/MessageView.swift b/firebaseai/FirebaseAIExample/Features/Chat/Views/MessageView.swift index 9b06b4d90..99c2ad435 100644 --- a/firebaseai/FirebaseAIExample/Features/Chat/Views/MessageView.swift +++ b/firebaseai/FirebaseAIExample/Features/Chat/Views/MessageView.swift @@ -16,156 +16,156 @@ import ConversationKit import MarkdownUI import SwiftUI #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif struct RoundedCorner: Shape { - var radius: CGFloat = .infinity - var corners: UIRectCorner = .allCorners + var radius: CGFloat = .infinity + var corners: UIRectCorner = .allCorners - func path(in rect: CGRect) -> Path { - let path = UIBezierPath( - roundedRect: rect, - byRoundingCorners: corners, - cornerRadii: CGSize(width: radius, height: radius) - ) - return Path(path.cgPath) - } + func path(in rect: CGRect) -> Path { + let path = UIBezierPath( + roundedRect: rect, + byRoundingCorners: corners, + cornerRadii: CGSize(width: radius, height: radius) + ) + return Path(path.cgPath) + } } extension View { - func roundedCorner(_ radius: CGFloat, corners: UIRectCorner) -> some View { - clipShape(RoundedCorner(radius: radius, corners: corners)) - } + func roundedCorner(_ radius: CGFloat, corners: UIRectCorner) -> some View { + clipShape(RoundedCorner(radius: radius, corners: corners)) + } } struct MessageContentView: View { - @Environment(\.presentErrorAction) var presentErrorAction - var message: ChatMessage + @Environment(\.presentErrorAction) var presentErrorAction + var message: ChatMessage - var body: some View { - if message.pending { - BouncingDots() - } else { - // Error Message - if let error = message.error { - HStack { - Text("An error occurred.") - Button("More information", systemImage: "info.circle") { - presentErrorAction?(error) - } - .labelStyle(.iconOnly) - } - } else { - VStack(alignment: .leading, spacing: 8) { - if message.participant == .user && !message.attachments.isEmpty { - AttachmentPreviewScrollView(attachments: message.attachments) - } + var body: some View { + if message.pending { + BouncingDots() + } else { + // Error Message + if let error = message.error { + HStack { + Text("An error occurred.") + Button("More information", systemImage: "info.circle") { + presentErrorAction?(error) + } + .labelStyle(.iconOnly) + } + } else { + VStack(alignment: .leading, spacing: 8) { + if message.participant == .user && 
!message.attachments.isEmpty { + AttachmentPreviewScrollView(attachments: message.attachments) + } - if let image = message.image { - Image(uiImage: image) - .resizable() - .aspectRatio(contentMode: .fit) - .frame(maxWidth: 300, maxHeight: 300) - .clipShape(RoundedRectangle(cornerRadius: 8)) - } + if let image = message.image { + Image(uiImage: image) + .resizable() + .aspectRatio(contentMode: .fit) + .frame(maxWidth: 300, maxHeight: 300) + .clipShape(RoundedRectangle(cornerRadius: 8)) + } - // Grounded Response - if let groundingMetadata = message.groundingMetadata { - GroundedResponseView(message: message, groundingMetadata: groundingMetadata) - } else { - // Non-grounded response - ResponseTextView(message: message) - } + // Grounded Response + if let groundingMetadata = message.groundingMetadata { + GroundedResponseView(message: message, groundingMetadata: groundingMetadata) + } else { + // Non-grounded response + ResponseTextView(message: message) + } + } + } } - } } - } } struct ResponseTextView: View { - var message: ChatMessage + var message: ChatMessage - var body: some View { - Markdown(message.content ?? "") - .markdownTextStyle { - FontFamilyVariant(.normal) - FontSize(.em(0.85)) - ForegroundColor(message.participant == .other ? Color(UIColor.label) : .white) - } - .markdownBlockStyle(\.codeBlock) { configuration in - configuration.label - .relativeLineSpacing(.em(0.25)) - .markdownTextStyle { - FontFamilyVariant(.monospaced) - FontSize(.em(0.85)) - ForegroundColor(Color(.label)) - } - .padding() - .background(Color(.secondarySystemBackground)) - .clipShape(RoundedRectangle(cornerRadius: 8)) - .markdownMargin(top: .zero, bottom: .em(0.8)) - } - } + var body: some View { + Markdown(message.content ?? "") + .markdownTextStyle { + FontFamilyVariant(.normal) + FontSize(.em(0.85)) + ForegroundColor(message.participant == .other ? Color(UIColor.label) : .white) + } + .markdownBlockStyle(\.codeBlock) { configuration in + configuration.label + .relativeLineSpacing(.em(0.25)) + .markdownTextStyle { + FontFamilyVariant(.monospaced) + FontSize(.em(0.85)) + ForegroundColor(Color(.label)) + } + .padding() + .background(Color(.secondarySystemBackground)) + .clipShape(RoundedRectangle(cornerRadius: 8)) + .markdownMargin(top: .zero, bottom: .em(0.8)) + } + } } struct MessageView: View { - var message: ChatMessage + var message: ChatMessage - private var participantLabel: String { - message.participant == .user ? "User" : "Model" - } + private var participantLabel: String { + message.participant == .user ? "User" : "Model" + } - var body: some View { - VStack(alignment: message.participant == .user ? .trailing : .leading, spacing: 4) { - // Sender label - Text(participantLabel) - .font(.caption2) - .fontWeight(.medium) - .foregroundColor(.secondary) - .textCase(.uppercase) - .padding(.horizontal, 8) - .padding(.vertical, 2) - .frame(maxWidth: .infinity, alignment: message.participant == .user ? .trailing : .leading) + var body: some View { + VStack(alignment: message.participant == .user ? .trailing : .leading, spacing: 4) { + // Sender label + Text(participantLabel) + .font(.caption2) + .fontWeight(.medium) + .foregroundColor(.secondary) + .textCase(.uppercase) + .padding(.horizontal, 8) + .padding(.vertical, 2) + .frame(maxWidth: .infinity, alignment: message.participant == .user ? .trailing : .leading) - // Message content - HStack { - if message.participant == .user { - Spacer() - } - MessageContentView(message: message) - .padding(10) - .background(message.participant == .other - ? 
Color(UIColor.systemFill) - : Color(UIColor.systemBlue)) - .roundedCorner(10, - corners: [ - .topLeft, - .topRight, - message.participant == .other ? .bottomRight : .bottomLeft, - ]) - if message.participant == .other { - Spacer() + // Message content + HStack { + if message.participant == .user { + Spacer() + } + MessageContentView(message: message) + .padding(10) + .background(message.participant == .other + ? Color(UIColor.systemFill) + : Color(UIColor.systemBlue)) + .roundedCorner(10, + corners: [ + .topLeft, + .topRight, + message.participant == .other ? .bottomRight : .bottomLeft, + ]) + if message.participant == .other { + Spacer() + } + } } - } + .listRowSeparator(.hidden) } - .listRowSeparator(.hidden) - } } struct MessageView_Previews: PreviewProvider { - static var previews: some View { - NavigationView { - List { - MessageView(message: ChatMessage.samples[0]) - MessageView(message: ChatMessage.samples[1]) - MessageView(message: ChatMessage.samples[2]) - MessageView(message: ChatMessage(content: "Hello!", participant: .other, pending: true)) - } - .listStyle(.plain) - .navigationTitle("Chat example") + static var previews: some View { + NavigationView { + List { + MessageView(message: ChatMessage.samples[0]) + MessageView(message: ChatMessage.samples[1]) + MessageView(message: ChatMessage.samples[2]) + MessageView(message: ChatMessage(content: "Hello!", participant: .other, pending: true)) + } + .listStyle(.plain) + .navigationTitle("Chat example") + } } - } } diff --git a/firebaseai/FirebaseAIExample/Features/FunctionCalling/Screens/FunctionCallingScreen.swift b/firebaseai/FirebaseAIExample/Features/FunctionCalling/Screens/FunctionCallingScreen.swift index 97d26081e..ad9a4288e 100644 --- a/firebaseai/FirebaseAIExample/Features/FunctionCalling/Screens/FunctionCallingScreen.swift +++ b/firebaseai/FirebaseAIExample/Features/FunctionCalling/Screens/FunctionCallingScreen.swift @@ -13,59 +13,60 @@ // limitations under the License. #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif -import SwiftUI import ConversationKit +import SwiftUI struct FunctionCallingScreen: View { - let backendType: BackendOption - @StateObject var viewModel: FunctionCallingViewModel + let backendType: BackendOption + @StateObject var viewModel: FunctionCallingViewModel - init(backendType: BackendOption, sample: Sample? = nil) { - self.backendType = backendType - _viewModel = - StateObject(wrappedValue: FunctionCallingViewModel(backendType: backendType, - sample: sample)) - } + init(backendType: BackendOption, sample: Sample? = nil) { + self.backendType = backendType + _viewModel = + StateObject(wrappedValue: FunctionCallingViewModel(backendType: backendType, + sample: sample)) + } - var body: some View { - NavigationStack { - ConversationView(messages: $viewModel.messages, - userPrompt: viewModel.initialPrompt) { message in - MessageView(message: message) - } - .disableAttachments() - .onSendMessage { message in - await viewModel.sendMessage(message.content ?? 
"", streaming: true) - } - .onError { error in - viewModel.presentErrorDetails = true - } - .sheet(isPresented: $viewModel.presentErrorDetails) { - if let error = viewModel.error { - ErrorDetailsView(error: error) + var body: some View { + NavigationStack { + ConversationView(messages: $viewModel.messages, + userPrompt: viewModel.initialPrompt) + { message in + MessageView(message: message) + } + .disableAttachments() + .onSendMessage { message in + await viewModel.sendMessage(message.content ?? "", streaming: true) + } + .onError { _ in + viewModel.presentErrorDetails = true + } + .sheet(isPresented: $viewModel.presentErrorDetails) { + if let error = viewModel.error { + ErrorDetailsView(error: error) + } + } + .toolbar { + ToolbarItem(placement: .primaryAction) { + Button(action: newChat) { + Image(systemName: "square.and.pencil") + } + } + } + .navigationTitle(viewModel.title) + .navigationBarTitleDisplayMode(.inline) } - } - .toolbar { - ToolbarItem(placement: .primaryAction) { - Button(action: newChat) { - Image(systemName: "square.and.pencil") - } - } - } - .navigationTitle(viewModel.title) - .navigationBarTitleDisplayMode(.inline) } - } - private func newChat() { - viewModel.startNewChat() - } + private func newChat() { + viewModel.startNewChat() + } } #Preview { - FunctionCallingScreen(backendType: .googleAI) + FunctionCallingScreen(backendType: .googleAI) } diff --git a/firebaseai/FirebaseAIExample/Features/FunctionCalling/Services/WeatherService.swift b/firebaseai/FirebaseAIExample/Features/FunctionCalling/Services/WeatherService.swift index 8b257af1c..c9379b98e 100644 --- a/firebaseai/FirebaseAIExample/Features/FunctionCalling/Services/WeatherService.swift +++ b/firebaseai/FirebaseAIExample/Features/FunctionCalling/Services/WeatherService.swift @@ -13,19 +13,19 @@ // limitations under the License. #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif import Foundation import UIKit class WeatherService { - public static func fetchWeather(city: String, state: String, date: String) -> JSONObject { - return [ - "temperature": .number(38), - "chancePrecipitation": .string("56%"), - "cloudCover": .string("partlyCloudy"), - ] - } + public static func fetchWeather(city _: String, state _: String, date _: String) -> JSONObject { + return [ + "temperature": .number(38), + "chancePrecipitation": .string("56%"), + "cloudCover": .string("partlyCloudy"), + ] + } } diff --git a/firebaseai/FirebaseAIExample/Features/FunctionCalling/ViewModels/FunctionCallingViewModel.swift b/firebaseai/FirebaseAIExample/Features/FunctionCalling/ViewModels/FunctionCallingViewModel.swift index 215d513b2..a567cbc0d 100644 --- a/firebaseai/FirebaseAIExample/Features/FunctionCalling/ViewModels/FunctionCallingViewModel.swift +++ b/firebaseai/FirebaseAIExample/Features/FunctionCalling/ViewModels/FunctionCallingViewModel.swift @@ -13,276 +13,278 @@ // limitations under the License. 
#if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif -import Foundation -import UIKit import Combine import ConversationKit +import Foundation +import UIKit @MainActor class FunctionCallingViewModel: ObservableObject { - /// This array holds both the user's and the system's chat messages - @Published var messages = [ChatMessage]() + /// This array holds both the user's and the system's chat messages + @Published var messages = [ChatMessage]() - /// Indicates we're waiting for the model to finish - @Published var busy = false + /// Indicates we're waiting for the model to finish + @Published var busy = false - @Published var error: Error? - var hasError: Bool { - return error != nil - } - - @Published var presentErrorDetails: Bool = false + @Published var error: Error? + var hasError: Bool { + return error != nil + } - @Published var initialPrompt: String = "" - @Published var title: String = "" + @Published var presentErrorDetails: Bool = false - private var model: GenerativeModel - private var chat: Chat + @Published var initialPrompt: String = "" + @Published var title: String = "" - private var chatTask: Task? + private var model: GenerativeModel + private var chat: Chat - private var sample: Sample? - private var backendType: BackendOption + private var chatTask: Task? - init(backendType: BackendOption, sample: Sample? = nil) { - self.sample = sample - self.backendType = backendType + private var sample: Sample? + private var backendType: BackendOption - let firebaseService = backendType == .googleAI - ? FirebaseAI.firebaseAI(backend: .googleAI()) - : FirebaseAI.firebaseAI(backend: .vertexAI()) + init(backendType: BackendOption, sample: Sample? = nil) { + self.sample = sample + self.backendType = backendType - // create a generative model with sample data - model = firebaseService.generativeModel( - modelName: sample?.modelName ?? "gemini-2.5-flash-lite", - tools: sample?.tools, - systemInstruction: sample?.systemInstruction - ) + let firebaseService = backendType == .googleAI + ? FirebaseAI.firebaseAI(backend: .googleAI()) + : FirebaseAI.firebaseAI(backend: .vertexAI()) - chat = model.startChat() + // create a generative model with sample data + model = firebaseService.generativeModel( + modelName: sample?.modelName ?? "gemini-2.5-flash-lite", + tools: sample?.tools, + systemInstruction: sample?.systemInstruction + ) - initialPrompt = sample?.initialPrompt ?? "" - title = sample?.title ?? "" - } + chat = model.startChat() - func sendMessage(_ text: String, streaming: Bool = true) async { - error = nil - if streaming { - await internalSendMessageStreaming(text) - } else { - await internalSendMessage(text) + initialPrompt = sample?.initialPrompt ?? "" + title = sample?.title ?? 
"" } - } - - func startNewChat() { - stop() - error = nil - chat = model.startChat() - messages.removeAll() - initialPrompt = "" - } - - func stop() { - chatTask?.cancel() - error = nil - } - - private func internalSendMessageStreaming(_ text: String) async { - chatTask?.cancel() - - chatTask = Task { - busy = true - defer { - busy = false - } - - // first, add the user's message to the chat - let userMessage = ChatMessage(content: text, participant: .user) - messages.append(userMessage) - - // add a pending message while we're waiting for a response from the backend - let systemMessage = ChatMessage.pending(participant: .other) - messages.append(systemMessage) - - do { - let responseStream = try chat.sendMessageStream(text) - - var functionCalls = [FunctionCallPart]() - - for try await chunk in responseStream { - if !chunk.functionCalls.isEmpty { - functionCalls.append(contentsOf: chunk.functionCalls) - } - if let text = chunk.text { - messages[messages.count - 1] - .content = (messages[messages.count - 1].content ?? "") + text - messages[messages.count - 1].pending = false - } - } - // On functionCalls, never keep reading the old stream or call the second API inside the first for-loop. - // Start a NEW stream only after the function response turn is sent. - if !functionCalls.isEmpty { - try await handleFunctionCallsStreaming(functionCalls) + func sendMessage(_ text: String, streaming: Bool = true) async { + error = nil + if streaming { + await internalSendMessageStreaming(text) + } else { + await internalSendMessage(text) } - } catch { - self.error = error - print(error.localizedDescription) - let errorMessage = ChatMessage(content: "An error occurred. Please try again.", - participant: .other, - error: error, - pending: false) - messages[messages.count - 1] = errorMessage - } } - } - - private func internalSendMessage(_ text: String) async { - chatTask?.cancel() - - chatTask = Task { - busy = true - defer { - busy = false - } - - // first, add the user's message to the chat - let userMessage = ChatMessage(content: text, participant: .user) - messages.append(userMessage) - - // add a pending message while we're waiting for a response from the backend - let systemMessage = ChatMessage.pending(participant: .other) - messages.append(systemMessage) - do { - let response = try await chat.sendMessage(text) - - if !response.functionCalls.isEmpty { - try await handleFunctionCalls(response) - } else { - if let responseText = response.text { - // replace pending message with backend response - messages[messages.count - 1].content = responseText - messages[messages.count - 1].pending = false - } - } - } catch { - self.error = error - print(error.localizedDescription) - let errorMessage = ChatMessage(content: "An error occurred. 
Please try again.", - participant: .other, - error: error, - pending: false) - messages[messages.count - 1] = errorMessage - } + func startNewChat() { + stop() + error = nil + chat = model.startChat() + messages.removeAll() + initialPrompt = "" } - } - - private func handleFunctionCallsStreaming(_ functionCalls: [FunctionCallPart]) async throws { - var functionResponses = [FunctionResponsePart]() - - for functionCall in functionCalls { - switch functionCall.name { - case "fetchWeather": - guard case let .string(city) = functionCall.args["city"], - case let .string(state) = functionCall.args["state"], - case let .string(date) = functionCall.args["date"] else { - throw NSError( - domain: "FunctionCallingError", - code: 0, - userInfo: [ - NSLocalizedDescriptionKey: "Malformed arguments for fetchWeather: \(functionCall.args)", - ] - ) - } - functionResponses.append( - FunctionResponsePart( - name: functionCall.name, - response: WeatherService.fetchWeather(city: city, state: state, date: date) - ) - ) - default: - print("Unknown function named \"\(functionCall.name)\".") - } + func stop() { + chatTask?.cancel() + error = nil } - if !functionResponses.isEmpty { - let finalResponse = try chat - .sendMessageStream([ModelContent(role: "function", parts: functionResponses)]) - - for try await chunk in finalResponse { - guard let candidate = chunk.candidates.first else { - throw NSError( - domain: "FunctionCallingError", - code: 1, - userInfo: [NSLocalizedDescriptionKey: "No candidate in response chunk"] - ) + private func internalSendMessageStreaming(_ text: String) async { + chatTask?.cancel() + + chatTask = Task { + busy = true + defer { + busy = false + } + + // first, add the user's message to the chat + let userMessage = ChatMessage(content: text, participant: .user) + messages.append(userMessage) + + // add a pending message while we're waiting for a response from the backend + let systemMessage = ChatMessage.pending(participant: .other) + messages.append(systemMessage) + + do { + let responseStream = try chat.sendMessageStream(text) + + var functionCalls = [FunctionCallPart]() + + for try await chunk in responseStream { + if !chunk.functionCalls.isEmpty { + functionCalls.append(contentsOf: chunk.functionCalls) + } + if let text = chunk.text { + messages[messages.count - 1] + .content = (messages[messages.count - 1].content ?? "") + text + messages[messages.count - 1].pending = false + } + } + + // On functionCalls, never keep reading the old stream or call the second API inside the first for-loop. + // Start a NEW stream only after the function response turn is sent. + if !functionCalls.isEmpty { + try await handleFunctionCallsStreaming(functionCalls) + } + } catch { + self.error = error + print(error.localizedDescription) + let errorMessage = ChatMessage(content: "An error occurred. Please try again.", + participant: .other, + error: error, + pending: false) + messages[messages.count - 1] = errorMessage + } } + } - for part in candidate.content.parts { - if let textPart = part as? TextPart { - messages[messages.count - 1] - .content = (messages[messages.count - 1].content ?? 
"") + textPart.text - messages[messages.count - 1].pending = false - } + private func internalSendMessage(_ text: String) async { + chatTask?.cancel() + + chatTask = Task { + busy = true + defer { + busy = false + } + + // first, add the user's message to the chat + let userMessage = ChatMessage(content: text, participant: .user) + messages.append(userMessage) + + // add a pending message while we're waiting for a response from the backend + let systemMessage = ChatMessage.pending(participant: .other) + messages.append(systemMessage) + + do { + let response = try await chat.sendMessage(text) + + if !response.functionCalls.isEmpty { + try await handleFunctionCalls(response) + } else { + if let responseText = response.text { + // replace pending message with backend response + messages[messages.count - 1].content = responseText + messages[messages.count - 1].pending = false + } + } + } catch { + self.error = error + print(error.localizedDescription) + let errorMessage = ChatMessage(content: "An error occurred. Please try again.", + participant: .other, + error: error, + pending: false) + messages[messages.count - 1] = errorMessage + } } - } } - } - - private func handleFunctionCalls(_ response: GenerateContentResponse) async throws { - var functionResponses = [FunctionResponsePart]() - - for functionCall in response.functionCalls { - switch functionCall.name { - case "fetchWeather": - guard case let .string(city) = functionCall.args["city"], - case let .string(state) = functionCall.args["state"], - case let .string(date) = functionCall.args["date"] else { - throw NSError( - domain: "FunctionCallingError", - code: 0, - userInfo: [ - NSLocalizedDescriptionKey: "Malformed arguments for fetchWeather: \(functionCall.args)", - ] - ) + + private func handleFunctionCallsStreaming(_ functionCalls: [FunctionCallPart]) async throws { + var functionResponses = [FunctionResponsePart]() + + for functionCall in functionCalls { + switch functionCall.name { + case "fetchWeather": + guard case let .string(city) = functionCall.args["city"], + case let .string(state) = functionCall.args["state"], + case let .string(date) = functionCall.args["date"] + else { + throw NSError( + domain: "FunctionCallingError", + code: 0, + userInfo: [ + NSLocalizedDescriptionKey: "Malformed arguments for fetchWeather: \(functionCall.args)", + ] + ) + } + + functionResponses.append( + FunctionResponsePart( + name: functionCall.name, + response: WeatherService.fetchWeather(city: city, state: state, date: date) + ) + ) + default: + print("Unknown function named \"\(functionCall.name)\".") + } } - functionResponses.append( - FunctionResponsePart( - name: functionCall.name, - response: WeatherService.fetchWeather(city: city, state: state, date: date) - ) - ) - default: - print("Unknown function named \"\(functionCall.name)\".") - } + if !functionResponses.isEmpty { + let finalResponse = try chat + .sendMessageStream([ModelContent(role: "function", parts: functionResponses)]) + + for try await chunk in finalResponse { + guard let candidate = chunk.candidates.first else { + throw NSError( + domain: "FunctionCallingError", + code: 1, + userInfo: [NSLocalizedDescriptionKey: "No candidate in response chunk"] + ) + } + + for part in candidate.content.parts { + if let textPart = part as? TextPart { + messages[messages.count - 1] + .content = (messages[messages.count - 1].content ?? 
"") + textPart.text + messages[messages.count - 1].pending = false + } + } + } + } } - if !functionResponses.isEmpty { - let finalResponse = try await chat - .sendMessage([ModelContent(role: "function", parts: functionResponses)]) - - guard let candidate = finalResponse.candidates.first else { - throw NSError( - domain: "FunctionCallingError", - code: 1, - userInfo: [NSLocalizedDescriptionKey: "No candidate in response"] - ) - } + private func handleFunctionCalls(_ response: GenerateContentResponse) async throws { + var functionResponses = [FunctionResponsePart]() + + for functionCall in response.functionCalls { + switch functionCall.name { + case "fetchWeather": + guard case let .string(city) = functionCall.args["city"], + case let .string(state) = functionCall.args["state"], + case let .string(date) = functionCall.args["date"] + else { + throw NSError( + domain: "FunctionCallingError", + code: 0, + userInfo: [ + NSLocalizedDescriptionKey: "Malformed arguments for fetchWeather: \(functionCall.args)", + ] + ) + } + + functionResponses.append( + FunctionResponsePart( + name: functionCall.name, + response: WeatherService.fetchWeather(city: city, state: state, date: date) + ) + ) + default: + print("Unknown function named \"\(functionCall.name)\".") + } + } - for part in candidate.content.parts { - if let textPart = part as? TextPart { - messages[messages.count - 1] - .content = (messages[messages.count - 1].content ?? "") + textPart.text - messages[messages.count - 1].pending = false + if !functionResponses.isEmpty { + let finalResponse = try await chat + .sendMessage([ModelContent(role: "function", parts: functionResponses)]) + + guard let candidate = finalResponse.candidates.first else { + throw NSError( + domain: "FunctionCallingError", + code: 1, + userInfo: [NSLocalizedDescriptionKey: "No candidate in response"] + ) + } + + for part in candidate.content.parts { + if let textPart = part as? TextPart { + messages[messages.count - 1] + .content = (messages[messages.count - 1].content ?? "") + textPart.text + messages[messages.count - 1].pending = false + } + } } - } } - } } diff --git a/firebaseai/FirebaseAIExample/Features/GenerativeAIText/Screens/GenerateContentFromTemplateScreen.swift b/firebaseai/FirebaseAIExample/Features/GenerativeAIText/Screens/GenerateContentFromTemplateScreen.swift index 539cf95f7..43f591b2c 100644 --- a/firebaseai/FirebaseAIExample/Features/GenerativeAIText/Screens/GenerateContentFromTemplateScreen.swift +++ b/firebaseai/FirebaseAIExample/Features/GenerativeAIText/Screens/GenerateContentFromTemplateScreen.swift @@ -16,95 +16,95 @@ import ConversationKit import MarkdownUI import SwiftUI #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif struct GenerateContentFromTemplateScreen: View { - let backendType: BackendOption - @StateObject var viewModel: GenerateContentFromTemplateViewModel + let backendType: BackendOption + @StateObject var viewModel: GenerateContentFromTemplateViewModel - init(backendType: BackendOption, sample: Sample? = nil) { - self.backendType = backendType - _viewModel = - StateObject(wrappedValue: GenerateContentFromTemplateViewModel(backendType: backendType, - sample: sample)) - } + init(backendType: BackendOption, sample: Sample? 
= nil) { + self.backendType = backendType + _viewModel = + StateObject(wrappedValue: GenerateContentFromTemplateViewModel(backendType: backendType, + sample: sample)) + } - enum FocusedField: Hashable { - case message - } + enum FocusedField: Hashable { + case message + } - @FocusState - var focusedField: FocusedField? + @FocusState + var focusedField: FocusedField? - var body: some View { - ZStack { - ScrollView { - VStack { - MessageComposerView(message: $viewModel.userInput) - .padding(.bottom, 10) - .focused($focusedField, equals: .message) - .disableAttachments() - .onSubmitAction { sendOrStop() } + var body: some View { + ZStack { + ScrollView { + VStack { + MessageComposerView(message: $viewModel.userInput) + .padding(.bottom, 10) + .focused($focusedField, equals: .message) + .disableAttachments() + .onSubmitAction { sendOrStop() } - if viewModel.error != nil { - HStack { - Text("An error occurred.") - Button("More information", systemImage: "info.circle") { - viewModel.presentErrorDetails = true - } - .labelStyle(.iconOnly) - } - } + if viewModel.error != nil { + HStack { + Text("An error occurred.") + Button("More information", systemImage: "info.circle") { + viewModel.presentErrorDetails = true + } + .labelStyle(.iconOnly) + } + } - HStack(alignment: .top) { - Image(systemName: "text.bubble.fill") - .font(.title2) + HStack(alignment: .top) { + Image(systemName: "text.bubble.fill") + .font(.title2) - Markdown(viewModel.content) - } - .padding() + Markdown(viewModel.content) + } + .padding() + } + } + if viewModel.inProgress { + ProgressOverlay() + } + } + .onTapGesture { + focusedField = nil + } + .sheet(isPresented: $viewModel.presentErrorDetails) { + if let error = viewModel.error { + ErrorDetailsView(error: error) + } + } + .navigationTitle("Story teller") + .navigationBarTitleDisplayMode(.inline) + .onAppear { + focusedField = .message } - } - if viewModel.inProgress { - ProgressOverlay() - } - } - .onTapGesture { - focusedField = nil - } - .sheet(isPresented: $viewModel.presentErrorDetails) { - if let error = viewModel.error { - ErrorDetailsView(error: error) - } - } - .navigationTitle("Story teller") - .navigationBarTitleDisplayMode(.inline) - .onAppear { - focusedField = .message } - } - private func sendMessage() { - Task { - await viewModel.generateContent(prompt: viewModel.userInput) - focusedField = .message + private func sendMessage() { + Task { + await viewModel.generateContent(prompt: viewModel.userInput) + focusedField = .message + } } - } - private func sendOrStop() { - if viewModel.inProgress { - viewModel.stop() - } else { - sendMessage() + private func sendOrStop() { + if viewModel.inProgress { + viewModel.stop() + } else { + sendMessage() + } } - } } #Preview { - NavigationStack { - GenerateContentFromTemplateScreen(backendType: .googleAI) - } + NavigationStack { + GenerateContentFromTemplateScreen(backendType: .googleAI) + } } diff --git a/firebaseai/FirebaseAIExample/Features/GenerativeAIText/ViewModels/GenerateContentFromTemplateViewModel.swift b/firebaseai/FirebaseAIExample/Features/GenerativeAIText/ViewModels/GenerateContentFromTemplateViewModel.swift index 642428417..f0ced4875 100644 --- a/firebaseai/FirebaseAIExample/Features/GenerativeAIText/ViewModels/GenerateContentFromTemplateViewModel.swift +++ b/firebaseai/FirebaseAIExample/Features/GenerativeAIText/ViewModels/GenerateContentFromTemplateViewModel.swift @@ -13,98 +13,98 @@ // limitations under the License. 
#if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif +import Combine import Foundation import OSLog import SwiftUI -import Combine @MainActor class GenerateContentFromTemplateViewModel: ObservableObject { - private var logger = Logger(subsystem: Bundle.main.bundleIdentifier!, category: "generative-ai") + private var logger = Logger(subsystem: Bundle.main.bundleIdentifier!, category: "generative-ai") - @Published - var userInput: String = "" + @Published + var userInput: String = "" - @Published - var content: String = "" + @Published + var content: String = "" - @Published - var error: Error? - var hasError: Bool { - return error != nil - } + @Published + var error: Error? + var hasError: Bool { + return error != nil + } - @Published - var presentErrorDetails: Bool = false + @Published + var presentErrorDetails: Bool = false - @Published - var inProgress = false + @Published + var inProgress = false - private let model: TemplateGenerativeModel - private var backendType: BackendOption + private let model: TemplateGenerativeModel + private var backendType: BackendOption - private var generateContentTask: Task? + private var generateContentTask: Task? - private var sample: Sample? + private var sample: Sample? - init(backendType: BackendOption, sample: Sample? = nil) { - self.sample = sample - self.backendType = backendType + init(backendType: BackendOption, sample: Sample? = nil) { + self.sample = sample + self.backendType = backendType - let firebaseService = backendType == .googleAI - ? FirebaseAI.firebaseAI(backend: .googleAI()) - : FirebaseAI.firebaseAI(backend: .vertexAI()) + let firebaseService = backendType == .googleAI + ? FirebaseAI.firebaseAI(backend: .googleAI()) + : FirebaseAI.firebaseAI(backend: .vertexAI()) - model = firebaseService.templateGenerativeModel() + model = firebaseService.templateGenerativeModel() - if let sample { - userInput = sample.initialPrompt ?? "" + if let sample { + userInput = sample.initialPrompt ?? 
"" + } } - } - - func generateContent(prompt: String) async { - stop() - - generateContentTask = Task { - inProgress = true - defer { - inProgress = false - } - - // Clear previous content before generating new content - content = "" - - do { - let responseStream = try model.generateContentStream( - templateID: "apple-qs-greeting", - inputs: [ - "name": prompt, - "language": "Spanish", - ] - ) - - for try await chunk in responseStream { - if let text = chunk.text { - if !Task.isCancelled { - content += text + + func generateContent(prompt: String) async { + stop() + + generateContentTask = Task { + inProgress = true + defer { + inProgress = false + } + + // Clear previous content before generating new content + content = "" + + do { + let responseStream = try model.generateContentStream( + templateID: "apple-qs-greeting", + inputs: [ + "name": prompt, + "language": "Spanish", + ] + ) + + for try await chunk in responseStream { + if let text = chunk.text { + if !Task.isCancelled { + content += text + } + } + } + } catch { + if !Task.isCancelled { + self.error = error + logger.error("Error generating content from template: \(error)") + } } - } - } - } catch { - if !Task.isCancelled { - self.error = error - logger.error("Error generating content from template: \(error)") } - } } - } - func stop() { - generateContentTask?.cancel() - generateContentTask = nil - } + func stop() { + generateContentTask?.cancel() + generateContentTask = nil + } } diff --git a/firebaseai/FirebaseAIExample/Features/Grounding/Screens/GroundingScreen.swift b/firebaseai/FirebaseAIExample/Features/Grounding/Screens/GroundingScreen.swift index 77bc414da..bc3f451fa 100644 --- a/firebaseai/FirebaseAIExample/Features/Grounding/Screens/GroundingScreen.swift +++ b/firebaseai/FirebaseAIExample/Features/Grounding/Screens/GroundingScreen.swift @@ -13,59 +13,60 @@ // limitations under the License. #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif -import SwiftUI import ConversationKit +import SwiftUI struct GroundingScreen: View { - let backendType: BackendOption - @StateObject var viewModel: GroundingViewModel + let backendType: BackendOption + @StateObject var viewModel: GroundingViewModel - init(backendType: BackendOption, sample: Sample? = nil) { - self.backendType = backendType - _viewModel = - StateObject(wrappedValue: GroundingViewModel(backendType: backendType, - sample: sample)) - } + init(backendType: BackendOption, sample: Sample? = nil) { + self.backendType = backendType + _viewModel = + StateObject(wrappedValue: GroundingViewModel(backendType: backendType, + sample: sample)) + } - var body: some View { - NavigationStack { - ConversationView(messages: $viewModel.messages, - userPrompt: viewModel.initialPrompt) { message in - MessageView(message: message) - } - .disableAttachments() - .onSendMessage { message in - await viewModel.sendMessage(message.content ?? "", streaming: true) - } - .onError { error in - viewModel.presentErrorDetails = true - } - .sheet(isPresented: $viewModel.presentErrorDetails) { - if let error = viewModel.error { - ErrorDetailsView(error: error) + var body: some View { + NavigationStack { + ConversationView(messages: $viewModel.messages, + userPrompt: viewModel.initialPrompt) + { message in + MessageView(message: message) + } + .disableAttachments() + .onSendMessage { message in + await viewModel.sendMessage(message.content ?? 
"", streaming: true) + } + .onError { _ in + viewModel.presentErrorDetails = true + } + .sheet(isPresented: $viewModel.presentErrorDetails) { + if let error = viewModel.error { + ErrorDetailsView(error: error) + } + } + .toolbar { + ToolbarItem(placement: .primaryAction) { + Button(action: newChat) { + Image(systemName: "square.and.pencil") + } + } + } + .navigationTitle(viewModel.title) + .navigationBarTitleDisplayMode(.inline) } - } - .toolbar { - ToolbarItem(placement: .primaryAction) { - Button(action: newChat) { - Image(systemName: "square.and.pencil") - } - } - } - .navigationTitle(viewModel.title) - .navigationBarTitleDisplayMode(.inline) } - } - private func newChat() { - viewModel.startNewChat() - } + private func newChat() { + viewModel.startNewChat() + } } #Preview { - GroundingScreen(backendType: .googleAI) + GroundingScreen(backendType: .googleAI) } diff --git a/firebaseai/FirebaseAIExample/Features/Grounding/ViewModels/GroundingViewModel.swift b/firebaseai/FirebaseAIExample/Features/Grounding/ViewModels/GroundingViewModel.swift index 7085cd8b5..ec327647d 100644 --- a/firebaseai/FirebaseAIExample/Features/Grounding/ViewModels/GroundingViewModel.swift +++ b/firebaseai/FirebaseAIExample/Features/Grounding/ViewModels/GroundingViewModel.swift @@ -13,171 +13,171 @@ // limitations under the License. #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif -import Foundation import Combine -import UIKit import ConversationKit +import Foundation +import UIKit @MainActor class GroundingViewModel: ObservableObject { - /// This array holds both the user's and the system's chat messages - @Published var messages = [ChatMessage]() + /// This array holds both the user's and the system's chat messages + @Published var messages = [ChatMessage]() - /// Indicates we're waiting for the model to finish - @Published var busy = false + /// Indicates we're waiting for the model to finish + @Published var busy = false - @Published var error: Error? - var hasError: Bool { - return error != nil - } - - @Published var presentErrorDetails: Bool = false + @Published var error: Error? + var hasError: Bool { + return error != nil + } - @Published var initialPrompt: String = "" - @Published var title: String = "" + @Published var presentErrorDetails: Bool = false - private var model: GenerativeModel - private var chat: Chat + @Published var initialPrompt: String = "" + @Published var title: String = "" - private var chatTask: Task? + private var model: GenerativeModel + private var chat: Chat - private var sample: Sample? + private var chatTask: Task? - private var backendType: BackendOption + private var sample: Sample? - init(backendType: BackendOption, sample: Sample? = nil) { - self.sample = sample - self.backendType = backendType + private var backendType: BackendOption - let firebaseService = backendType == .googleAI - ? FirebaseAI.firebaseAI(backend: .googleAI()) - : FirebaseAI.firebaseAI(backend: .vertexAI()) + init(backendType: BackendOption, sample: Sample? = nil) { + self.sample = sample + self.backendType = backendType - model = firebaseService.generativeModel( - modelName: sample?.modelName ?? "gemini-2.5-flash", - tools: sample?.tools, - systemInstruction: sample?.systemInstruction - ) + let firebaseService = backendType == .googleAI + ? 
FirebaseAI.firebaseAI(backend: .googleAI()) + : FirebaseAI.firebaseAI(backend: .vertexAI()) - chat = model.startChat() + model = firebaseService.generativeModel( + modelName: sample?.modelName ?? "gemini-2.5-flash", + tools: sample?.tools, + systemInstruction: sample?.systemInstruction + ) - initialPrompt = sample?.initialPrompt ?? "" - title = sample?.title ?? "" - } + chat = model.startChat() - func sendMessage(_ text: String, streaming: Bool = true) async { - error = nil - if streaming { - await internalSendMessageStreaming(text) - } else { - await internalSendMessage(text) + initialPrompt = sample?.initialPrompt ?? "" + title = sample?.title ?? "" } - } - - func startNewChat() { - stop() - error = nil - chat = model.startChat() - messages.removeAll() - initialPrompt = "" - } - - func stop() { - chatTask?.cancel() - error = nil - } - - private func internalSendMessageStreaming(_ text: String) async { - chatTask?.cancel() - - chatTask = Task { - busy = true - defer { - busy = false - } - - // first, add the user's message to the chat - let userMessage = ChatMessage(content: text, participant: .user) - messages.append(userMessage) - - // add a pending message while we're waiting for a response from the backend - let systemMessage = ChatMessage.pending(participant: .other) - messages.append(systemMessage) - - do { - let responseStream = try chat.sendMessageStream(text) - for try await chunk in responseStream { - messages[messages.count - 1].pending = false - if let text = chunk.text { - messages[messages.count - 1] - .content = (messages[messages.count - 1].content ?? "") + text - } - - if let candidate = chunk.candidates.first { - if let groundingMetadata = candidate.groundingMetadata { - self.messages[self.messages.count - 1].groundingMetadata = groundingMetadata - } - } + + func sendMessage(_ text: String, streaming: Bool = true) async { + error = nil + if streaming { + await internalSendMessageStreaming(text) + } else { + await internalSendMessage(text) } + } - } catch { - self.error = error - print(error.localizedDescription) - let errorMessage = ChatMessage(content: "An error occurred. 
Please try again.", - participant: .other, - error: error, - pending: false) - messages[messages.count - 1] = errorMessage - } + func startNewChat() { + stop() + error = nil + chat = model.startChat() + messages.removeAll() + initialPrompt = "" } - } - private func internalSendMessage(_ text: String) async { - chatTask?.cancel() + func stop() { + chatTask?.cancel() + error = nil + } - chatTask = Task { - busy = true - defer { - busy = false - } + private func internalSendMessageStreaming(_ text: String) async { + chatTask?.cancel() - // first, add the user's message to the chat - let userMessage = ChatMessage(content: text, participant: .user) - messages.append(userMessage) + chatTask = Task { + busy = true + defer { + busy = false + } - // add a pending message while we're waiting for a response from the backend - let systemMessage = ChatMessage.pending(participant: .other) - messages.append(systemMessage) + // first, add the user's message to the chat + let userMessage = ChatMessage(content: text, participant: .user) + messages.append(userMessage) + + // add a pending message while we're waiting for a response from the backend + let systemMessage = ChatMessage.pending(participant: .other) + messages.append(systemMessage) + + do { + let responseStream = try chat.sendMessageStream(text) + for try await chunk in responseStream { + messages[messages.count - 1].pending = false + if let text = chunk.text { + messages[messages.count - 1] + .content = (messages[messages.count - 1].content ?? "") + text + } + + if let candidate = chunk.candidates.first { + if let groundingMetadata = candidate.groundingMetadata { + self.messages[self.messages.count - 1].groundingMetadata = groundingMetadata + } + } + } + + } catch { + self.error = error + print(error.localizedDescription) + let errorMessage = ChatMessage(content: "An error occurred. Please try again.", + participant: .other, + error: error, + pending: false) + messages[messages.count - 1] = errorMessage + } + } + } - do { - var response: GenerateContentResponse? - response = try await chat.sendMessage(text) + private func internalSendMessage(_ text: String) async { + chatTask?.cancel() - if let responseText = response?.text { - // replace pending message with backend response - messages[messages.count - 1].content = responseText - messages[messages.count - 1].pending = false + chatTask = Task { + busy = true + defer { + busy = false + } - if let candidate = response?.candidates.first { - if let groundingMetadata = candidate.groundingMetadata { - self.messages[self.messages.count - 1].groundingMetadata = groundingMetadata + // first, add the user's message to the chat + let userMessage = ChatMessage(content: text, participant: .user) + messages.append(userMessage) + + // add a pending message while we're waiting for a response from the backend + let systemMessage = ChatMessage.pending(participant: .other) + messages.append(systemMessage) + + do { + var response: GenerateContentResponse? 
+ response = try await chat.sendMessage(text) + + if let responseText = response?.text { + // replace pending message with backend response + messages[messages.count - 1].content = responseText + messages[messages.count - 1].pending = false + + if let candidate = response?.candidates.first { + if let groundingMetadata = candidate.groundingMetadata { + self.messages[self.messages.count - 1].groundingMetadata = groundingMetadata + } + } + } + + } catch { + self.error = error + print(error.localizedDescription) + let errorMessage = ChatMessage(content: "An error occurred. Please try again.", + participant: .other, + error: error, + pending: false) + messages[messages.count - 1] = errorMessage } - } } - - } catch { - self.error = error - print(error.localizedDescription) - let errorMessage = ChatMessage(content: "An error occurred. Please try again.", - participant: .other, - error: error, - pending: false) - messages[messages.count - 1] = errorMessage - } } - } } diff --git a/firebaseai/FirebaseAIExample/Features/Grounding/Views/GoogleSearchSuggestionView.swift b/firebaseai/FirebaseAIExample/Features/Grounding/Views/GoogleSearchSuggestionView.swift index eaf66c076..7302f5fdb 100644 --- a/firebaseai/FirebaseAIExample/Features/Grounding/Views/GoogleSearchSuggestionView.swift +++ b/firebaseai/FirebaseAIExample/Features/Grounding/Views/GoogleSearchSuggestionView.swift @@ -20,60 +20,61 @@ import WebKit /// This is added to the bottom of chat messages containing results grounded /// in Google Search. struct GoogleSearchSuggestionView: UIViewRepresentable { - let htmlString: String + let htmlString: String - // This Coordinator class will act as the web view's navigation delegate. - class Coordinator: NSObject, WKNavigationDelegate { - func webView(_ webView: WKWebView, - decidePolicyFor navigationAction: WKNavigationAction, - decisionHandler: @escaping (WKNavigationActionPolicy) -> Void) { - // Check if the navigation was triggered by a user clicking a link. - if navigationAction.navigationType == .linkActivated { - if let url = navigationAction.request.url { - // Open the URL in the system's default browser (e.g., Safari). - UIApplication.shared.open(url) + // This Coordinator class will act as the web view's navigation delegate. + class Coordinator: NSObject, WKNavigationDelegate { + func webView(_: WKWebView, + decidePolicyFor navigationAction: WKNavigationAction, + decisionHandler: @escaping (WKNavigationActionPolicy) -> Void) + { + // Check if the navigation was triggered by a user clicking a link. + if navigationAction.navigationType == .linkActivated { + if let url = navigationAction.request.url { + // Open the URL in the system's default browser (e.g., Safari). + UIApplication.shared.open(url) + } + // Cancel the navigation inside our small web view. + decisionHandler(.cancel) + return + } + // For all other navigation types (like the initial HTML load), allow it. + decisionHandler(.allow) } - // Cancel the navigation inside our small web view. - decisionHandler(.cancel) - return - } - // For all other navigation types (like the initial HTML load), allow it. - decisionHandler(.allow) } - } - func makeCoordinator() -> Coordinator { - Coordinator() - } + func makeCoordinator() -> Coordinator { + Coordinator() + } - func makeUIView(context: Context) -> WKWebView { - let webView = WKWebView() - webView.isOpaque = false - webView.backgroundColor = .clear - webView.scrollView.backgroundColor = .clear - webView.scrollView.isScrollEnabled = false - // Set the coordinator as the navigation delegate. 
- webView.navigationDelegate = context.coordinator - return webView - } + func makeUIView(context: Context) -> WKWebView { + let webView = WKWebView() + webView.isOpaque = false + webView.backgroundColor = .clear + webView.scrollView.backgroundColor = .clear + webView.scrollView.isScrollEnabled = false + // Set the coordinator as the navigation delegate. + webView.navigationDelegate = context.coordinator + return webView + } - func updateUIView(_ uiView: WKWebView, context: Context) { - // The renderedContent is an HTML snippet with CSS. - // For it to render correctly, we wrap it in a basic HTML document structure. - let fullHTML = """ - - - - - - - - \(htmlString) - - - """ - uiView.loadHTMLString(fullHTML, baseURL: nil) - } + func updateUIView(_ uiView: WKWebView, context _: Context) { + // The renderedContent is an HTML snippet with CSS. + // For it to render correctly, we wrap it in a basic HTML document structure. + let fullHTML = """ + + + + + + + + \(htmlString) + + + """ + uiView.loadHTMLString(fullHTML, baseURL: nil) + } } diff --git a/firebaseai/FirebaseAIExample/Features/Grounding/Views/GroundedResponseView.swift b/firebaseai/FirebaseAIExample/Features/Grounding/Views/GroundedResponseView.swift index ea0501926..9d10f56ca 100644 --- a/firebaseai/FirebaseAIExample/Features/Grounding/Views/GroundedResponseView.swift +++ b/firebaseai/FirebaseAIExample/Features/Grounding/Views/GroundedResponseView.swift @@ -13,73 +13,73 @@ // limitations under the License. #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif import SwiftUI /// A view that displays a chat message that is grounded in Google Search. struct GroundedResponseView: View { - var message: ChatMessage - var groundingMetadata: GroundingMetadata + var message: ChatMessage + var groundingMetadata: GroundingMetadata - var body: some View { - // We can only display a response grounded in Google Search if the searchEntrypoint is non-nil. - let isCompliant = (groundingMetadata.groundingChunks.isEmpty || groundingMetadata - .searchEntryPoint != nil) - if isCompliant { - HStack(alignment: .top, spacing: 8) { - VStack(alignment: .leading, spacing: 8) { - // Message text - ResponseTextView(message: message) + var body: some View { + // We can only display a response grounded in Google Search if the searchEntrypoint is non-nil. + let isCompliant = (groundingMetadata.groundingChunks.isEmpty || groundingMetadata + .searchEntryPoint != nil) + if isCompliant { + HStack(alignment: .top, spacing: 8) { + VStack(alignment: .leading, spacing: 8) { + // Message text + ResponseTextView(message: message) - if !groundingMetadata.groundingChunks.isEmpty { - Divider() - // Source links - ForEach(0 ..< groundingMetadata.groundingChunks.count, id: \.self) { index in - if let webChunk = groundingMetadata.groundingChunks[index].web { - SourceLinkView( - title: webChunk.title ?? "Untitled Source", - uri: webChunk.uri - ) - } + if !groundingMetadata.groundingChunks.isEmpty { + Divider() + // Source links + ForEach(0 ..< groundingMetadata.groundingChunks.count, id: \.self) { index in + if let webChunk = groundingMetadata.groundingChunks[index].web { + SourceLinkView( + title: webChunk.title ?? 
"Untitled Source", + uri: webChunk.uri + ) + } + } + } + // Search suggestions + if let searchEntryPoint = groundingMetadata.searchEntryPoint { + Divider() + GoogleSearchSuggestionView(htmlString: searchEntryPoint.renderedContent) + .frame(height: 44) + .clipShape(RoundedRectangle(cornerRadius: 22)) + } + } } - } - // Search suggestions - if let searchEntryPoint = groundingMetadata.searchEntryPoint { - Divider() - GoogleSearchSuggestionView(htmlString: searchEntryPoint.renderedContent) - .frame(height: 44) - .clipShape(RoundedRectangle(cornerRadius: 22)) - } + .frame(maxWidth: .infinity, alignment: .leading) } - } - .frame(maxWidth: .infinity, alignment: .leading) } - } } /// A view for a single, clickable source link. struct SourceLinkView: View { - let title: String - let uri: String? + let title: String + let uri: String? - var body: some View { - if let uri, let url = URL(string: uri) { - Link(destination: url) { - HStack(spacing: 4) { - Image(systemName: "link") - .font(.caption) - .foregroundColor(.secondary) - Text(title) - .font(.footnote) - .underline() - .lineLimit(1) - .multilineTextAlignment(.leading) + var body: some View { + if let uri, let url = URL(string: uri) { + Link(destination: url) { + HStack(spacing: 4) { + Image(systemName: "link") + .font(.caption) + .foregroundColor(.secondary) + Text(title) + .font(.footnote) + .underline() + .lineLimit(1) + .multilineTextAlignment(.leading) + } + } + .buttonStyle(.plain) } - } - .buttonStyle(.plain) } - } } diff --git a/firebaseai/FirebaseAIExample/Features/Imagen/ImagenFromTemplateScreen.swift b/firebaseai/FirebaseAIExample/Features/Imagen/ImagenFromTemplateScreen.swift index 7e762dbc0..2ed016ad5 100644 --- a/firebaseai/FirebaseAIExample/Features/Imagen/ImagenFromTemplateScreen.swift +++ b/firebaseai/FirebaseAIExample/Features/Imagen/ImagenFromTemplateScreen.swift @@ -14,101 +14,101 @@ import SwiftUI #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif import ConversationKit struct ImagenFromTemplateScreen: View { - let backendType: BackendOption - @StateObject var viewModel: ImagenFromTemplateViewModel + let backendType: BackendOption + @StateObject var viewModel: ImagenFromTemplateViewModel - init(backendType: BackendOption, sample: Sample? = nil) { - self.backendType = backendType - _viewModel = - StateObject(wrappedValue: ImagenFromTemplateViewModel(backendType: backendType, - sample: sample)) - } + init(backendType: BackendOption, sample: Sample? = nil) { + self.backendType = backendType + _viewModel = + StateObject(wrappedValue: ImagenFromTemplateViewModel(backendType: backendType, + sample: sample)) + } + + enum FocusedField: Hashable { + case message + } - enum FocusedField: Hashable { - case message - } + @FocusState + var focusedField: FocusedField? - @FocusState - var focusedField: FocusedField? 
+ var body: some View { + ZStack { + ScrollView { + VStack { + MessageComposerView(message: $viewModel.userInput) + .padding(.bottom, 10) + .focused($focusedField, equals: .message) + .disableAttachments() + .onSubmitAction { sendOrStop() } - var body: some View { - ZStack { - ScrollView { - VStack { - MessageComposerView(message: $viewModel.userInput) - .padding(.bottom, 10) - .focused($focusedField, equals: .message) - .disableAttachments() - .onSubmitAction { sendOrStop() } + if viewModel.error != nil { + HStack { + Text("An error occurred.") + Button("More information", systemImage: "info.circle") { + viewModel.presentErrorDetails = true + } + .labelStyle(.iconOnly) + } + } - if viewModel.error != nil { - HStack { - Text("An error occurred.") - Button("More information", systemImage: "info.circle") { - viewModel.presentErrorDetails = true - } - .labelStyle(.iconOnly) + let spacing: CGFloat = 10 + LazyVGrid(columns: [ + GridItem(.flexible(), spacing: spacing), + GridItem(.flexible(), spacing: spacing), + ], spacing: spacing) { + ForEach(viewModel.images, id: \.self) { image in + Image(uiImage: image) + .resizable() + .aspectRatio(1, contentMode: .fill) + .cornerRadius(12) + .clipped() + } + } + .padding(.horizontal, spacing) + } } - } - - let spacing: CGFloat = 10 - LazyVGrid(columns: [ - GridItem(.flexible(), spacing: spacing), - GridItem(.flexible(), spacing: spacing), - ], spacing: spacing) { - ForEach(viewModel.images, id: \.self) { image in - Image(uiImage: image) - .resizable() - .aspectRatio(1, contentMode: .fill) - .cornerRadius(12) - .clipped() + if viewModel.inProgress { + ProgressOverlay() } - } - .padding(.horizontal, spacing) } - } - if viewModel.inProgress { - ProgressOverlay() - } - } - .onTapGesture { - focusedField = nil - } - .sheet(isPresented: $viewModel.presentErrorDetails) { - if let error = viewModel.error { - ErrorDetailsView(error: error) - } - } - .navigationTitle("Imagen Template") - .navigationBarTitleDisplayMode(.inline) - .onAppear { - focusedField = .message + .onTapGesture { + focusedField = nil + } + .sheet(isPresented: $viewModel.presentErrorDetails) { + if let error = viewModel.error { + ErrorDetailsView(error: error) + } + } + .navigationTitle("Imagen Template") + .navigationBarTitleDisplayMode(.inline) + .onAppear { + focusedField = .message + } } - } - private func sendMessage() { - Task { - await viewModel.generateImageFromTemplate(prompt: viewModel.userInput) - focusedField = .message + private func sendMessage() { + Task { + await viewModel.generateImageFromTemplate(prompt: viewModel.userInput) + focusedField = .message + } } - } - private func sendOrStop() { - if viewModel.inProgress { - viewModel.stop() - } else { - sendMessage() + private func sendOrStop() { + if viewModel.inProgress { + viewModel.stop() + } else { + sendMessage() + } } - } } #Preview { - ImagenFromTemplateScreen(backendType: .googleAI) + ImagenFromTemplateScreen(backendType: .googleAI) } diff --git a/firebaseai/FirebaseAIExample/Features/Imagen/ImagenFromTemplateViewModel.swift b/firebaseai/FirebaseAIExample/Features/Imagen/ImagenFromTemplateViewModel.swift index 50c821d0c..d5d72ead4 100644 --- a/firebaseai/FirebaseAIExample/Features/Imagen/ImagenFromTemplateViewModel.swift +++ b/firebaseai/FirebaseAIExample/Features/Imagen/ImagenFromTemplateViewModel.swift @@ -13,14 +13,14 @@ // limitations under the License. 
#if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif +import Combine import Foundation import OSLog import SwiftUI -import Combine // Template Details // @@ -37,82 +37,82 @@ import Combine @MainActor class ImagenFromTemplateViewModel: ObservableObject { - private var logger = Logger(subsystem: Bundle.main.bundleIdentifier!, category: "generative-ai") - - @Published - var userInput: String = "" - - @Published - var images = [UIImage]() - - @Published - var error: Error? - var hasError: Bool { - return error != nil - } - - @Published - var presentErrorDetails: Bool = false + private var logger = Logger(subsystem: Bundle.main.bundleIdentifier!, category: "generative-ai") - @Published - var inProgress = false + @Published + var userInput: String = "" - private let model: TemplateImagenModel - private var backendType: BackendOption + @Published + var images = [UIImage]() - private var generateImagesTask: Task? + @Published + var error: Error? + var hasError: Bool { + return error != nil + } - private var sample: Sample? + @Published + var presentErrorDetails: Bool = false - init(backendType: BackendOption, sample: Sample? = nil) { - self.sample = sample - self.backendType = backendType + @Published + var inProgress = false - let firebaseService = backendType == .googleAI - ? FirebaseAI.firebaseAI(backend: .googleAI()) - : FirebaseAI.firebaseAI(backend: .vertexAI()) + private let model: TemplateImagenModel + private var backendType: BackendOption - model = firebaseService.templateImagenModel() - } + private var generateImagesTask: Task? - func generateImageFromTemplate(prompt: String) async { - stop() + private var sample: Sample? - generateImagesTask = Task { - inProgress = true - defer { - inProgress = false - } + init(backendType: BackendOption, sample: Sample? = nil) { + self.sample = sample + self.backendType = backendType - do { - // 1. Call generateImages with the text prompt - let response = try await model.generateImages( - templateID: "imagen-generation-basic", - inputs: [ - "prompt": prompt, - ] - ) + let firebaseService = backendType == .googleAI + ? FirebaseAI.firebaseAI(backend: .googleAI()) + : FirebaseAI.firebaseAI(backend: .vertexAI()) - // 2. Print the reason images were filtered out, if any. - if let filteredReason = response.filteredReason { - print("Image(s) Blocked: \(filteredReason)") - } + model = firebaseService.templateImagenModel() + } - if !Task.isCancelled { - // 3. Convert the image data to UIImage for display in the UI - images = response.images.compactMap { UIImage(data: $0.data) } + func generateImageFromTemplate(prompt: String) async { + stop() + + generateImagesTask = Task { + inProgress = true + defer { + inProgress = false + } + + do { + // 1. Call generateImages with the text prompt + let response = try await model.generateImages( + templateID: "imagen-generation-basic", + inputs: [ + "prompt": prompt, + ] + ) + + // 2. Print the reason images were filtered out, if any. + if let filteredReason = response.filteredReason { + print("Image(s) Blocked: \(filteredReason)") + } + + if !Task.isCancelled { + // 3. 
Convert the image data to UIImage for display in the UI + images = response.images.compactMap { UIImage(data: $0.data) } + } + } catch { + if !Task.isCancelled { + self.error = error + logger.error("Error generating images from template: \(error)") + } + } } - } catch { - if !Task.isCancelled { - self.error = error - logger.error("Error generating images from template: \(error)") - } - } } - } - func stop() { - generateImagesTask?.cancel() - generateImagesTask = nil - } + func stop() { + generateImagesTask?.cancel() + generateImagesTask = nil + } } diff --git a/firebaseai/FirebaseAIExample/Features/Imagen/ImagenScreen.swift b/firebaseai/FirebaseAIExample/Features/Imagen/ImagenScreen.swift index 76a2bbb2e..dd527fd6e 100644 --- a/firebaseai/FirebaseAIExample/Features/Imagen/ImagenScreen.swift +++ b/firebaseai/FirebaseAIExample/Features/Imagen/ImagenScreen.swift @@ -14,107 +14,107 @@ import SwiftUI #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif import ConversationKit struct ImagenScreen: View { - let backendType: BackendOption - @StateObject var viewModel: ImagenViewModel + let backendType: BackendOption + @StateObject var viewModel: ImagenViewModel - @State - private var userPrompt = "" + @State + private var userPrompt = "" - init(backendType: BackendOption, sample: Sample? = nil) { - self.backendType = backendType - _viewModel = - StateObject(wrappedValue: ImagenViewModel(backendType: backendType, - sample: sample)) - } + init(backendType: BackendOption, sample: Sample? = nil) { + self.backendType = backendType + _viewModel = + StateObject(wrappedValue: ImagenViewModel(backendType: backendType, + sample: sample)) + } + + enum FocusedField: Hashable { + case message + } - enum FocusedField: Hashable { - case message - } + @FocusState + var focusedField: FocusedField? - @FocusState - var focusedField: FocusedField? 
+ var body: some View { + ZStack { + ScrollView { + VStack { + MessageComposerView(message: $userPrompt) + .padding(.bottom, 10) + .focused($focusedField, equals: .message) + .disableAttachments() + .onSubmitAction { sendOrStop() } - var body: some View { - ZStack { - ScrollView { - VStack { - MessageComposerView(message: $userPrompt) - .padding(.bottom, 10) - .focused($focusedField, equals: .message) - .disableAttachments() - .onSubmitAction { sendOrStop() } + if viewModel.error != nil { + HStack { + Text("An error occurred.") + Button("More information", systemImage: "info.circle") { + viewModel.presentErrorDetails = true + } + .labelStyle(.iconOnly) + } + } - if viewModel.error != nil { - HStack { - Text("An error occurred.") - Button("More information", systemImage: "info.circle") { - viewModel.presentErrorDetails = true - } - .labelStyle(.iconOnly) + let spacing: CGFloat = 10 + LazyVGrid(columns: [ + GridItem(.flexible(), spacing: spacing), + GridItem(.flexible(), spacing: spacing), + ], spacing: spacing) { + ForEach(viewModel.images, id: \.self) { image in + Image(uiImage: image) + .resizable() + .aspectRatio(1, contentMode: .fill) + .cornerRadius(12) + .clipped() + } + } + .padding(.horizontal, spacing) + } } - } - - let spacing: CGFloat = 10 - LazyVGrid(columns: [ - GridItem(.flexible(), spacing: spacing), - GridItem(.flexible(), spacing: spacing), - ], spacing: spacing) { - ForEach(viewModel.images, id: \.self) { image in - Image(uiImage: image) - .resizable() - .aspectRatio(1, contentMode: .fill) - .cornerRadius(12) - .clipped() + if viewModel.inProgress { + ProgressOverlay() + } + } + .onTapGesture { + focusedField = nil + } + .sheet(isPresented: $viewModel.presentErrorDetails) { + if let error = viewModel.error { + ErrorDetailsView(error: error) + } + } + .navigationTitle("Imagen example") + .navigationBarTitleDisplayMode(.inline) + .onAppear { + focusedField = .message + if userPrompt.isEmpty && !viewModel.initialPrompt.isEmpty { + userPrompt = viewModel.initialPrompt } - } - .padding(.horizontal, spacing) } - } - if viewModel.inProgress { - ProgressOverlay() - } - } - .onTapGesture { - focusedField = nil - } - .sheet(isPresented: $viewModel.presentErrorDetails) { - if let error = viewModel.error { - ErrorDetailsView(error: error) - } - } - .navigationTitle("Imagen example") - .navigationBarTitleDisplayMode(.inline) - .onAppear { - focusedField = .message - if userPrompt.isEmpty && !viewModel.initialPrompt.isEmpty { - userPrompt = viewModel.initialPrompt - } } - } - private func sendMessage() { - Task { - await viewModel.generateImage(prompt: userPrompt) - focusedField = .message + private func sendMessage() { + Task { + await viewModel.generateImage(prompt: userPrompt) + focusedField = .message + } } - } - private func sendOrStop() { - if viewModel.inProgress { - viewModel.stop() - } else { - sendMessage() + private func sendOrStop() { + if viewModel.inProgress { + viewModel.stop() + } else { + sendMessage() + } } - } } #Preview { - ImagenScreen(backendType: .googleAI) + ImagenScreen(backendType: .googleAI) } diff --git a/firebaseai/FirebaseAIExample/Features/Imagen/ImagenViewModel.swift b/firebaseai/FirebaseAIExample/Features/Imagen/ImagenViewModel.swift index 2328f83fe..144fa3a0d 100644 --- a/firebaseai/FirebaseAIExample/Features/Imagen/ImagenViewModel.swift +++ b/firebaseai/FirebaseAIExample/Features/Imagen/ImagenViewModel.swift @@ -13,102 +13,102 @@ // limitations under the License. 
#if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif -import Foundation import Combine +import Foundation import OSLog import SwiftUI @MainActor class ImagenViewModel: ObservableObject { - private var logger = Logger(subsystem: Bundle.main.bundleIdentifier!, category: "generative-ai") - - @Published - var initialPrompt: String = "" - - @Published - var images = [UIImage]() - - @Published - var error: Error? - var hasError: Bool { - return error != nil - } - - @Published - var presentErrorDetails: Bool = false + private var logger = Logger(subsystem: Bundle.main.bundleIdentifier!, category: "generative-ai") - @Published - var inProgress = false + @Published + var initialPrompt: String = "" - private let model: ImagenModel - private var backendType: BackendOption + @Published + var images = [UIImage]() - private var generateImagesTask: Task? + @Published + var error: Error? + var hasError: Bool { + return error != nil + } - private var sample: Sample? + @Published + var presentErrorDetails: Bool = false - init(backendType: BackendOption, sample: Sample? = nil) { - self.sample = sample - self.backendType = backendType + @Published + var inProgress = false - let firebaseService = backendType == .googleAI - ? FirebaseAI.firebaseAI(backend: .googleAI()) - : FirebaseAI.firebaseAI(backend: .vertexAI()) + private let model: ImagenModel + private var backendType: BackendOption - let modelName = "imagen-4.0-generate-001" - let safetySettings = ImagenSafetySettings( - safetyFilterLevel: .blockLowAndAbove - ) - var generationConfig = ImagenGenerationConfig() - generationConfig.numberOfImages = 4 - generationConfig.aspectRatio = .square1x1 + private var generateImagesTask: Task? - model = firebaseService.imagenModel( - modelName: modelName, - generationConfig: generationConfig, - safetySettings: safetySettings - ) + private var sample: Sample? - initialPrompt = sample?.initialPrompt ?? "" - } + init(backendType: BackendOption, sample: Sample? = nil) { + self.sample = sample + self.backendType = backendType - func generateImage(prompt: String) async { - stop() + let firebaseService = backendType == .googleAI + ? FirebaseAI.firebaseAI(backend: .googleAI()) + : FirebaseAI.firebaseAI(backend: .vertexAI()) - generateImagesTask = Task { - inProgress = true - defer { - inProgress = false - } + let modelName = "imagen-4.0-generate-001" + let safetySettings = ImagenSafetySettings( + safetyFilterLevel: .blockLowAndAbove + ) + var generationConfig = ImagenGenerationConfig() + generationConfig.numberOfImages = 4 + generationConfig.aspectRatio = .square1x1 - do { - // 1. Call generateImages with the text prompt - let response = try await model.generateImages(prompt: prompt) + model = firebaseService.imagenModel( + modelName: modelName, + generationConfig: generationConfig, + safetySettings: safetySettings + ) - // 2. Print the reason images were filtered out, if any. - if let filteredReason = response.filteredReason { - print("Image(s) Blocked: \(filteredReason)") - } + initialPrompt = sample?.initialPrompt ?? "" + } - if !Task.isCancelled { - // 3. Convert the image data to UIImage for display in the UI - images = response.images.compactMap { UIImage(data: $0.data) } + func generateImage(prompt: String) async { + stop() + + generateImagesTask = Task { + inProgress = true + defer { + inProgress = false + } + + do { + // 1. Call generateImages with the text prompt + let response = try await model.generateImages(prompt: prompt) + + // 2. 
Print the reason images were filtered out, if any. + if let filteredReason = response.filteredReason { + print("Image(s) Blocked: \(filteredReason)") + } + + if !Task.isCancelled { + // 3. Convert the image data to UIImage for display in the UI + images = response.images.compactMap { UIImage(data: $0.data) } + } + } catch { + if !Task.isCancelled { + self.error = error + logger.error("Error generating images: \(error)") + } + } } - } catch { - if !Task.isCancelled { - self.error = error - logger.error("Error generating images: \(error)") - } - } } - } - func stop() { - generateImagesTask?.cancel() - generateImagesTask = nil - } + func stop() { + generateImagesTask?.cancel() + generateImagesTask = nil + } } diff --git a/firebaseai/FirebaseAIExample/Features/Live/Screens/LiveScreen.swift b/firebaseai/FirebaseAIExample/Features/Live/Screens/LiveScreen.swift index 941a38a7c..c00dd4efb 100644 --- a/firebaseai/FirebaseAIExample/Features/Live/Screens/LiveScreen.swift +++ b/firebaseai/FirebaseAIExample/Features/Live/Screens/LiveScreen.swift @@ -13,62 +13,62 @@ // limitations under the License. #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif import SwiftUI import TipKit struct LiveScreen: View { - let backendType: BackendOption - @StateObject var viewModel: LiveViewModel + let backendType: BackendOption + @StateObject var viewModel: LiveViewModel - init(backendType: BackendOption, sample: Sample? = nil) { - self.backendType = backendType - _viewModel = - StateObject(wrappedValue: LiveViewModel(backendType: backendType, - sample: sample)) - } + init(backendType: BackendOption, sample: Sample? = nil) { + self.backendType = backendType + _viewModel = + StateObject(wrappedValue: LiveViewModel(backendType: backendType, + sample: sample)) + } - var body: some View { - VStack(spacing: 20) { - ModelAvatar(isConnected: viewModel.state == .connected) - TranscriptView(typewriter: viewModel.transcriptTypewriter) + var body: some View { + VStack(spacing: 20) { + ModelAvatar(isConnected: viewModel.state == .connected) + TranscriptView(typewriter: viewModel.transcriptTypewriter) - Spacer() - if let error = viewModel.error { - ErrorDetailsView(error: error) - } - if let tip = viewModel.tip, !viewModel.hasTranscripts { - TipView(tip) - } - ConnectButton( - state: viewModel.state, - onConnect: viewModel.connect, - onDisconnect: viewModel.disconnect - ) + Spacer() + if let error = viewModel.error { + ErrorDetailsView(error: error) + } + if let tip = viewModel.tip, !viewModel.hasTranscripts { + TipView(tip) + } + ConnectButton( + state: viewModel.state, + onConnect: viewModel.connect, + onDisconnect: viewModel.disconnect + ) - #if targetEnvironment(simulator) - AudioOutputToggle(isEnabled: $viewModel.isAudioOutputEnabled, onChange: { - Task { - await viewModel.onAudioPlaybackChanged() - } - }) - #endif - } - .padding() - .navigationTitle(viewModel.title) - .navigationBarTitleDisplayMode(.inline) - .background(viewModel.backgroundColor ?? .clear) - .onDisappear { - Task { - await viewModel.disconnect() - } + #if targetEnvironment(simulator) + AudioOutputToggle(isEnabled: $viewModel.isAudioOutputEnabled, onChange: { + Task { + await viewModel.onAudioPlaybackChanged() + } + }) + #endif + } + .padding() + .navigationTitle(viewModel.title) + .navigationBarTitleDisplayMode(.inline) + .background(viewModel.backgroundColor ?? 
.clear) + .onDisappear { + Task { + await viewModel.disconnect() + } + } } - } } #Preview { - LiveScreen(backendType: .googleAI) + LiveScreen(backendType: .googleAI) } diff --git a/firebaseai/FirebaseAIExample/Features/Live/ViewModels/LiveViewModel.swift b/firebaseai/FirebaseAIExample/Features/Live/ViewModels/LiveViewModel.swift index 8910a27c8..a3e041cd1 100644 --- a/firebaseai/FirebaseAIExample/Features/Live/ViewModels/LiveViewModel.swift +++ b/firebaseai/FirebaseAIExample/Features/Live/ViewModels/LiveViewModel.swift @@ -12,286 +12,286 @@ // See the License for the specific language governing permissions and // limitations under the License. +import AVFoundation +import AVKit +import Combine import FirebaseAILogic import Foundation import OSLog -import AVFoundation import SwiftUI -import AVKit -import Combine enum LiveViewModelState { - case idle - case connecting - case connected + case idle + case connecting + case connected } @MainActor class LiveViewModel: ObservableObject { - private var logger = Logger(subsystem: Bundle.main.bundleIdentifier!, category: "generative-ai") - - @Published - var error: Error? - - @Published - var state: LiveViewModelState = .idle - - @Published - var transcriptTypewriter: TypeWriterViewModel = TypeWriterViewModel() - - @Published - var backgroundColor: Color? = nil - - @Published - var hasTranscripts: Bool = false - - @Published - var title: String - - @Published - var tip: InlineTip? - - @Published - var isAudioOutputEnabled: Bool = { - #if targetEnvironment(simulator) - return false - #else - return true - #endif - }() - - private var model: LiveGenerativeModel? - private var liveSession: LiveSession? - - private var audioController: AudioController? - private var microphoneTask = Task {} - - init(backendType: BackendOption, sample: Sample? = nil) { - let firebaseService = backendType == .googleAI - ? FirebaseAI.firebaseAI(backend: .googleAI()) - : FirebaseAI.firebaseAI(backend: .vertexAI()) - - model = firebaseService.liveModel( - modelName: (backendType == .googleAI) ? "gemini-2.5-flash-native-audio-preview-09-2025" : - "gemini-live-2.5-flash-preview-native-audio-09-2025", - generationConfig: sample?.liveGenerationConfig, - tools: sample?.tools, - systemInstruction: sample?.systemInstruction - ) - title = sample?.title ?? "" - tip = sample?.tip - } - - /// Start a connection to the model. - /// - /// If a connection is already active, you'll need to call ``LiveViewModel/disconnect()`` first. - func connect() async { - guard let model, state == .idle else { - return + private var logger = Logger(subsystem: Bundle.main.bundleIdentifier!, category: "generative-ai") + + @Published + var error: Error? + + @Published + var state: LiveViewModelState = .idle + + @Published + var transcriptTypewriter: TypeWriterViewModel = .init() + + @Published + var backgroundColor: Color? = nil + + @Published + var hasTranscripts: Bool = false + + @Published + var title: String + + @Published + var tip: InlineTip? + + @Published + var isAudioOutputEnabled: Bool = { + #if targetEnvironment(simulator) + return false + #else + return true + #endif + }() + + private var model: LiveGenerativeModel? + private var liveSession: LiveSession? + + private var audioController: AudioController? + private var microphoneTask = Task {} + + init(backendType: BackendOption, sample: Sample? = nil) { + let firebaseService = backendType == .googleAI + ? 
FirebaseAI.firebaseAI(backend: .googleAI()) + : FirebaseAI.firebaseAI(backend: .vertexAI()) + + model = firebaseService.liveModel( + modelName: (backendType == .googleAI) ? "gemini-2.5-flash-native-audio-preview-09-2025" : + "gemini-live-2.5-flash-preview-native-audio-09-2025", + generationConfig: sample?.liveGenerationConfig, + tools: sample?.tools, + systemInstruction: sample?.systemInstruction + ) + title = sample?.title ?? "" + tip = sample?.tip } - if !isAudioOutputEnabled { - logger.warning("Playback audio is disabled.") - } + /// Start a connection to the model. + /// + /// If a connection is already active, you'll need to call ``LiveViewModel/disconnect()`` first. + func connect() async { + guard let model, state == .idle else { + return + } - guard await requestRecordPermission() else { - logger.warning("The user denied us permission to record the microphone.") - isAudioOutputEnabled = false - return - } + if !isAudioOutputEnabled { + logger.warning("Playback audio is disabled.") + } - state = .connecting - transcriptTypewriter.restart() - hasTranscripts = false + guard await requestRecordPermission() else { + logger.warning("The user denied us permission to record the microphone.") + isAudioOutputEnabled = false + return + } - do { - liveSession = try await model.connect() - audioController = try await AudioController() + state = .connecting + transcriptTypewriter.restart() + hasTranscripts = false - try await startRecording() + do { + liveSession = try await model.connect() + audioController = try await AudioController() - state = .connected - try await startProcessingResponses() - } catch { - logger.error("\(String(describing: error))") - self.error = error - await disconnect() - } - } - - func onAudioPlaybackChanged() async { - if isAudioOutputEnabled { - guard await requestRecordPermission() else { - logger.warning("The user denied us permission to record the microphone.") - isAudioOutputEnabled = false - return - } + try await startRecording() + + state = .connected + try await startProcessingResponses() + } catch { + logger.error("\(String(describing: error))") + self.error = error + await disconnect() + } } - } - - /// Disconnects the model. - /// - /// Will stop any pending playback, and the recording of the mic. - func disconnect() async { - await audioController?.stop() - await liveSession?.close() - microphoneTask.cancel() - state = .idle - liveSession = nil - transcriptTypewriter.clearPending() - - withAnimation { - backgroundColor = nil + + func onAudioPlaybackChanged() async { + if isAudioOutputEnabled { + guard await requestRecordPermission() else { + logger.warning("The user denied us permission to record the microphone.") + isAudioOutputEnabled = false + return + } + } } - } - /// Starts recording data from the user's microphone, and sends it to the model. - private func startRecording() async throws { - guard let audioController, let liveSession else { return } + /// Disconnects the model. + /// + /// Will stop any pending playback, and the recording of the mic. 
+ func disconnect() async { + await audioController?.stop() + await liveSession?.close() + microphoneTask.cancel() + state = .idle + liveSession = nil + transcriptTypewriter.clearPending() + + withAnimation { + backgroundColor = nil + } + } - let stream = try await audioController.listenToMic() - microphoneTask = Task { - do { - for await audioBuffer in stream { - await liveSession.sendAudioRealtime(try audioBuffer.int16Data()) + /// Starts recording data from the user's microphone, and sends it to the model. + private func startRecording() async throws { + guard let audioController, let liveSession else { return } + + let stream = try await audioController.listenToMic() + microphoneTask = Task { + do { + for await audioBuffer in stream { + try await liveSession.sendAudioRealtime(audioBuffer.int16Data()) + } + } catch { + logger.error("\(String(describing: error))") + self.error = error + await disconnect() + } } - } catch { - logger.error("\(String(describing: error))") - self.error = error - await disconnect() - } } - } - /// Starts queuing responses from the model for parsing. - private func startProcessingResponses() async throws { - guard let liveSession else { return } + /// Starts queuing responses from the model for parsing. + private func startProcessingResponses() async throws { + guard let liveSession else { return } - for try await response in liveSession.responses { - try await processServerMessage(response) - } - } - - /// Requests permission to record the user's microphone, returning the result. - /// - /// This is a requirement on iOS devices, on top of needing the proper recording - /// intents. - private func requestRecordPermission() async -> Bool { - await withCheckedContinuation { cont in - if #available(iOS 17.0, *) { - Task { - let ok = await AVAudioApplication.requestRecordPermission() - cont.resume(with: .success(ok)) - } - } else { - AVAudioSession.sharedInstance().requestRecordPermission { ok in - cont.resume(with: .success(ok)) + for try await response in liveSession.responses { + try await processServerMessage(response) } - } } - } - - private func processServerMessage(_ message: LiveServerMessage) async throws { - switch message.payload { - case let .content(content): - try await processServerContent(content) - case let .toolCall(toolCall): - try await processFunctionCalls(functionCalls: toolCall.functionCalls ?? []) - case .toolCallCancellation: - // we don't have any long running functions to cancel - return - case let .goingAwayNotice(goingAwayNotice): - let time = goingAwayNotice.timeLeft?.description ?? "soon" - logger.warning("Going away in: \(time)") - } - } - private func processServerContent(_ content: LiveServerContent) async throws { - if let message = content.modelTurn { - try await processAudioMessages(message) + /// Requests permission to record the user's microphone, returning the result. + /// + /// This is a requirement on iOS devices, on top of needing the proper recording + /// intents. 
+ private func requestRecordPermission() async -> Bool { + await withCheckedContinuation { cont in + if #available(iOS 17.0, *) { + Task { + let ok = await AVAudioApplication.requestRecordPermission() + cont.resume(with: .success(ok)) + } + } else { + AVAudioSession.sharedInstance().requestRecordPermission { ok in + cont.resume(with: .success(ok)) + } + } + } } - if content.isTurnComplete { - // add a space, so the next time a transcript comes in, it's not squished with the previous one - transcriptTypewriter.appendText(" ") + private func processServerMessage(_ message: LiveServerMessage) async throws { + switch message.payload { + case let .content(content): + try await processServerContent(content) + case let .toolCall(toolCall): + try await processFunctionCalls(functionCalls: toolCall.functionCalls ?? []) + case .toolCallCancellation: + // we don't have any long running functions to cancel + return + case let .goingAwayNotice(goingAwayNotice): + let time = goingAwayNotice.timeLeft?.description ?? "soon" + logger.warning("Going away in: \(time)") + } } - if content.wasInterrupted { - logger.warning("Model was interrupted") - await audioController?.interrupt() - transcriptTypewriter.clearPending() - // adds an em dash to indicate that the model was cutoff - transcriptTypewriter.appendText("— ") - } else if let transcript = content.outputAudioTranscription?.text { - appendAudioTranscript(transcript) - } - } - - private func processAudioMessages(_ content: ModelContent) async throws { - for part in content.parts { - if let part = part as? InlineDataPart { - if part.mimeType.starts(with: "audio/pcm") { - if isAudioOutputEnabled { - try await audioController?.playAudio(audio: part.data) - } - } else { - logger.warning("Received non audio inline data part: \(part.mimeType)") + private func processServerContent(_ content: LiveServerContent) async throws { + if let message = content.modelTurn { + try await processAudioMessages(message) } - } - } - } - - private func processFunctionCalls(functionCalls: [FunctionCallPart]) async throws { - let responses = try functionCalls.map { functionCall in - switch functionCall.name { - case "changeBackgroundColor": - return try changeBackgroundColor(args: functionCall.args, id: functionCall.functionId) - case "clearBackgroundColor": - return clearBackgroundColor(id: functionCall.functionId) - default: - logger.debug("Function call: \(String(describing: functionCall))") - throw ApplicationError("Unknown function named \"\(functionCall.name)\".") - } - } - await liveSession?.sendFunctionResponses(responses) - } + if content.isTurnComplete { + // add a space, so the next time a transcript comes in, it's not squished with the previous one + transcriptTypewriter.appendText(" ") + } - private func appendAudioTranscript(_ transcript: String) { - hasTranscripts = true - transcriptTypewriter.appendText(transcript) - } + if content.wasInterrupted { + logger.warning("Model was interrupted") + await audioController?.interrupt() + transcriptTypewriter.clearPending() + // adds an em dash to indicate that the model was cutoff + transcriptTypewriter.appendText("— ") + } else if let transcript = content.outputAudioTranscription?.text { + appendAudioTranscript(transcript) + } + } - private func changeBackgroundColor(args: JSONObject, id: String?) 
throws -> FunctionResponsePart { - guard case let .string(color) = args["color"] else { - logger.debug("Function arguments: \(String(describing: args))") - throw ApplicationError("Missing `color` parameter.") + private func processAudioMessages(_ content: ModelContent) async throws { + for part in content.parts { + if let part = part as? InlineDataPart { + if part.mimeType.starts(with: "audio/pcm") { + if isAudioOutputEnabled { + try await audioController?.playAudio(audio: part.data) + } + } else { + logger.warning("Received non audio inline data part: \(part.mimeType)") + } + } + } } - withAnimation { - backgroundColor = Color(hex: color) + private func processFunctionCalls(functionCalls: [FunctionCallPart]) async throws { + let responses = try functionCalls.map { functionCall in + switch functionCall.name { + case "changeBackgroundColor": + return try changeBackgroundColor(args: functionCall.args, id: functionCall.functionId) + case "clearBackgroundColor": + return clearBackgroundColor(id: functionCall.functionId) + default: + logger.debug("Function call: \(String(describing: functionCall))") + throw ApplicationError("Unknown function named \"\(functionCall.name)\".") + } + } + + await liveSession?.sendFunctionResponses(responses) } - if backgroundColor == nil { - logger.warning("The model sent us an invalid hex color: \(color)") + private func appendAudioTranscript(_ transcript: String) { + hasTranscripts = true + transcriptTypewriter.appendText(transcript) } - return FunctionResponsePart( - name: "changeBackgroundColor", - response: JSONObject(), - functionId: id - ) - } + private func changeBackgroundColor(args: JSONObject, id: String?) throws -> FunctionResponsePart { + guard case let .string(color) = args["color"] else { + logger.debug("Function arguments: \(String(describing: args))") + throw ApplicationError("Missing `color` parameter.") + } + + withAnimation { + backgroundColor = Color(hex: color) + } + + if backgroundColor == nil { + logger.warning("The model sent us an invalid hex color: \(color)") + } - private func clearBackgroundColor(id: String?) -> FunctionResponsePart { - withAnimation { - backgroundColor = nil + return FunctionResponsePart( + name: "changeBackgroundColor", + response: JSONObject(), + functionId: id + ) } - return FunctionResponsePart( - name: "clearBackgroundColor", - response: JSONObject(), - functionId: id - ) - } + private func clearBackgroundColor(id: String?) -> FunctionResponsePart { + withAnimation { + backgroundColor = nil + } + + return FunctionResponsePart( + name: "clearBackgroundColor", + response: JSONObject(), + functionId: id + ) + } } diff --git a/firebaseai/FirebaseAIExample/Features/Live/ViewModels/TypeWriterViewModel.swift b/firebaseai/FirebaseAIExample/Features/Live/ViewModels/TypeWriterViewModel.swift index ab607fd4b..03621d6fc 100644 --- a/firebaseai/FirebaseAIExample/Features/Live/ViewModels/TypeWriterViewModel.swift +++ b/firebaseai/FirebaseAIExample/Features/Live/ViewModels/TypeWriterViewModel.swift @@ -12,85 +12,85 @@ // See the License for the specific language governing permissions and // limitations under the License. -import SwiftUI -import Foundation import Combine +import Foundation +import SwiftUI @MainActor class TypeWriterViewModel: ObservableObject { - @Published - var text: String = "" + @Published + var text: String = "" - /// How long to wait (in milliseconds) between showing the next character. - var delay: Int = 65 + /// How long to wait (in milliseconds) between showing the next character. 
+ var delay: Int = 65 - private var pendingText = [Character]() - private var processTextTask: Task? + private var pendingText = [Character]() + private var processTextTask: Task? - init() { - processTask() - } + init() { + processTask() + } - deinit { - processTextTask?.cancel() - } + deinit { + processTextTask?.cancel() + } - /// Queues text to show. - /// - /// Since the text is queued, the text wont be displayed until the previous - /// pending text is populated. - func appendText(_ text: String) { - pendingText.append(contentsOf: text) - } + /// Queues text to show. + /// + /// Since the text is queued, the text wont be displayed until the previous + /// pending text is populated. + func appendText(_ text: String) { + pendingText.append(contentsOf: text) + } - /// Clears any text from the queue that is pending being added to the text. - func clearPending() { - pendingText.removeAll() - } + /// Clears any text from the queue that is pending being added to the text. + func clearPending() { + pendingText.removeAll() + } - /// Restarts the class to be a fresh instance. - /// - /// Effectively, this removes all the currently tracked text, - /// and any pending text. - func restart() { - clearPending() - text = "" - } + /// Restarts the class to be a fresh instance. + /// + /// Effectively, this removes all the currently tracked text, + /// and any pending text. + func restart() { + clearPending() + text = "" + } - /// Long running task for processing characters. - private func processTask() { - processTextTask = Task { - var delay = delay - while !Task.isCancelled { - try? await Task.sleep(for: .milliseconds(delay)) + /// Long running task for processing characters. + private func processTask() { + processTextTask = Task { + var delay = delay + while !Task.isCancelled { + try? await Task.sleep(for: .milliseconds(delay)) - delay = processNextCharacter() - } + delay = processNextCharacter() + } + } } - } - /// Determines the delay for the next character, adding pending text as needed. - /// - /// We don't have a delay when outputting whitespace or the end of a sentence. - /// - /// - Returns: The MS delay before working on the next character in the queue. - private func processNextCharacter() -> Int { - guard !pendingText.isEmpty else { - return delay // Default delay if no text is pending - } + /// Determines the delay for the next character, adding pending text as needed. + /// + /// We don't have a delay when outputting whitespace or the end of a sentence. + /// + /// - Returns: The MS delay before working on the next character in the queue. + private func processNextCharacter() -> Int { + guard !pendingText.isEmpty else { + return delay // Default delay if no text is pending + } - let char = pendingText.removeFirst() - text.append(char) + let char = pendingText.removeFirst() + text.append(char) - return (char.isWhitespace || char.isEndOfSentence) ? 0 : delay - } + return (char.isWhitespace || char.isEndOfSentence) ? 0 : delay + } } extension Character { - /// Marker for punctuation that dictates the end of a sentence. - /// - /// Namely, this checks for `.`, `!` and `?`. - var isEndOfSentence: Bool { - self == "." || self == "!" || self == "?" - } + /// Marker for punctuation that dictates the end of a sentence. + /// + /// Namely, this checks for `.`, `!` and `?`. + var isEndOfSentence: Bool { + self == "." || self == "!" || self == "?" 
+ } } diff --git a/firebaseai/FirebaseAIExample/Features/Live/Views/AudioOutputToggle.swift b/firebaseai/FirebaseAIExample/Features/Live/Views/AudioOutputToggle.swift index 02c8d40f3..68109cf9a 100644 --- a/firebaseai/FirebaseAIExample/Features/Live/Views/AudioOutputToggle.swift +++ b/firebaseai/FirebaseAIExample/Features/Live/Views/AudioOutputToggle.swift @@ -15,25 +15,25 @@ import SwiftUI struct AudioOutputToggle: View { - @Binding var isEnabled: Bool - var onChange: () -> Void = {} + @Binding var isEnabled: Bool + var onChange: () -> Void = {} - var body: some View { - VStack(alignment: .leading, spacing: 5) { - Toggle("Audio Output", isOn: $isEnabled).onChange(of: isEnabled) { _, _ in - onChange() - } + var body: some View { + VStack(alignment: .leading, spacing: 5) { + Toggle("Audio Output", isOn: $isEnabled).onChange(of: isEnabled) { _, _ in + onChange() + } - Text(""" - Audio output works best on physical devices. Enable this to test playback in the \ - simulator. Headphones recommended. - """) - .font(.caption) - .foregroundStyle(.secondary) + Text(""" + Audio output works best on physical devices. Enable this to test playback in the \ + simulator. Headphones recommended. + """) + .font(.caption) + .foregroundStyle(.secondary) + } } - } } #Preview { - AudioOutputToggle(isEnabled: .constant(false)) + AudioOutputToggle(isEnabled: .constant(false)) } diff --git a/firebaseai/FirebaseAIExample/Features/Live/Views/ConnectButton.swift b/firebaseai/FirebaseAIExample/Features/Live/Views/ConnectButton.swift index e4ed9ef05..20eb341eb 100644 --- a/firebaseai/FirebaseAIExample/Features/Live/Views/ConnectButton.swift +++ b/firebaseai/FirebaseAIExample/Features/Live/Views/ConnectButton.swift @@ -15,111 +15,111 @@ import SwiftUI struct ConnectButton: View { - var state: LiveViewModelState - var onConnect: () async -> Void - var onDisconnect: () async -> Void + var state: LiveViewModelState + var onConnect: () async -> Void + var onDisconnect: () async -> Void - @State private var gradientAngle: Angle = .zero + @State private var gradientAngle: Angle = .zero - private var isConnected: Bool { state == .connected } + private var isConnected: Bool { state == .connected } - private var title: String { - switch state { - case .connected: "Stop" - case .connecting: "Connecting..." - case .idle: "Start" + private var title: String { + switch state { + case .connected: "Stop" + case .connecting: "Connecting..." 
+ case .idle: "Start" + } } - } - private var image: String { - switch state { - case .connected: "stop" - case .connecting: "wifi" - case .idle: "play" + private var image: String { + switch state { + case .connected: "stop" + case .connecting: "wifi" + case .idle: "play" + } } - } - var body: some View { - Button(action: onClick) { - Label(title, systemImage: image) - .frame(maxWidth: .infinity) - .padding() + var body: some View { + Button(action: onClick) { + Label(title, systemImage: image) + .frame(maxWidth: .infinity) + .padding() + } + .buttonStyle(.connect(state: state, gradientAngle: gradientAngle)) + .onAppear { + withAnimation(.linear(duration: 5).repeatForever(autoreverses: false)) { + self.gradientAngle = .degrees(360) + } + } } - .buttonStyle(.connect(state: state, gradientAngle: gradientAngle)) - .onAppear { - withAnimation(.linear(duration: 5).repeatForever(autoreverses: false)) { - self.gradientAngle = .degrees(360) - } - } - } - private func onClick() { - Task { - if isConnected { - await onDisconnect() - } else { - await onConnect() - } + private func onClick() { + Task { + if isConnected { + await onDisconnect() + } else { + await onConnect() + } + } } - } } struct ConnectButtonStyle: ButtonStyle { - var state: LiveViewModelState - var gradientAngle: Angle + var state: LiveViewModelState + var gradientAngle: Angle - private var color: Color { - switch state { - case .connected: Color(.systemRed) - case .connecting: Color.secondary - case .idle: Color.accentColor + private var color: Color { + switch state { + case .connected: Color(.systemRed) + case .connecting: Color.secondary + case .idle: Color.accentColor + } } - } - private var gradientColors: [Color] { - switch state { - case .connected: [Color(.systemRed)] - case .connecting: [.secondary, .white] - case .idle: [ - Color(.systemRed), - Color(.systemBlue), - Color(.systemGreen), - Color(.systemYellow), - Color(.systemRed), - ] + private var gradientColors: [Color] { + switch state { + case .connected: [Color(.systemRed)] + case .connecting: [.secondary, .white] + case .idle: [ + Color(.systemRed), + Color(.systemBlue), + Color(.systemGreen), + Color(.systemYellow), + Color(.systemRed), + ] + } } - } - func makeBody(configuration: Configuration) -> some View { - configuration.label - .disabled(state == .connecting) - .overlay( - RoundedRectangle(cornerRadius: 35) - .stroke( - AngularGradient( - gradient: Gradient(colors: gradientColors), - center: .center, - startAngle: gradientAngle, - endAngle: gradientAngle + .degrees(360) - ), - lineWidth: 3 - ) - ) - .foregroundStyle(color) - } + func makeBody(configuration: Configuration) -> some View { + configuration.label + .disabled(state == .connecting) + .overlay( + RoundedRectangle(cornerRadius: 35) + .stroke( + AngularGradient( + gradient: Gradient(colors: gradientColors), + center: .center, + startAngle: gradientAngle, + endAngle: gradientAngle + .degrees(360) + ), + lineWidth: 3 + ) + ) + .foregroundStyle(color) + } } extension ButtonStyle where Self == ConnectButtonStyle { - static func connect(state: LiveViewModelState, gradientAngle: Angle) -> ConnectButtonStyle { - ConnectButtonStyle(state: state, gradientAngle: gradientAngle) - } + static func connect(state: LiveViewModelState, gradientAngle: Angle) -> ConnectButtonStyle { + ConnectButtonStyle(state: state, gradientAngle: gradientAngle) + } } #Preview { - VStack(spacing: 30) { - ConnectButton(state: .idle, onConnect: {}, onDisconnect: {}) - ConnectButton(state: .connecting, onConnect: {}, onDisconnect: {}) - 
ConnectButton(state: .connected, onConnect: {}, onDisconnect: {}) - } - .padding(.horizontal) + VStack(spacing: 30) { + ConnectButton(state: .idle, onConnect: {}, onDisconnect: {}) + ConnectButton(state: .connecting, onConnect: {}, onDisconnect: {}) + ConnectButton(state: .connected, onConnect: {}, onDisconnect: {}) + } + .padding(.horizontal) } diff --git a/firebaseai/FirebaseAIExample/Features/Live/Views/ModelAvatar.swift b/firebaseai/FirebaseAIExample/Features/Live/Views/ModelAvatar.swift index 1c36733a4..7825fbeb1 100644 --- a/firebaseai/FirebaseAIExample/Features/Live/Views/ModelAvatar.swift +++ b/firebaseai/FirebaseAIExample/Features/Live/Views/ModelAvatar.swift @@ -15,57 +15,57 @@ import SwiftUI struct ModelAvatar: View { - var isConnected = false + var isConnected = false - @State private var gradientAngle: Angle = .zero + @State private var gradientAngle: Angle = .zero - var colors: [Color] { - if isConnected { - [.red, .blue, .green, .yellow, .red] - } else { - [Color(red: 0.5, green: 0.5, blue: 0.5, opacity: 0.3)] + var colors: [Color] { + if isConnected { + [.red, .blue, .green, .yellow, .red] + } else { + [Color(red: 0.5, green: 0.5, blue: 0.5, opacity: 0.3)] + } } - } - var body: some View { - Image("gemini-logo") - .resizable() - .aspectRatio(contentMode: .fit) - .padding() - .colorMultiply(.black) - .maskedOverlay { - AngularGradient( - gradient: Gradient(colors: colors), - center: .leading, - startAngle: gradientAngle, - endAngle: gradientAngle + .degrees(360) - ) - } - .onAppear { - withAnimation(.linear(duration: 10).repeatForever(autoreverses: false)) { - self.gradientAngle = .degrees(360) - } - } - } + var body: some View { + Image("gemini-logo") + .resizable() + .aspectRatio(contentMode: .fit) + .padding() + .colorMultiply(.black) + .maskedOverlay { + AngularGradient( + gradient: Gradient(colors: colors), + center: .leading, + startAngle: gradientAngle, + endAngle: gradientAngle + .degrees(360) + ) + } + .onAppear { + withAnimation(.linear(duration: 10).repeatForever(autoreverses: false)) { + self.gradientAngle = .degrees(360) + } + } + } } extension View { - /// Creates an overlay which takes advantage of a mask to respect the size of the view. - /// - /// Especially useful when you want to create an overlay of an view with a non standard - /// size. - @ViewBuilder - func maskedOverlay(mask: () -> some View) -> some View { - overlay { - mask() - .mask { self } + /// Creates an overlay which takes advantage of a mask to respect the size of the view. + /// + /// Especially useful when you want to create an overlay of an view with a non standard + /// size. 
+ @ViewBuilder + func maskedOverlay(mask: () -> some View) -> some View { + overlay { + mask() + .mask { self } + } } - } } #Preview { - VStack { - ModelAvatar(isConnected: true) - ModelAvatar(isConnected: false) - } + VStack { + ModelAvatar(isConnected: true) + ModelAvatar(isConnected: false) + } } diff --git a/firebaseai/FirebaseAIExample/Features/Live/Views/TranscriptView.swift b/firebaseai/FirebaseAIExample/Features/Live/Views/TranscriptView.swift index 134d44df5..3df7f5d89 100644 --- a/firebaseai/FirebaseAIExample/Features/Live/Views/TranscriptView.swift +++ b/firebaseai/FirebaseAIExample/Features/Live/Views/TranscriptView.swift @@ -15,31 +15,31 @@ import SwiftUI struct TranscriptView: View { - @ObservedObject var typewriter: TypeWriterViewModel + @ObservedObject var typewriter: TypeWriterViewModel - var body: some View { - ScrollViewReader { proxy in - ScrollView { - Text(typewriter.text) - .font(.title3) - .frame(maxWidth: .infinity, alignment: .leading) - .transition(.opacity) - .padding(.horizontal) - .id("transcript") - } - .onChange(of: typewriter.text, initial: false) { old, new in - proxy.scrollTo("transcript", anchor: .bottom) - } + var body: some View { + ScrollViewReader { proxy in + ScrollView { + Text(typewriter.text) + .font(.title3) + .frame(maxWidth: .infinity, alignment: .leading) + .transition(.opacity) + .padding(.horizontal) + .id("transcript") + } + .onChange(of: typewriter.text, initial: false) { _, _ in + proxy.scrollTo("transcript", anchor: .bottom) + } + } } - } } #Preview { - let vm = TypeWriterViewModel() - TranscriptView(typewriter: vm) - .onAppear { - vm.appendText( - "The sky is blue primarily because of a phenomenon called Rayleigh scattering, where tiny molecules of gas (mainly nitrogen and oxygen) in Earth's atmosphere scatter sunlight in all directions." - ) - } + let vm = TypeWriterViewModel() + TranscriptView(typewriter: vm) + .onAppear { + vm.appendText( + "The sky is blue primarily because of a phenomenon called Rayleigh scattering, where tiny molecules of gas (mainly nitrogen and oxygen) in Earth's atmosphere scatter sunlight in all directions." + ) + } } diff --git a/firebaseai/FirebaseAIExample/Features/Multimodal/Models/MultimodalAttachment.swift b/firebaseai/FirebaseAIExample/Features/Multimodal/Models/MultimodalAttachment.swift index c4502a567..3a7dcf109 100644 --- a/firebaseai/FirebaseAIExample/Features/Multimodal/Models/MultimodalAttachment.swift +++ b/firebaseai/FirebaseAIExample/Features/Multimodal/Models/MultimodalAttachment.swift @@ -13,283 +13,280 @@ // limitations under the License. import Foundation -import SwiftUI import PhotosUI +import SwiftUI #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif import ConversationKit public enum MultimodalAttachmentError: LocalizedError { - case unsupportedFileType(extension: String) - case noDataAvailable - case loadingFailed(Error) - case mimeTypeMismatch(expected: String, provided: String, extension: String) - - public var errorDescription: String? { - switch self { - case let .unsupportedFileType(ext): - return "Unsupported file format: .\(ext). Please select a supported format file." 
- case .noDataAvailable: - return "File data is not available" - case let .loadingFailed(error): - return "File loading failed: \(error.localizedDescription)" - case let .mimeTypeMismatch(expected, provided, ext): - return "MIME type mismatch for .\(ext) file: expected '\(expected)', got '\(provided)'" + case unsupportedFileType(extension: String) + case noDataAvailable + case loadingFailed(Error) + case mimeTypeMismatch(expected: String, provided: String, extension: String) + + public var errorDescription: String? { + switch self { + case let .unsupportedFileType(ext): + return "Unsupported file format: .\(ext). Please select a supported format file." + case .noDataAvailable: + return "File data is not available" + case let .loadingFailed(error): + return "File loading failed: \(error.localizedDescription)" + case let .mimeTypeMismatch(expected, provided, ext): + return "MIME type mismatch for .\(ext) file: expected '\(expected)', got '\(provided)'" + } } - } } // MultimodalAttachment is a struct used for transporting data between ViewModels and AttachmentPreviewCard public struct MultimodalAttachment: Attachment, Equatable { - public let id = UUID() - public let mimeType: String - public let data: Data? - public let url: URL? - public var isCloudStorage: Bool = false - - public static func == (lhs: MultimodalAttachment, rhs: MultimodalAttachment) -> Bool { - return lhs.id == rhs.id - } - - public func hash(into hasher: inout Hasher) { - hasher.combine(id) - } - - public init(mimeType: String, data: Data? = nil, url: URL? = nil) { - self.mimeType = mimeType - self.data = data - self.url = url - } - - public init(fileDataPart: FileDataPart) { - mimeType = fileDataPart.mimeType - data = nil - url = URL(string: fileDataPart.uri) - isCloudStorage = true - } + public let id = UUID() + public let mimeType: String + public let data: Data? + public let url: URL? + public var isCloudStorage: Bool = false + + public static func == (lhs: MultimodalAttachment, rhs: MultimodalAttachment) -> Bool { + return lhs.id == rhs.id + } + + public func hash(into hasher: inout Hasher) { + hasher.combine(id) + } + + public init(mimeType: String, data: Data? = nil, url: URL? 
= nil) { + self.mimeType = mimeType + self.data = data + self.url = url + } + + public init(fileDataPart: FileDataPart) { + mimeType = fileDataPart.mimeType + data = nil + url = URL(string: fileDataPart.uri) + isCloudStorage = true + } } extension MultimodalAttachment: View { - public var body: some View { - AttachmentPreviewCard(attachment: self) - } + public var body: some View { + AttachmentPreviewCard(attachment: self) + } } // validate file type & mime type -extension MultimodalAttachment { - public static let supportedFileExtensions: Set = [ - // Images - "png", "jpeg", "webp", - // Video - "flv", "mov", "mpeg", "mpegps", "mpg", "mp4", "webm", "wmv", "3gpp", - // Audio - "aac", "flac", "mp3", "mpa", "mpeg", "mpga", "mp4", "opus", "pcm", "wav", "webm", - // Documents - "pdf", "txt", - ] - - public static func validateFileType(url: URL) throws { - let fileExtension = url.pathExtension.lowercased() - guard !fileExtension.isEmpty else { - throw MultimodalAttachmentError.unsupportedFileType(extension: "No extension") - } - - guard supportedFileExtensions.contains(fileExtension) else { - throw MultimodalAttachmentError.unsupportedFileType(extension: fileExtension) +public extension MultimodalAttachment { + static let supportedFileExtensions: Set = [ + // Images + "png", "jpeg", "webp", + // Video + "flv", "mov", "mpeg", "mpegps", "mpg", "mp4", "webm", "wmv", "3gpp", + // Audio + "aac", "flac", "mp3", "mpa", "mpeg", "mpga", "mp4", "opus", "pcm", "wav", "webm", + // Documents + "pdf", "txt", + ] + + static func validateFileType(url: URL) throws { + let fileExtension = url.pathExtension.lowercased() + guard !fileExtension.isEmpty else { + throw MultimodalAttachmentError.unsupportedFileType(extension: "No extension") + } + + guard supportedFileExtensions.contains(fileExtension) else { + throw MultimodalAttachmentError.unsupportedFileType(extension: fileExtension) + } } - } - public static func validateMimeTypeMatch(url: URL, mimeType: String) throws { - let expectedMimeType = getMimeType(for: url) + static func validateMimeTypeMatch(url: URL, mimeType: String) throws { + let expectedMimeType = getMimeType(for: url) - guard mimeType == expectedMimeType else { - throw MultimodalAttachmentError.mimeTypeMismatch( - expected: expectedMimeType, - provided: mimeType, - extension: url.pathExtension - ) + guard mimeType == expectedMimeType else { + throw MultimodalAttachmentError.mimeTypeMismatch( + expected: expectedMimeType, + provided: mimeType, + extension: url.pathExtension + ) + } } - } - public static func validatePhotoType(_ item: PhotosPickerItem) throws -> String { - guard let fileExtension = item.supportedContentTypes.first?.preferredFilenameExtension else { - throw MultimodalAttachmentError.unsupportedFileType(extension: "No extension") - } + static func validatePhotoType(_ item: PhotosPickerItem) throws -> String { + guard let fileExtension = item.supportedContentTypes.first?.preferredFilenameExtension else { + throw MultimodalAttachmentError.unsupportedFileType(extension: "No extension") + } - guard supportedFileExtensions.contains(fileExtension) else { - throw MultimodalAttachmentError.unsupportedFileType(extension: fileExtension) - } + guard supportedFileExtensions.contains(fileExtension) else { + throw MultimodalAttachmentError.unsupportedFileType(extension: fileExtension) + } - guard let fileMimeType = item.supportedContentTypes.first?.preferredMIMEType else { - throw MultimodalAttachmentError.unsupportedFileType(extension: "No MIME type") - } + guard let fileMimeType = 
item.supportedContentTypes.first?.preferredMIMEType else { + throw MultimodalAttachmentError.unsupportedFileType(extension: "No MIME type") + } - return fileMimeType - } + return fileMimeType + } } // load data from picker item or url -extension MultimodalAttachment { - public static func fromPhotosPickerItem(_ item: PhotosPickerItem) async throws - -> MultimodalAttachment { - let fileMimeType = try validatePhotoType(item) - - do { - guard let data = try await item.loadTransferable(type: Data.self) else { - throw MultimodalAttachmentError.noDataAvailable - } - - return MultimodalAttachment( - mimeType: fileMimeType, - data: data - ) - } catch let error as MultimodalAttachmentError { - throw error - } catch { - throw MultimodalAttachmentError.loadingFailed(error) +public extension MultimodalAttachment { + static func fromPhotosPickerItem(_ item: PhotosPickerItem) async throws + -> MultimodalAttachment + { + let fileMimeType = try validatePhotoType(item) + + do { + guard let data = try await item.loadTransferable(type: Data.self) else { + throw MultimodalAttachmentError.noDataAvailable + } + + return MultimodalAttachment( + mimeType: fileMimeType, + data: data + ) + } catch let error as MultimodalAttachmentError { + throw error + } catch { + throw MultimodalAttachmentError.loadingFailed(error) + } } - } - public static func fromFilePickerItem(from url: URL) async throws -> MultimodalAttachment { - try validateFileType(url: url) + static func fromFilePickerItem(from url: URL) async throws -> MultimodalAttachment { + try validateFileType(url: url) - do { - let data = try await Task.detached(priority: .utility) { - try Data(contentsOf: url) - }.value + do { + let data = try await Task.detached(priority: .utility) { + try Data(contentsOf: url) + }.value - let mimeType = Self.getMimeType(for: url) + let mimeType = Self.getMimeType(for: url) - return MultimodalAttachment( - mimeType: mimeType, - data: data, - url: url - ) - } catch { - throw MultimodalAttachmentError.loadingFailed(error) + return MultimodalAttachment( + mimeType: mimeType, + data: data, + url: url + ) + } catch { + throw MultimodalAttachmentError.loadingFailed(error) + } } - } - - public static func fromURL(_ url: URL, mimeType: String) async throws -> MultimodalAttachment { - try validateFileType(url: url) - try validateMimeTypeMatch(url: url, mimeType: mimeType) - - do { - let data = try await Task.detached(priority: .utility) { - try Data(contentsOf: url) - }.value - - return MultimodalAttachment( - mimeType: mimeType, - data: data, - url: url - ) - } catch { - throw MultimodalAttachmentError.loadingFailed(error) - } - } - public func toInlineDataPart() async -> InlineDataPart? { - if let data = data, !data.isEmpty { - return InlineDataPart(data: data, mimeType: mimeType) + static func fromURL(_ url: URL, mimeType: String) async throws -> MultimodalAttachment { + try validateFileType(url: url) + try validateMimeTypeMatch(url: url, mimeType: mimeType) + + do { + let data = try await Task.detached(priority: .utility) { + try Data(contentsOf: url) + }.value + + return MultimodalAttachment( + mimeType: mimeType, + data: data, + url: url + ) + } catch { + throw MultimodalAttachmentError.loadingFailed(error) + } } - // If the data is not available, try to read it from the url. 
- guard let url = url else { return nil } - do { - let data = try await Task.detached(priority: .utility) { - try Data(contentsOf: url) - }.value - - guard !data.isEmpty else { return nil } - return InlineDataPart(data: data, mimeType: mimeType) - } catch { - return nil + func toInlineDataPart() async -> InlineDataPart? { + if let data = data, !data.isEmpty { + return InlineDataPart(data: data, mimeType: mimeType) + } + + // If the data is not available, try to read it from the url. + guard let url = url else { return nil } + do { + let data = try await Task.detached(priority: .utility) { + try Data(contentsOf: url) + }.value + + guard !data.isEmpty else { return nil } + return InlineDataPart(data: data, mimeType: mimeType) + } catch { + return nil + } } - } - - private static func getMimeType(for url: URL) -> String { - let fileExtension = url.pathExtension.lowercased() - - switch fileExtension { - // Images - case "png": - return "image/png" - case "jpeg": - return "image/jpeg" - case "webp": - return "image/webp" - - // Video - case "flv": - return "video/x-flv" - case "mov": - return "video/quicktime" - case "mpeg": - return "video/mpeg" - case "mpegps": - return "video/mpegps" - case "mpg": - return "video/mpg" - case "mp4": - return "video/mp4" - case "webm": - return "video/webm" - case "wmv": - return "video/wmv" - case "3gpp": - return "video/3gpp" - - // Audio - case "aac": - return "audio/aac" - case "flac": - return "audio/flac" - case "mp3": - return "audio/mp3" - case "mpa": - return "audio/m4a" - // TODO: Find a more accurate way to determine the MIME type. - // Commented out to silence the warning "Literal value is already handled by previous pattern; - // consider removing it". - // Context: .mpeg files are more likely to be video since MP3 files are more likely to use the - // .mp3 file extension. - // case "mpeg": - // return "audio/mpeg" - case "mpga": - return "audio/mpga" - // TODO: Find a more accurate way to determine the MIME type. - // Commented out to silence the warning "Literal value is already handled by previous pattern; - // consider removing it". - // Context: .mp4 files are potentially more likely to be video since AAC and ALAC files - // frequently use the .m4a file extension within the Apple ecosystem, though it is - // still ambiguous whether it is audio or video from the file extension alone. - // case "mp4": - // return "audio/mp4" - case "opus": - return "audio/opus" - case "wav": - return "audio/wav" - // TODO: Find a more accurate way to determine the MIME type. - // Commented out to silence the warning "Literal value is already handled by previous pattern; - // consider removing it". - // Context: .webm files are potentially more likely to be video since WebM files frequently use - // the .weba file extension when they only contain audio (Ogg Vorbis / Opus), though it - // is still ambiguous whether it is audio or video based on the file extension alone. 
- // case "webm": - // return "audio/webm" - - // Documents / text - case "pdf": - return "application/pdf" - case "txt": - return "text/plain" - - default: - return "application/octet-stream" + + private static func getMimeType(for url: URL) -> String { + let fileExtension = url.pathExtension.lowercased() + + switch fileExtension { + // Images + case "png": + return "image/png" + case "jpeg": + return "image/jpeg" + case "webp": + return "image/webp" + // Video + case "flv": + return "video/x-flv" + case "mov": + return "video/quicktime" + case "mpeg": + return "video/mpeg" + case "mpegps": + return "video/mpegps" + case "mpg": + return "video/mpg" + case "mp4": + return "video/mp4" + case "webm": + return "video/webm" + case "wmv": + return "video/wmv" + case "3gpp": + return "video/3gpp" + // Audio + case "aac": + return "audio/aac" + case "flac": + return "audio/flac" + case "mp3": + return "audio/mp3" + case "mpa": + return "audio/m4a" + // TODO: Find a more accurate way to determine the MIME type. + // Commented out to silence the warning "Literal value is already handled by previous pattern; + // consider removing it". + // Context: .mpeg files are more likely to be video since MP3 files are more likely to use the + // .mp3 file extension. + // case "mpeg": + // return "audio/mpeg" + case "mpga": + return "audio/mpga" + // TODO: Find a more accurate way to determine the MIME type. + // Commented out to silence the warning "Literal value is already handled by previous pattern; + // consider removing it". + // Context: .mp4 files are potentially more likely to be video since AAC and ALAC files + // frequently use the .m4a file extension within the Apple ecosystem, though it is + // still ambiguous whether it is audio or video from the file extension alone. + // case "mp4": + // return "audio/mp4" + case "opus": + return "audio/opus" + case "wav": + return "audio/wav" + // TODO: Find a more accurate way to determine the MIME type. + // Commented out to silence the warning "Literal value is already handled by previous pattern; + // consider removing it". + // Context: .webm files are potentially more likely to be video since WebM files frequently use + // the .weba file extension when they only contain audio (Ogg Vorbis / Opus), though it + // is still ambiguous whether it is audio or video based on the file extension alone. + // case "webm": + // return "audio/webm" + // Documents / text + case "pdf": + return "application/pdf" + case "txt": + return "text/plain" + default: + return "application/octet-stream" + } } - } } diff --git a/firebaseai/FirebaseAIExample/Features/Multimodal/Screens/MultimodalScreen.swift b/firebaseai/FirebaseAIExample/Features/Multimodal/Screens/MultimodalScreen.swift index c3623cc77..0a9fee931 100644 --- a/firebaseai/FirebaseAIExample/Features/Multimodal/Screens/MultimodalScreen.swift +++ b/firebaseai/FirebaseAIExample/Features/Multimodal/Screens/MultimodalScreen.swift @@ -13,187 +13,188 @@ // limitations under the License. 
#if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif -import SwiftUI -import PhotosUI import ConversationKit +import PhotosUI +import SwiftUI struct MultimodalScreen: View { - let backendType: BackendOption - @StateObject var viewModel: MultimodalViewModel + let backendType: BackendOption + @StateObject var viewModel: MultimodalViewModel - @State private var showingPhotoPicker = false - @State private var showingFilePicker = false - @State private var showingLinkDialog = false - @State private var linkText = "" - @State private var linkMimeType = "" - @State private var selectedPhotoItems = [PhotosPickerItem]() + @State private var showingPhotoPicker = false + @State private var showingFilePicker = false + @State private var showingLinkDialog = false + @State private var linkText = "" + @State private var linkMimeType = "" + @State private var selectedPhotoItems = [PhotosPickerItem]() - init(backendType: BackendOption, sample: Sample? = nil) { - self.backendType = backendType - _viewModel = - StateObject(wrappedValue: MultimodalViewModel(backendType: backendType, - sample: sample)) - } + init(backendType: BackendOption, sample: Sample? = nil) { + self.backendType = backendType + _viewModel = + StateObject(wrappedValue: MultimodalViewModel(backendType: backendType, + sample: sample)) + } - var body: some View { - NavigationStack { - ConversationView(messages: $viewModel.messages, - attachments: $viewModel.attachments, - userPrompt: viewModel.initialPrompt) { message in - MessageView(message: message) - } - .attachmentActions { - Button(action: showLinkDialog) { - Label("Link", systemImage: "link") - } - Button(action: showFilePicker) { - Label("File", systemImage: "doc.text") - } - Button(action: showPhotoPicker) { - Label("Photo", systemImage: "photo.on.rectangle.angled") - } - } - .onSendMessage { message in - await viewModel.sendMessage(message.content ?? "", streaming: true) - } - .onError { error in - viewModel.presentErrorDetails = true - } - .sheet(isPresented: $viewModel.presentErrorDetails) { - if let error = viewModel.error { - ErrorDetailsView(error: error) - } - } - .photosPicker( - isPresented: $showingPhotoPicker, - selection: $selectedPhotoItems, - maxSelectionCount: 5, - matching: .any(of: [.images, .videos]) - ) - .fileImporter( - isPresented: $showingFilePicker, - allowedContentTypes: [.pdf, .audio], - allowsMultipleSelection: true - ) { result in - handleFileImport(result) - } - .alert("Add Web URL", isPresented: $showingLinkDialog) { - TextField("Enter URL", text: $linkText) - TextField("Enter mimeType", text: $linkMimeType) - Button("Add") { - handleLinkAttachment() + var body: some View { + NavigationStack { + ConversationView(messages: $viewModel.messages, + attachments: $viewModel.attachments, + userPrompt: viewModel.initialPrompt) + { message in + MessageView(message: message) + } + .attachmentActions { + Button(action: showLinkDialog) { + Label("Link", systemImage: "link") + } + Button(action: showFilePicker) { + Label("File", systemImage: "doc.text") + } + Button(action: showPhotoPicker) { + Label("Photo", systemImage: "photo.on.rectangle.angled") + } + } + .onSendMessage { message in + await viewModel.sendMessage(message.content ?? 
"", streaming: true) + } + .onError { _ in + viewModel.presentErrorDetails = true + } + .sheet(isPresented: $viewModel.presentErrorDetails) { + if let error = viewModel.error { + ErrorDetailsView(error: error) + } + } + .photosPicker( + isPresented: $showingPhotoPicker, + selection: $selectedPhotoItems, + maxSelectionCount: 5, + matching: .any(of: [.images, .videos]) + ) + .fileImporter( + isPresented: $showingFilePicker, + allowedContentTypes: [.pdf, .audio], + allowsMultipleSelection: true + ) { result in + handleFileImport(result) + } + .alert("Add Web URL", isPresented: $showingLinkDialog) { + TextField("Enter URL", text: $linkText) + TextField("Enter mimeType", text: $linkMimeType) + Button("Add") { + handleLinkAttachment() + } + Button("Cancel", role: .cancel) { + linkText = "" + linkMimeType = "" + } + } } - Button("Cancel", role: .cancel) { - linkText = "" - linkMimeType = "" + .onChange(of: selectedPhotoItems) { _, newItems in + handlePhotoSelection(newItems) } - } - } - .onChange(of: selectedPhotoItems) { _, newItems in - handlePhotoSelection(newItems) - } - .toolbar { - ToolbarItem(placement: .primaryAction) { - Button(action: newChat) { - Image(systemName: "square.and.pencil") + .toolbar { + ToolbarItem(placement: .primaryAction) { + Button(action: newChat) { + Image(systemName: "square.and.pencil") + } + } } - } + .navigationTitle(viewModel.title) + .navigationBarTitleDisplayMode(.inline) } - .navigationTitle(viewModel.title) - .navigationBarTitleDisplayMode(.inline) - } - - private func newChat() { - viewModel.startNewChat() - } - private func showPhotoPicker() { - showingPhotoPicker = true - } + private func newChat() { + viewModel.startNewChat() + } - private func showFilePicker() { - showingFilePicker = true - } + private func showPhotoPicker() { + showingPhotoPicker = true + } - private func showLinkDialog() { - showingLinkDialog = true - } + private func showFilePicker() { + showingFilePicker = true + } - private func handlePhotoSelection(_ items: [PhotosPickerItem]) { - Task { - for item in items { - do { - let attachment = try await MultimodalAttachment.fromPhotosPickerItem(item) - await MainActor.run { - viewModel.addAttachment(attachment) - } - } catch { - await MainActor.run { - viewModel.error = error - viewModel.presentErrorDetails = true - } - } - } - await MainActor.run { - selectedPhotoItems = [] - } + private func showLinkDialog() { + showingLinkDialog = true } - } - private func handleFileImport(_ result: Result<[URL], Error>) { - switch result { - case let .success(urls): - Task { - for url in urls { - do { - let attachment = try await MultimodalAttachment.fromFilePickerItem(from: url) - await MainActor.run { - viewModel.addAttachment(attachment) + private func handlePhotoSelection(_ items: [PhotosPickerItem]) { + Task { + for item in items { + do { + let attachment = try await MultimodalAttachment.fromPhotosPickerItem(item) + await MainActor.run { + viewModel.addAttachment(attachment) + } + } catch { + await MainActor.run { + viewModel.error = error + viewModel.presentErrorDetails = true + } + } } - } catch { await MainActor.run { - viewModel.error = error - viewModel.presentErrorDetails = true + selectedPhotoItems = [] } - } } - } - case let .failure(error): - viewModel.error = error - viewModel.presentErrorDetails = true } - } - private func handleLinkAttachment() { - guard !linkText.isEmpty, let url = URL(string: linkText) else { - return + private func handleFileImport(_ result: Result<[URL], Error>) { + switch result { + case let .success(urls): + Task 
{ + for url in urls { + do { + let attachment = try await MultimodalAttachment.fromFilePickerItem(from: url) + await MainActor.run { + viewModel.addAttachment(attachment) + } + } catch { + await MainActor.run { + viewModel.error = error + viewModel.presentErrorDetails = true + } + } + } + } + case let .failure(error): + viewModel.error = error + viewModel.presentErrorDetails = true + } } - let trimmedMime = linkMimeType.lowercased().trimmingCharacters(in: .whitespacesAndNewlines) - Task { - do { - let attachment = try await MultimodalAttachment.fromURL(url, mimeType: trimmedMime) - await MainActor.run { - viewModel.addAttachment(attachment) + private func handleLinkAttachment() { + guard !linkText.isEmpty, let url = URL(string: linkText) else { + return } - } catch { - await MainActor.run { - viewModel.error = error - viewModel.presentErrorDetails = true + + let trimmedMime = linkMimeType.lowercased().trimmingCharacters(in: .whitespacesAndNewlines) + Task { + do { + let attachment = try await MultimodalAttachment.fromURL(url, mimeType: trimmedMime) + await MainActor.run { + viewModel.addAttachment(attachment) + } + } catch { + await MainActor.run { + viewModel.error = error + viewModel.presentErrorDetails = true + } + } + await MainActor.run { + linkText = "" + linkMimeType = "" + } } - } - await MainActor.run { - linkText = "" - linkMimeType = "" - } } - } } #Preview { - MultimodalScreen(backendType: .googleAI) + MultimodalScreen(backendType: .googleAI) } diff --git a/firebaseai/FirebaseAIExample/Features/Multimodal/ViewModels/MultimodalViewModel.swift b/firebaseai/FirebaseAIExample/Features/Multimodal/ViewModels/MultimodalViewModel.swift index cb13cb694..b5a861ee2 100644 --- a/firebaseai/FirebaseAIExample/Features/Multimodal/ViewModels/MultimodalViewModel.swift +++ b/firebaseai/FirebaseAIExample/Features/Multimodal/ViewModels/MultimodalViewModel.swift @@ -13,211 +13,211 @@ // limitations under the License. #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif +import AVFoundation +import Combine +import ConversationKit import Foundation import OSLog import PhotosUI import SwiftUI -import AVFoundation -import Combine -import ConversationKit @MainActor class MultimodalViewModel: ObservableObject { - @Published var messages = [ChatMessage]() - @Published var initialPrompt: String = "" - @Published var title: String = "" - @Published var error: Error? - @Published var inProgress = false - - @Published var presentErrorDetails: Bool = false - - @Published var attachments = [MultimodalAttachment]() - - private var model: GenerativeModel - private var chat: Chat - private var chatTask: Task? - private let logger = Logger(subsystem: "com.example.firebaseai", category: "MultimodalViewModel") - - private var sample: Sample? - private var backendType: BackendOption - private var fileDataParts: [FileDataPart]? - - init(backendType: BackendOption, sample: Sample? = nil) { - self.sample = sample - self.backendType = backendType - - let firebaseService = backendType == .googleAI - ? FirebaseAI.firebaseAI(backend: .googleAI()) - : FirebaseAI.firebaseAI(backend: .vertexAI()) - - model = firebaseService.generativeModel( - modelName: sample?.modelName ?? 
"gemini-2.5-flash", - systemInstruction: sample?.systemInstruction - ) - - if let chatHistory = sample?.chatHistory, !chatHistory.isEmpty { - messages = ChatMessage.from(chatHistory) - chat = model.startChat(history: chatHistory) - } else { - chat = model.startChat() - } + @Published var messages = [ChatMessage]() + @Published var initialPrompt: String = "" + @Published var title: String = "" + @Published var error: Error? + @Published var inProgress = false - initialPrompt = sample?.initialPrompt ?? "" - title = sample?.title ?? "" + @Published var presentErrorDetails: Bool = false - fileDataParts = sample?.fileDataParts - if let fileDataParts = fileDataParts, !fileDataParts.isEmpty { - for fileDataPart in fileDataParts { - attachments.append(MultimodalAttachment(fileDataPart: fileDataPart)) - } - } - } - - func sendMessage(_ text: String, streaming: Bool = true) async { - error = nil - if streaming { - await internalSendMessageStreaming(text) - } else { - await internalSendMessage(text) - } - } - - func startNewChat() { - stop() - error = nil - chat = model.startChat() - messages.removeAll() - attachments.removeAll() - initialPrompt = "" - } - - func stop() { - chatTask?.cancel() - error = nil - } - - private func internalSendMessageStreaming(_ text: String) async { - chatTask?.cancel() - - chatTask = Task { - inProgress = true - defer { - inProgress = false - } - - let userMessage = ChatMessage(content: text, participant: .user, attachments: attachments) - messages.append(userMessage) - let systemMessage = ChatMessage.pending(participant: .other) - messages.append(systemMessage) - - do { - var parts: [any PartsRepresentable] = [text] - - if backendType == .vertexAI, let fileDataParts = fileDataParts { - // This is a patch for Cloud Storage support. Only available when using Vertex AI Gemini API. - // For non-text inputs (e.g., media files), you can attach files from Cloud Storage to the request. - // if you do not want to use Cloud Storage, you can remove this `if` statement. - // Reference: https://firebase.google.com/docs/ai-logic/solutions/cloud-storage - for fileDataPart in fileDataParts { - parts.append(fileDataPart) - } + @Published var attachments = [MultimodalAttachment]() + + private var model: GenerativeModel + private var chat: Chat + private var chatTask: Task? + private let logger = Logger(subsystem: "com.example.firebaseai", category: "MultimodalViewModel") + + private var sample: Sample? + private var backendType: BackendOption + private var fileDataParts: [FileDataPart]? + + init(backendType: BackendOption, sample: Sample? = nil) { + self.sample = sample + self.backendType = backendType + + let firebaseService = backendType == .googleAI + ? FirebaseAI.firebaseAI(backend: .googleAI()) + : FirebaseAI.firebaseAI(backend: .vertexAI()) + + model = firebaseService.generativeModel( + modelName: sample?.modelName ?? "gemini-2.5-flash", + systemInstruction: sample?.systemInstruction + ) + + if let chatHistory = sample?.chatHistory, !chatHistory.isEmpty { + messages = ChatMessage.from(chatHistory) + chat = model.startChat(history: chatHistory) } else { - for attachment in attachments { - if let inlineDataPart = await attachment.toInlineDataPart() { - parts.append(inlineDataPart) - } - } + chat = model.startChat() } - attachments.removeAll() + initialPrompt = sample?.initialPrompt ?? "" + title = sample?.title ?? 
"" - let responseStream = try chat.sendMessageStream(parts) - for try await chunk in responseStream { - messages[messages.count - 1].pending = false - if let text = chunk.text { - messages[messages.count - 1] - .content = (messages[messages.count - 1].content ?? "") + text - } + fileDataParts = sample?.fileDataParts + if let fileDataParts = fileDataParts, !fileDataParts.isEmpty { + for fileDataPart in fileDataParts { + attachments.append(MultimodalAttachment(fileDataPart: fileDataPart)) + } } - } catch { - self.error = error - logger.error("\(error.localizedDescription)") - let errorMessage = ChatMessage(content: "An error occurred. Please try again.", - participant: .other, - error: error, - pending: false) - messages[messages.count - 1] = errorMessage - } } - } - - private func internalSendMessage(_ text: String) async { - chatTask?.cancel() - - chatTask = Task { - inProgress = true - defer { - inProgress = false - } - let userMessage = ChatMessage(content: text, participant: .user, attachments: attachments) - messages.append(userMessage) - - let systemMessage = ChatMessage.pending(participant: .other) - messages.append(systemMessage) - - do { - var parts: [any PartsRepresentable] = [text] - - if backendType == .vertexAI, let fileDataParts = fileDataParts { - // This is a patch for Cloud Storage support. Only available when using Vertex AI Gemini API. - // For non-text inputs (e.g., media files), you can attach files from Cloud Storage to the request. - // if you do not want to use Cloud Storage, you can remove this `if` statement. - // Reference: https://firebase.google.com/docs/ai-logic/solutions/cloud-storage - for fileDataPart in fileDataParts { - parts.append(fileDataPart) - } + + func sendMessage(_ text: String, streaming: Bool = true) async { + error = nil + if streaming { + await internalSendMessageStreaming(text) } else { - for attachment in attachments { - if let inlineDataPart = await attachment.toInlineDataPart() { - parts.append(inlineDataPart) - } - } + await internalSendMessage(text) } + } + func startNewChat() { + stop() + error = nil + chat = model.startChat() + messages.removeAll() attachments.removeAll() + initialPrompt = "" + } - let response = try await chat.sendMessage(parts) + func stop() { + chatTask?.cancel() + error = nil + } - if let responseText = response.text { - messages[messages.count - 1].content = responseText - messages[messages.count - 1].pending = false + private func internalSendMessageStreaming(_ text: String) async { + chatTask?.cancel() + + chatTask = Task { + inProgress = true + defer { + inProgress = false + } + + let userMessage = ChatMessage(content: text, participant: .user, attachments: attachments) + messages.append(userMessage) + let systemMessage = ChatMessage.pending(participant: .other) + messages.append(systemMessage) + + do { + var parts: [any PartsRepresentable] = [text] + + if backendType == .vertexAI, let fileDataParts = fileDataParts { + // This is a patch for Cloud Storage support. Only available when using Vertex AI Gemini API. + // For non-text inputs (e.g., media files), you can attach files from Cloud Storage to the request. + // if you do not want to use Cloud Storage, you can remove this `if` statement. 
+ // Reference: https://firebase.google.com/docs/ai-logic/solutions/cloud-storage + for fileDataPart in fileDataParts { + parts.append(fileDataPart) + } + } else { + for attachment in attachments { + if let inlineDataPart = await attachment.toInlineDataPart() { + parts.append(inlineDataPart) + } + } + } + + attachments.removeAll() + + let responseStream = try chat.sendMessageStream(parts) + for try await chunk in responseStream { + messages[messages.count - 1].pending = false + if let text = chunk.text { + messages[messages.count - 1] + .content = (messages[messages.count - 1].content ?? "") + text + } + } + } catch { + self.error = error + logger.error("\(error.localizedDescription)") + let errorMessage = ChatMessage(content: "An error occurred. Please try again.", + participant: .other, + error: error, + pending: false) + messages[messages.count - 1] = errorMessage + } } - } catch { - self.error = error - logger.error("\(error.localizedDescription)") - let errorMessage = ChatMessage(content: "An error occurred. Please try again.", - participant: .other, - error: error, - pending: false) - messages[messages.count - 1] = errorMessage - } } - } - func addAttachment(_ attachment: MultimodalAttachment) { - attachments.append(attachment) - } + private func internalSendMessage(_ text: String) async { + chatTask?.cancel() - func removeAttachment(_ attachment: MultimodalAttachment) { - if attachment.isCloudStorage { - // Remove corresponding fileDataPart when attachment is deleted. - fileDataParts?.removeAll { $0.uri == attachment.url?.absoluteString } + chatTask = Task { + inProgress = true + defer { + inProgress = false + } + let userMessage = ChatMessage(content: text, participant: .user, attachments: attachments) + messages.append(userMessage) + + let systemMessage = ChatMessage.pending(participant: .other) + messages.append(systemMessage) + + do { + var parts: [any PartsRepresentable] = [text] + + if backendType == .vertexAI, let fileDataParts = fileDataParts { + // This is a patch for Cloud Storage support. Only available when using Vertex AI Gemini API. + // For non-text inputs (e.g., media files), you can attach files from Cloud Storage to the request. + // if you do not want to use Cloud Storage, you can remove this `if` statement. + // Reference: https://firebase.google.com/docs/ai-logic/solutions/cloud-storage + for fileDataPart in fileDataParts { + parts.append(fileDataPart) + } + } else { + for attachment in attachments { + if let inlineDataPart = await attachment.toInlineDataPart() { + parts.append(inlineDataPart) + } + } + } + + attachments.removeAll() + + let response = try await chat.sendMessage(parts) + + if let responseText = response.text { + messages[messages.count - 1].content = responseText + messages[messages.count - 1].pending = false + } + } catch { + self.error = error + logger.error("\(error.localizedDescription)") + let errorMessage = ChatMessage(content: "An error occurred. Please try again.", + participant: .other, + error: error, + pending: false) + messages[messages.count - 1] = errorMessage + } + } + } + + func addAttachment(_ attachment: MultimodalAttachment) { + attachments.append(attachment) } - attachments.removeAll { $0.id == attachment.id } - } + func removeAttachment(_ attachment: MultimodalAttachment) { + if attachment.isCloudStorage { + // Remove corresponding fileDataPart when attachment is deleted. 
+ fileDataParts?.removeAll { $0.uri == attachment.url?.absoluteString } + } + + attachments.removeAll { $0.id == attachment.id } + } } diff --git a/firebaseai/FirebaseAIExample/Features/Multimodal/Views/AttachmentPreviewCard.swift b/firebaseai/FirebaseAIExample/Features/Multimodal/Views/AttachmentPreviewCard.swift index 5ba537de0..fa922a6d5 100644 --- a/firebaseai/FirebaseAIExample/Features/Multimodal/Views/AttachmentPreviewCard.swift +++ b/firebaseai/FirebaseAIExample/Features/Multimodal/Views/AttachmentPreviewCard.swift @@ -15,157 +15,157 @@ import SwiftUI private enum AttachmentType: String { - case image, video, audio, pdf, other - - init(mimeType: String) { - let mt = mimeType.lowercased() - if mt.hasPrefix("image/") { self = .image } - else if mt.hasPrefix("video/") { self = .video } - else if mt.hasPrefix("audio/") { self = .audio } - else if mt == "application/pdf" { self = .pdf } - else { self = .other } - } - - var systemImageName: String { - switch self { - case .image: return "photo" - case .video: return "video" - case .audio: return "waveform" - case .pdf: return "doc.text" - case .other: return "questionmark" + case image, video, audio, pdf, other + + init(mimeType: String) { + let mt = mimeType.lowercased() + if mt.hasPrefix("image/") { self = .image } + else if mt.hasPrefix("video/") { self = .video } + else if mt.hasPrefix("audio/") { self = .audio } + else if mt == "application/pdf" { self = .pdf } + else { self = .other } } - } - - var typeTagColor: Color { - switch self { - case .image: return .green - case .video: return .purple - case .audio: return .orange - case .pdf: return .red - case .other: return .blue + + var systemImageName: String { + switch self { + case .image: return "photo" + case .video: return "video" + case .audio: return "waveform" + case .pdf: return "doc.text" + case .other: return "questionmark" + } + } + + var typeTagColor: Color { + switch self { + case .image: return .green + case .video: return .purple + case .audio: return .orange + case .pdf: return .red + case .other: return .blue + } } - } - - var displayFileType: String { - switch self { - case .image: return "IMAGE" - case .video: return "VIDEO" - case .audio: return "AUDIO" - case .pdf: return "PDF" - case .other: return "UNKNOWN" + + var displayFileType: String { + switch self { + case .image: return "IMAGE" + case .video: return "VIDEO" + case .audio: return "AUDIO" + case .pdf: return "PDF" + case .other: return "UNKNOWN" + } } - } } struct AttachmentPreviewCard: View { - let attachment: MultimodalAttachment - - private var attachmentType: AttachmentType { - AttachmentType(mimeType: attachment.mimeType) - } - - var body: some View { - HStack(spacing: 12) { - Image(systemName: attachmentType.systemImageName) - .font(.system(size: 20)) - .foregroundColor(.blue) - .frame(width: 40, height: 40) - .background(Color.blue.opacity(0.1)) - .clipShape(RoundedRectangle(cornerRadius: 6)) - - VStack(alignment: .leading, spacing: 4) { - Text(displayName) - .font(.system(size: 14, weight: .medium)) - .lineLimit(1) - .truncationMode(.middle) - .foregroundColor(.primary) - - HStack(spacing: 8) { - Text(attachmentType.displayFileType) - .font(.system(size: 10, weight: .semibold)) - .padding(.horizontal, 6) - .padding(.vertical, 2) - .background(attachmentType.typeTagColor) - .foregroundColor(.white) - .clipShape(Capsule()) - - Spacer() - } - } + let attachment: MultimodalAttachment + + private var attachmentType: AttachmentType { + AttachmentType(mimeType: attachment.mimeType) } - .frame(width: 180) 
- .padding(12) - .background(Color(.systemGray6)) - .clipShape(RoundedRectangle(cornerRadius: 12)) - .overlay( - RoundedRectangle(cornerRadius: 12) - .stroke(Color(.separator), lineWidth: 0.5) - ) - } - - private var displayName: String { - let fileName = attachment.url?.lastPathComponent ?? "Default" - let maxLength = 30 - if fileName.count <= maxLength { - return fileName + + var body: some View { + HStack(spacing: 12) { + Image(systemName: attachmentType.systemImageName) + .font(.system(size: 20)) + .foregroundColor(.blue) + .frame(width: 40, height: 40) + .background(Color.blue.opacity(0.1)) + .clipShape(RoundedRectangle(cornerRadius: 6)) + + VStack(alignment: .leading, spacing: 4) { + Text(displayName) + .font(.system(size: 14, weight: .medium)) + .lineLimit(1) + .truncationMode(.middle) + .foregroundColor(.primary) + + HStack(spacing: 8) { + Text(attachmentType.displayFileType) + .font(.system(size: 10, weight: .semibold)) + .padding(.horizontal, 6) + .padding(.vertical, 2) + .background(attachmentType.typeTagColor) + .foregroundColor(.white) + .clipShape(Capsule()) + + Spacer() + } + } + } + .frame(width: 180) + .padding(12) + .background(Color(.systemGray6)) + .clipShape(RoundedRectangle(cornerRadius: 12)) + .overlay( + RoundedRectangle(cornerRadius: 12) + .stroke(Color(.separator), lineWidth: 0.5) + ) } - let prefixName = fileName.prefix(15) - let suffixName = fileName.suffix(10) - return "\(prefixName)...\(suffixName)" - } + private var displayName: String { + let fileName = attachment.url?.lastPathComponent ?? "Default" + let maxLength = 30 + if fileName.count <= maxLength { + return fileName + } + + let prefixName = fileName.prefix(15) + let suffixName = fileName.suffix(10) + return "\(prefixName)...\(suffixName)" + } } struct AttachmentPreviewScrollView: View { - let attachments: [MultimodalAttachment] - - var body: some View { - if !attachments.isEmpty { - ScrollView(.horizontal, showsIndicators: false) { - HStack { - ForEach(attachments) { attachment in - AttachmentPreviewCard( - attachment: attachment, - ) - } + let attachments: [MultimodalAttachment] + + var body: some View { + if !attachments.isEmpty { + ScrollView(.horizontal, showsIndicators: false) { + HStack { + ForEach(attachments) { attachment in + AttachmentPreviewCard( + attachment: attachment, + ) + } + } + .padding(.horizontal, 8) + } + } else { + EmptyView() } - .padding(.horizontal, 8) - } - } else { - EmptyView() } - } } #Preview { - VStack(spacing: 20) { - AttachmentPreviewCard( - attachment: MultimodalAttachment( - mimeType: "image/jpeg", - data: Data() - ), - ) - - AttachmentPreviewCard( - attachment: MultimodalAttachment( - mimeType: "application/pdf", - data: Data() - ), - ) - - AttachmentPreviewCard( - attachment: MultimodalAttachment( - mimeType: "video/mp4", - data: Data() - ), - ) - - AttachmentPreviewCard( - attachment: MultimodalAttachment( - mimeType: "audio/mpeg", - data: Data() - ), - ) - } - .padding() + VStack(spacing: 20) { + AttachmentPreviewCard( + attachment: MultimodalAttachment( + mimeType: "image/jpeg", + data: Data() + ), + ) + + AttachmentPreviewCard( + attachment: MultimodalAttachment( + mimeType: "application/pdf", + data: Data() + ), + ) + + AttachmentPreviewCard( + attachment: MultimodalAttachment( + mimeType: "video/mp4", + data: Data() + ), + ) + + AttachmentPreviewCard( + attachment: MultimodalAttachment( + mimeType: "audio/mpeg", + data: Data() + ), + ) + } + .padding() } diff --git a/firebaseai/FirebaseAIExample/FirebaseAIExampleApp.swift 
b/firebaseai/FirebaseAIExample/FirebaseAIExampleApp.swift index 99194a765..c3011a778 100644 --- a/firebaseai/FirebaseAIExample/FirebaseAIExampleApp.swift +++ b/firebaseai/FirebaseAIExample/FirebaseAIExampleApp.swift @@ -17,42 +17,43 @@ import SwiftUI import TipKit class AppDelegate: NSObject, UIApplicationDelegate { - func application(_ application: UIApplication, - didFinishLaunchingWithOptions launchOptions: [UIApplication - .LaunchOptionsKey: Any]? = nil) -> Bool { - // Recommendation: Protect your Vertex AI API resources from abuse by preventing unauthorized - // clients using App Check; see https://firebase.google.com/docs/app-check#get_started. - - FirebaseApp.configure() - - if let firebaseApp = FirebaseApp.app(), firebaseApp.options.projectID == "mockproject-1234" { - guard let bundleID = Bundle.main.bundleIdentifier else { fatalError() } - fatalError(""" - You must create and/or download a valid `GoogleService-Info.plist` file for \(bundleID) from \ - https://console.firebase.google.com to run this example. Replace the existing \ - `GoogleService-Info.plist` file in the `firebaseai` directory with this new file. - """) + func application(_: UIApplication, + didFinishLaunchingWithOptions _: [UIApplication + .LaunchOptionsKey: Any]? = nil) -> Bool + { + // Recommendation: Protect your Vertex AI API resources from abuse by preventing unauthorized + // clients using App Check; see https://firebase.google.com/docs/app-check#get_started. + + FirebaseApp.configure() + + if let firebaseApp = FirebaseApp.app(), firebaseApp.options.projectID == "mockproject-1234" { + guard let bundleID = Bundle.main.bundleIdentifier else { fatalError() } + fatalError(""" + You must create and/or download a valid `GoogleService-Info.plist` file for \(bundleID) from \ + https://console.firebase.google.com to run this example. Replace the existing \ + `GoogleService-Info.plist` file in the `firebaseai` directory with this new file. + """) + } + + return true } - - return true - } } @main struct FirebaseAIExampleApp: App { - @UIApplicationDelegateAdaptor var appDelegate: AppDelegate - - init() { - do { - try Tips.configure() - } catch { - print("Error initializing tips: \(error)") + @UIApplicationDelegateAdaptor var appDelegate: AppDelegate + + init() { + do { + try Tips.configure() + } catch { + print("Error initializing tips: \(error)") + } } - } - var body: some Scene { - WindowGroup { - ContentView() + var body: some Scene { + WindowGroup { + ContentView() + } } - } } diff --git a/firebaseai/FirebaseAIExample/Shared/ApplicationError.swift b/firebaseai/FirebaseAIExample/Shared/ApplicationError.swift index 344a97472..1ea8bd155 100644 --- a/firebaseai/FirebaseAIExample/Shared/ApplicationError.swift +++ b/firebaseai/FirebaseAIExample/Shared/ApplicationError.swift @@ -16,15 +16,15 @@ import Foundation /// Generic error for issues that occur within the application. 
public struct ApplicationError: Error, Sendable, CustomNSError { - let localizedDescription: String + let localizedDescription: String - init(_ localizedDescription: String) { - self.localizedDescription = localizedDescription - } + init(_ localizedDescription: String) { + self.localizedDescription = localizedDescription + } - public var errorUserInfo: [String: Any] { - [ - NSLocalizedDescriptionKey: localizedDescription, - ] - } + public var errorUserInfo: [String: Any] { + [ + NSLocalizedDescriptionKey: localizedDescription, + ] + } } diff --git a/firebaseai/FirebaseAIExample/Shared/Audio/AudioBufferHelpers.swift b/firebaseai/FirebaseAIExample/Shared/Audio/AudioBufferHelpers.swift index 504d09620..3e260d96c 100644 --- a/firebaseai/FirebaseAIExample/Shared/Audio/AudioBufferHelpers.swift +++ b/firebaseai/FirebaseAIExample/Shared/Audio/AudioBufferHelpers.swift @@ -15,77 +15,77 @@ import AVFoundation extension AVAudioPCMBuffer { - /// Creates a new `AVAudioPCMBuffer` from a `Data` struct. - /// - /// Only works with interleaved data. - static func fromInterleavedData(data: Data, format: AVAudioFormat) throws -> AVAudioPCMBuffer? { - guard format.isInterleaved else { - throw ApplicationError("Only interleaved data is supported") - } + /// Creates a new `AVAudioPCMBuffer` from a `Data` struct. + /// + /// Only works with interleaved data. + static func fromInterleavedData(data: Data, format: AVAudioFormat) throws -> AVAudioPCMBuffer? { + guard format.isInterleaved else { + throw ApplicationError("Only interleaved data is supported") + } - let frameCapacity = AVAudioFrameCount(data - .count / Int(format.streamDescription.pointee.mBytesPerFrame)) - guard let buffer = AVAudioPCMBuffer(pcmFormat: format, frameCapacity: frameCapacity) else { - return nil - } + let frameCapacity = AVAudioFrameCount(data + .count / Int(format.streamDescription.pointee.mBytesPerFrame)) + guard let buffer = AVAudioPCMBuffer(pcmFormat: format, frameCapacity: frameCapacity) else { + return nil + } + + buffer.frameLength = frameCapacity + data.withUnsafeBytes { bytes in + guard let baseAddress = bytes.baseAddress else { return } + let dst = buffer.mutableAudioBufferList.pointee.mBuffers + dst.mData?.copyMemory(from: baseAddress, byteCount: Int(dst.mDataByteSize)) + } - buffer.frameLength = frameCapacity - data.withUnsafeBytes { bytes in - guard let baseAddress = bytes.baseAddress else { return } - let dst = buffer.mutableAudioBufferList.pointee.mBuffers - dst.mData?.copyMemory(from: baseAddress, byteCount: Int(dst.mDataByteSize)) + return buffer } - return buffer - } + /// Gets the underlying `Data` in this buffer. + /// + /// Will throw an error if this buffer doesn't hold int16 data. + func int16Data() throws -> Data { + guard let bufferPtr = audioBufferList.pointee.mBuffers.mData else { + throw ApplicationError("Missing audio buffer list") + } - /// Gets the underlying `Data` in this buffer. - /// - /// Will throw an error if this buffer doesn't hold int16 data. - func int16Data() throws -> Data { - guard let bufferPtr = audioBufferList.pointee.mBuffers.mData else { - throw ApplicationError("Missing audio buffer list") + let audioBufferLenth = Int(audioBufferList.pointee.mBuffers.mDataByteSize) + return Data(bytes: bufferPtr, count: audioBufferLenth) } - - let audioBufferLenth = Int(audioBufferList.pointee.mBuffers.mDataByteSize) - return Data(bytes: bufferPtr, count: audioBufferLenth) - } } extension AVAudioConverter { - /// Uses the converter to convert the provided `buffer`. 
- /// - /// Will handle determining the proper frame capacity, ensuring formats align, and propagating any - /// errors that occur. - /// - /// - Returns: A new buffer, with the converted data. - func convertBuffer(_ buffer: AVAudioPCMBuffer) throws -> AVAudioPCMBuffer { - if buffer.format == outputFormat { return buffer } - guard buffer.format == inputFormat else { - throw ApplicationError("The buffer's format was different than the converter's input format") - } + /// Uses the converter to convert the provided `buffer`. + /// + /// Will handle determining the proper frame capacity, ensuring formats align, and propagating any + /// errors that occur. + /// + /// - Returns: A new buffer, with the converted data. + func convertBuffer(_ buffer: AVAudioPCMBuffer) throws -> AVAudioPCMBuffer { + if buffer.format == outputFormat { return buffer } + guard buffer.format == inputFormat else { + throw ApplicationError("The buffer's format was different than the converter's input format") + } - let frameCapacity = AVAudioFrameCount( - ceil(Double(buffer.frameLength) * outputFormat.sampleRate / inputFormat.sampleRate) - ) + let frameCapacity = AVAudioFrameCount( + ceil(Double(buffer.frameLength) * outputFormat.sampleRate / inputFormat.sampleRate) + ) - guard let output = AVAudioPCMBuffer( - pcmFormat: outputFormat, - frameCapacity: frameCapacity - ) else { - throw ApplicationError("Failed to create output buffer") - } + guard let output = AVAudioPCMBuffer( + pcmFormat: outputFormat, + frameCapacity: frameCapacity + ) else { + throw ApplicationError("Failed to create output buffer") + } - var error: NSError? - convert(to: output, error: &error) { _, status in - status.pointee = .haveData - return buffer - } + var error: NSError? + convert(to: output, error: &error) { _, status in + status.pointee = .haveData + return buffer + } - if let error { - throw ApplicationError("Failed to convert buffer: \(error.localizedDescription)") - } + if let error { + throw ApplicationError("Failed to convert buffer: \(error.localizedDescription)") + } - return output - } + return output + } } diff --git a/firebaseai/FirebaseAIExample/Shared/Audio/AudioPlayer.swift b/firebaseai/FirebaseAIExample/Shared/Audio/AudioPlayer.swift index ed224cf48..fe551025a 100644 --- a/firebaseai/FirebaseAIExample/Shared/Audio/AudioPlayer.swift +++ b/firebaseai/FirebaseAIExample/Shared/Audio/AudioPlayer.swift @@ -18,71 +18,71 @@ import OSLog /// Plays back audio through the primary output device. 
class AudioPlayer { - private var logger = Logger(subsystem: Bundle.main.bundleIdentifier!, category: "generative-ai") + private var logger = Logger(subsystem: Bundle.main.bundleIdentifier!, category: "generative-ai") - private let engine: AVAudioEngine - private let inputFormat: AVAudioFormat - private let outputFormat: AVAudioFormat - private let playbackNode: AVAudioPlayerNode - private var formatConverter: AVAudioConverter + private let engine: AVAudioEngine + private let inputFormat: AVAudioFormat + private let outputFormat: AVAudioFormat + private let playbackNode: AVAudioPlayerNode + private var formatConverter: AVAudioConverter - init(engine: AVAudioEngine, inputFormat: AVAudioFormat, outputFormat: AVAudioFormat) throws { - self.engine = engine + init(engine: AVAudioEngine, inputFormat: AVAudioFormat, outputFormat: AVAudioFormat) throws { + self.engine = engine - guard let formatConverter = AVAudioConverter(from: inputFormat, to: outputFormat) else { - throw ApplicationError("Failed to create the audio converter") - } - - let playbackNode = AVAudioPlayerNode() + guard let formatConverter = AVAudioConverter(from: inputFormat, to: outputFormat) else { + throw ApplicationError("Failed to create the audio converter") + } - engine.attach(playbackNode) - engine.connect(playbackNode, to: engine.mainMixerNode, format: outputFormat) + let playbackNode = AVAudioPlayerNode() - self.inputFormat = inputFormat - self.outputFormat = outputFormat - self.formatConverter = formatConverter - self.playbackNode = playbackNode - } + engine.attach(playbackNode) + engine.connect(playbackNode, to: engine.mainMixerNode, format: outputFormat) - deinit { - stop() - } - - /// Queue audio to be played through the output device. - /// - /// Note that in a real app, you'd ideally schedule the data before converting it, and then mark data as consumed after its been played - /// back. That way, if the audio route changes during playback, you can requeue the buffer on the new output device. - /// - /// For the sake of simplicity, that is not implemented here; a route change will prevent the currently queued conversation from - /// being played through the output device. - public func play(_ audio: Data) throws { - guard engine.isRunning else { - logger.warning("Audio engine needs to be running to play audio.") - return + self.inputFormat = inputFormat + self.outputFormat = outputFormat + self.formatConverter = formatConverter + self.playbackNode = playbackNode } - guard let inputBuffer = try AVAudioPCMBuffer.fromInterleavedData( - data: audio, - format: inputFormat - ) else { - throw ApplicationError("Failed to create input buffer for playback") + deinit { + stop() } - let buffer = try formatConverter.convertBuffer(inputBuffer) + /// Queue audio to be played through the output device. + /// + /// Note that in a real app, you'd ideally schedule the data before converting it, and then mark data as consumed after its been played + /// back. That way, if the audio route changes during playback, you can requeue the buffer on the new output device. + /// + /// For the sake of simplicity, that is not implemented here; a route change will prevent the currently queued conversation from + /// being played through the output device. 
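+    ///
+    /// A minimal usage sketch (the engine, format names, and `pcmChunk` are placeholders; see
+    /// `AudioController` for the real wiring):
+    ///
+    ///     let player = try AudioPlayer(engine: engine,
+    ///                                  inputFormat: modelOutputFormat,
+    ///                                  outputFormat: engine.outputNode.outputFormat(forBus: 0))
+    ///     try player.play(pcmChunk) // interleaved data matching `inputFormat`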
+ public func play(_ audio: Data) throws { + guard engine.isRunning else { + logger.warning("Audio engine needs to be running to play audio.") + return + } + + guard let inputBuffer = try AVAudioPCMBuffer.fromInterleavedData( + data: audio, + format: inputFormat + ) else { + throw ApplicationError("Failed to create input buffer for playback") + } - playbackNode.scheduleBuffer(buffer, at: nil) - playbackNode.play() - } + let buffer = try formatConverter.convertBuffer(inputBuffer) - /// Stops the current audio playing. - public func interrupt() { - playbackNode.stop() - } + playbackNode.scheduleBuffer(buffer, at: nil) + playbackNode.play() + } + + /// Stops the current audio playing. + public func interrupt() { + playbackNode.stop() + } - /// Permanently stop all audio playback. - public func stop() { - interrupt() - engine.disconnectNodeInput(playbackNode) - engine.disconnectNodeOutput(playbackNode) - } + /// Permanently stop all audio playback. + public func stop() { + interrupt() + engine.disconnectNodeInput(playbackNode) + engine.disconnectNodeOutput(playbackNode) + } } diff --git a/firebaseai/FirebaseAIExample/Shared/Audio/Microphone.swift b/firebaseai/FirebaseAIExample/Shared/Audio/Microphone.swift index 7d182bad6..62e9833d8 100644 --- a/firebaseai/FirebaseAIExample/Shared/Audio/Microphone.swift +++ b/firebaseai/FirebaseAIExample/Shared/Audio/Microphone.swift @@ -12,51 +12,51 @@ // See the License for the specific language governing permissions and // limitations under the License. -import Foundation import AVFoundation +import Foundation /// Microphone bindings using Apple's AudioEngine API. class Microphone { - /// Data recorded from the microphone. - public let audio: AsyncStream - private let audioQueue: AsyncStream.Continuation - - private let inputNode: AVAudioInputNode - private let audioEngine: AVAudioEngine - - private var isRunning = false - - init(engine: AVAudioEngine) { - let (audio, audioQueue) = AsyncStream.makeStream() - - self.audio = audio - self.audioQueue = audioQueue - inputNode = engine.inputNode - audioEngine = engine - } - - deinit { - stop() - } - - public func start() { - guard !isRunning else { return } - isRunning = true - - // 50ms buffer size for balancing latency and cpu overhead - let targetBufferSize = UInt32(inputNode.outputFormat(forBus: 0).sampleRate / 20) - inputNode - .installTap(onBus: 0, bufferSize: targetBufferSize, format: nil) { [weak self] buffer, _ in - guard let self else { return } - audioQueue.yield(buffer) - } - } - - public func stop() { - audioQueue.finish() - if isRunning { - isRunning = false - inputNode.removeTap(onBus: 0) + /// Data recorded from the microphone. 
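+    ///
+    /// A consumption sketch (`microphone` and `process` are placeholders; `AudioController`
+    /// converts each buffer before forwarding it to the model):
+    ///
+    ///     for await buffer in microphone.audio {
+    ///         process(buffer)
+    ///     }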
+ public let audio: AsyncStream + private let audioQueue: AsyncStream.Continuation + + private let inputNode: AVAudioInputNode + private let audioEngine: AVAudioEngine + + private var isRunning = false + + init(engine: AVAudioEngine) { + let (audio, audioQueue) = AsyncStream.makeStream() + + self.audio = audio + self.audioQueue = audioQueue + inputNode = engine.inputNode + audioEngine = engine + } + + deinit { + stop() + } + + public func start() { + guard !isRunning else { return } + isRunning = true + + // 50ms buffer size for balancing latency and cpu overhead + let targetBufferSize = UInt32(inputNode.outputFormat(forBus: 0).sampleRate / 20) + inputNode + .installTap(onBus: 0, bufferSize: targetBufferSize, format: nil) { [weak self] buffer, _ in + guard let self else { return } + audioQueue.yield(buffer) + } + } + + public func stop() { + audioQueue.finish() + if isRunning { + isRunning = false + inputNode.removeTap(onBus: 0) + } } - } } diff --git a/firebaseai/FirebaseAIExample/Shared/Models/BackendOption.swift b/firebaseai/FirebaseAIExample/Shared/Models/BackendOption.swift index 0731fba0c..8ac801d28 100644 --- a/firebaseai/FirebaseAIExample/Shared/Models/BackendOption.swift +++ b/firebaseai/FirebaseAIExample/Shared/Models/BackendOption.swift @@ -15,8 +15,8 @@ import Foundation enum BackendOption: String, CaseIterable, Identifiable { - case googleAI = "Google AI" - case vertexAI = "Firebase Vertex AI" + case googleAI = "Google AI" + case vertexAI = "Firebase Vertex AI" - var id: String { rawValue } + var id: String { rawValue } } diff --git a/firebaseai/FirebaseAIExample/Shared/Models/Sample.swift b/firebaseai/FirebaseAIExample/Shared/Models/Sample.swift index 954a05872..126a6866f 100644 --- a/firebaseai/FirebaseAIExample/Shared/Models/Sample.swift +++ b/firebaseai/FirebaseAIExample/Shared/Models/Sample.swift @@ -14,302 +14,303 @@ import Foundation #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif public struct Sample: Identifiable { - public let id = UUID() - public let title: String - public let description: String - public let useCases: [UseCase] - public let navRoute: String - public let modelName: String - public let chatHistory: [ModelContent]? - public let initialPrompt: String? - public let systemInstruction: ModelContent? - public let tools: [Tool]? - public let generationConfig: GenerationConfig? - public let liveGenerationConfig: LiveGenerationConfig? - public let fileDataParts: [FileDataPart]? - public let tip: InlineTip? + public let id = UUID() + public let title: String + public let description: String + public let useCases: [UseCase] + public let navRoute: String + public let modelName: String + public let chatHistory: [ModelContent]? + public let initialPrompt: String? + public let systemInstruction: ModelContent? + public let tools: [Tool]? + public let generationConfig: GenerationConfig? + public let liveGenerationConfig: LiveGenerationConfig? + public let fileDataParts: [FileDataPart]? + public let tip: InlineTip? - public init(title: String, - description: String, - useCases: [UseCase], - navRoute: String, - modelName: String = "gemini-2.5-flash", - chatHistory: [ModelContent]? = nil, - initialPrompt: String? = nil, - systemInstruction: ModelContent? = nil, - tools: [Tool]? = nil, - generationConfig: GenerationConfig? = nil, - liveGenerationConfig: LiveGenerationConfig? = nil, - fileDataParts: [FileDataPart]? = nil, - tip: InlineTip? 
= nil) { - self.title = title - self.description = description - self.useCases = useCases - self.navRoute = navRoute - self.modelName = modelName - self.chatHistory = chatHistory - self.initialPrompt = initialPrompt - self.systemInstruction = systemInstruction - self.tools = tools - self.generationConfig = generationConfig - self.liveGenerationConfig = liveGenerationConfig - self.fileDataParts = fileDataParts - self.tip = tip - } + public init(title: String, + description: String, + useCases: [UseCase], + navRoute: String, + modelName: String = "gemini-2.5-flash", + chatHistory: [ModelContent]? = nil, + initialPrompt: String? = nil, + systemInstruction: ModelContent? = nil, + tools: [Tool]? = nil, + generationConfig: GenerationConfig? = nil, + liveGenerationConfig: LiveGenerationConfig? = nil, + fileDataParts: [FileDataPart]? = nil, + tip: InlineTip? = nil) + { + self.title = title + self.description = description + self.useCases = useCases + self.navRoute = navRoute + self.modelName = modelName + self.chatHistory = chatHistory + self.initialPrompt = initialPrompt + self.systemInstruction = systemInstruction + self.tools = tools + self.generationConfig = generationConfig + self.liveGenerationConfig = liveGenerationConfig + self.fileDataParts = fileDataParts + self.tip = tip + } } -extension Sample { - public static let samples: [Sample] = [ - // Text - Sample( - title: "Travel tips", - description: "The user wants the model to help a new traveler" + - " with travel tips", - useCases: [.text], - navRoute: "ChatScreen", - chatHistory: [ - ModelContent( - role: "user", - parts: "I have never traveled before. When should I book a flight?" +public extension Sample { + static let samples: [Sample] = [ + // Text + Sample( + title: "Travel tips", + description: "The user wants the model to help a new traveler" + + " with travel tips", + useCases: [.text], + navRoute: "ChatScreen", + chatHistory: [ + ModelContent( + role: "user", + parts: "I have never traveled before. When should I book a flight?" + ), + ModelContent( + role: "model", + parts: "You should book flights a couple of months ahead of time. It will be cheaper and more flexible for you." + ), + ModelContent(role: "user", parts: "Do I need a passport?"), + ModelContent( + role: "model", + parts: "If you are traveling outside your own country, make sure your passport is up-to-date and valid for more than 6 months during your travel." + ), + ], + initialPrompt: "What else is important when traveling?", + systemInstruction: ModelContent(parts: "You are a Travel assistant. You will answer" + + " questions the user asks based on the information listed" + + " in Relevant Information. Do not hallucinate. Do not use" + + " the internet."), + ), + Sample( + title: "Hello world (with template)", + description: "Uses a template to say hello. The template uses 'name' and 'language' (defaults to Spanish) as inputs.", + useCases: [.text], + navRoute: "GenerateContentFromTemplateScreen", + initialPrompt: "Peter", + systemInstruction: ModelContent( + parts: "The user's name is {{name}}. They prefer to communicate in {{language}}." + ) + ), + Sample( + title: "Chatbot recommendations for courses", + description: "A chatbot suggests courses for a performing arts program.", + useCases: [.text], + navRoute: "ChatScreen", + initialPrompt: "I am interested in Performing Arts. I have taken Theater 1A.", + systemInstruction: ModelContent(parts: "You are a chatbot for the county's performing and fine arts" + + " program. 
You help students decide what course they will" + + " take during the summer."), + ), + // Image + Sample( + title: "Blog post creator", + description: "Create a blog post from an image file stored in Cloud Storage.", + useCases: [.image], + navRoute: "MultimodalScreen", + initialPrompt: "Write a short, engaging blog post based on this picture." + + " It should include a description of the meal in the" + + " photo and talk about my journey meal prepping.", + fileDataParts: [ + FileDataPart( + uri: "https://storage.googleapis.com/cloud-samples-data/generative-ai/image/meal-prep.jpeg", + mimeType: "image/jpeg" + ), + ] ), - ModelContent( - role: "model", - parts: "You should book flights a couple of months ahead of time. It will be cheaper and more flexible for you." + Sample( + title: "Imagen - image generation", + description: "Generate images using Imagen 3", + useCases: [.image], + navRoute: "ImagenScreen", + initialPrompt: "A photo of a modern building with water in the background" ), - ModelContent(role: "user", parts: "Do I need a passport?"), - ModelContent( - role: "model", - parts: "If you are traveling outside your own country, make sure your passport is up-to-date and valid for more than 6 months during your travel." + Sample( + title: "[T] Imagen - image generation", + description: "[T] Generate images using Imagen 3", + useCases: [.image], + navRoute: "ImagenFromTemplateScreen", + initialPrompt: "A photo of a modern building with water in the background" ), - ], - initialPrompt: "What else is important when traveling?", - systemInstruction: ModelContent(parts: "You are a Travel assistant. You will answer" + - " questions the user asks based on the information listed" + - " in Relevant Information. Do not hallucinate. Do not use" + - " the internet."), - ), - Sample( - title: "Hello world (with template)", - description: "Uses a template to say hello. The template uses 'name' and 'language' (defaults to Spanish) as inputs.", - useCases: [.text], - navRoute: "GenerateContentFromTemplateScreen", - initialPrompt: "Peter", - systemInstruction: ModelContent( - parts: "The user's name is {{name}}. They prefer to communicate in {{language}}." - ) - ), - Sample( - title: "Chatbot recommendations for courses", - description: "A chatbot suggests courses for a performing arts program.", - useCases: [.text], - navRoute: "ChatScreen", - initialPrompt: "I am interested in Performing Arts. I have taken Theater 1A.", - systemInstruction: ModelContent(parts: "You are a chatbot for the county's performing and fine arts" + - " program. You help students decide what course they will" + - " take during the summer."), - ), - // Image - Sample( - title: "Blog post creator", - description: "Create a blog post from an image file stored in Cloud Storage.", - useCases: [.image], - navRoute: "MultimodalScreen", - initialPrompt: "Write a short, engaging blog post based on this picture." 
+ - " It should include a description of the meal in the" + - " photo and talk about my journey meal prepping.", - fileDataParts: [ - FileDataPart( - uri: "https://storage.googleapis.com/cloud-samples-data/generative-ai/image/meal-prep.jpeg", - mimeType: "image/jpeg" + Sample( + title: "Gemini Flash - image generation", + description: "Generate and/or edit images using Gemini 2.0 Flash", + useCases: [.image], + navRoute: "ChatScreen", + modelName: "gemini-2.0-flash-preview-image-generation", + initialPrompt: "Hi, can you create a 3d rendered image of a pig " + + "with wings and a top hat flying over a happy " + + "futuristic scifi city with lots of greenery?", + generationConfig: GenerationConfig(responseModalities: [.text, .image]), ), - ] - ), - Sample( - title: "Imagen - image generation", - description: "Generate images using Imagen 3", - useCases: [.image], - navRoute: "ImagenScreen", - initialPrompt: "A photo of a modern building with water in the background" - ), - Sample( - title: "[T] Imagen - image generation", - description: "[T] Generate images using Imagen 3", - useCases: [.image], - navRoute: "ImagenFromTemplateScreen", - initialPrompt: "A photo of a modern building with water in the background" - ), - Sample( - title: "Gemini Flash - image generation", - description: "Generate and/or edit images using Gemini 2.0 Flash", - useCases: [.image], - navRoute: "ChatScreen", - modelName: "gemini-2.0-flash-preview-image-generation", - initialPrompt: "Hi, can you create a 3d rendered image of a pig " + - "with wings and a top hat flying over a happy " + - "futuristic scifi city with lots of greenery?", - generationConfig: GenerationConfig(responseModalities: [.text, .image]), - ), - // Video - Sample( - title: "Hashtags for a video", - description: "Generate hashtags for a video ad stored in Cloud Storage.", - useCases: [.video], - navRoute: "MultimodalScreen", - initialPrompt: "Generate 5-10 hashtags that relate to the video content." + - " Try to use more popular and engaging terms," + - " e.g. #Viral. Do not add content not related to" + - " the video.\n Start the output with 'Tags:'", - fileDataParts: [ - FileDataPart( - uri: "https://storage.googleapis.com/cloud-samples-data/generative-ai/video/google_home_celebrity_ad.mp4", - mimeType: "video/mp4" + // Video + Sample( + title: "Hashtags for a video", + description: "Generate hashtags for a video ad stored in Cloud Storage.", + useCases: [.video], + navRoute: "MultimodalScreen", + initialPrompt: "Generate 5-10 hashtags that relate to the video content." + + " Try to use more popular and engaging terms," + + " e.g. #Viral. Do not add content not related to" + + " the video.\n Start the output with 'Tags:'", + fileDataParts: [ + FileDataPart( + uri: "https://storage.googleapis.com/cloud-samples-data/generative-ai/video/google_home_celebrity_ad.mp4", + mimeType: "video/mp4" + ), + ] ), - ] - ), - Sample( - title: "Summarize video", - description: "Summarize a video and extract important dialogue.", - useCases: [.video], - navRoute: "MultimodalScreen", - chatHistory: [ - ModelContent(role: "user", parts: "Can you help me with the description of a video file?"), - ModelContent( - role: "model", - parts: "Sure! Click on the attach button below and choose a video file for me to describe." 
+ Sample( + title: "Summarize video", + description: "Summarize a video and extract important dialogue.", + useCases: [.video], + navRoute: "MultimodalScreen", + chatHistory: [ + ModelContent(role: "user", parts: "Can you help me with the description of a video file?"), + ModelContent( + role: "model", + parts: "Sure! Click on the attach button below and choose a video file for me to describe." + ), + ], + initialPrompt: "I have attached the video file. Provide a description of" + + " the video. The description should also contain" + + " anything important which people say in the video." + ), + // Audio + Sample( + title: "Audio Summarization", + description: "Summarize an audio file", + useCases: [.audio], + navRoute: "MultimodalScreen", + chatHistory: [ + ModelContent(role: "user", parts: "Can you help me summarize an audio file?"), + ModelContent( + role: "model", + parts: "Of course! Click on the attach button below and choose an audio file for me to summarize." + ), + ], + initialPrompt: "I have attached the audio file. Please analyze it and summarize the contents" + + " of the audio as bullet points." ), - ], - initialPrompt: "I have attached the video file. Provide a description of" + - " the video. The description should also contain" + - " anything important which people say in the video." - ), - // Audio - Sample( - title: "Audio Summarization", - description: "Summarize an audio file", - useCases: [.audio], - navRoute: "MultimodalScreen", - chatHistory: [ - ModelContent(role: "user", parts: "Can you help me summarize an audio file?"), - ModelContent( - role: "model", - parts: "Of course! Click on the attach button below and choose an audio file for me to summarize." + Sample( + title: "Translation from audio", + description: "Translate an audio file stored in Cloud Storage", + useCases: [.audio], + navRoute: "MultimodalScreen", + initialPrompt: "Please translate the audio in Mandarin.", + fileDataParts: [ + FileDataPart( + uri: "https://storage.googleapis.com/cloud-samples-data/generative-ai/audio/How_to_create_a_My_Map_in_Google_Maps.mp3", + mimeType: "audio/mp3" + ), + ] ), - ], - initialPrompt: "I have attached the audio file. Please analyze it and summarize the contents" + - " of the audio as bullet points." - ), - Sample( - title: "Translation from audio", - description: "Translate an audio file stored in Cloud Storage", - useCases: [.audio], - navRoute: "MultimodalScreen", - initialPrompt: "Please translate the audio in Mandarin.", - fileDataParts: [ - FileDataPart( - uri: "https://storage.googleapis.com/cloud-samples-data/generative-ai/audio/How_to_create_a_My_Map_in_Google_Maps.mp3", - mimeType: "audio/mp3" + // Document + Sample( + title: "Document comparison", + description: "Compare the contents of 2 documents." + + " Supported by the Vertex AI Gemini API because the documents are stored in Cloud Storage", + useCases: [.document], + navRoute: "MultimodalScreen", + initialPrompt: "The first document is from 2013, and the second document is" + + " from 2023. How did the standard deduction evolve?", + fileDataParts: [ + FileDataPart( + uri: "https://storage.googleapis.com/cloud-samples-data/generative-ai/pdf/form_1040_2013.pdf", + mimeType: "application/pdf" + ), + FileDataPart( + uri: "https://storage.googleapis.com/cloud-samples-data/generative-ai/pdf/form_1040_2023.pdf", + mimeType: "application/pdf" + ), + ] ), - ] - ), - // Document - Sample( - title: "Document comparison", - description: "Compare the contents of 2 documents." 
+ - " Supported by the Vertex AI Gemini API because the documents are stored in Cloud Storage", - useCases: [.document], - navRoute: "MultimodalScreen", - initialPrompt: "The first document is from 2013, and the second document is" + - " from 2023. How did the standard deduction evolve?", - fileDataParts: [ - FileDataPart( - uri: "https://storage.googleapis.com/cloud-samples-data/generative-ai/pdf/form_1040_2013.pdf", - mimeType: "application/pdf" + // Function Calling + Sample( + title: "Weather Chat", + description: "Use function calling to get the weather conditions" + + " for a specific US city on a specific date.", + useCases: [.functionCalling, .text], + navRoute: "FunctionCallingScreen", + initialPrompt: "What was the weather in Boston, MA on October 17, 2024?", + tools: [.functionDeclarations([ + FunctionDeclaration( + name: "fetchWeather", + description: "Get the weather conditions for a specific US city on a specific date", + parameters: [ + "city": .string(description: "The US city of the location"), + "state": .string(description: "The US state of the location"), + "date": .string(description: "The date for which to get the weather." + + " Date must be in the format: YYYY-MM-DD"), + ] + ), + ])] ), - FileDataPart( - uri: "https://storage.googleapis.com/cloud-samples-data/generative-ai/pdf/form_1040_2023.pdf", - mimeType: "application/pdf" + // Grounding + Sample( + title: "Grounding with Google Search", + description: "Use Grounding with Google Search to get responses based on up-to-date information from the web.", + useCases: [.text], + navRoute: "GroundingScreen", + initialPrompt: "What's the weather in Chicago this weekend?", + tools: [.googleSearch()] ), - ] - ), - // Function Calling - Sample( - title: "Weather Chat", - description: "Use function calling to get the weather conditions" + - " for a specific US city on a specific date.", - useCases: [.functionCalling, .text], - navRoute: "FunctionCallingScreen", - initialPrompt: "What was the weather in Boston, MA on October 17, 2024?", - tools: [.functionDeclarations([ - FunctionDeclaration( - name: "fetchWeather", - description: "Get the weather conditions for a specific US city on a specific date", - parameters: [ - "city": .string(description: "The US city of the location"), - "state": .string(description: "The US state of the location"), - "date": .string(description: "The date for which to get the weather." 
+ - " Date must be in the format: YYYY-MM-DD"), - ] + // Live API + Sample( + title: "Live native audio", + description: "Use the Live API to talk with the model via native audio.", + useCases: [.audio], + navRoute: "LiveScreen", + liveGenerationConfig: LiveGenerationConfig( + responseModalities: [.audio], + speech: SpeechConfig(voiceName: "Zephyr", languageCode: "en-US"), + outputAudioTranscription: AudioTranscriptionConfig() + ) ), - ])] - ), - // Grounding - Sample( - title: "Grounding with Google Search", - description: "Use Grounding with Google Search to get responses based on up-to-date information from the web.", - useCases: [.text], - navRoute: "GroundingScreen", - initialPrompt: "What's the weather in Chicago this weekend?", - tools: [.googleSearch()] - ), - // Live API - Sample( - title: "Live native audio", - description: "Use the Live API to talk with the model via native audio.", - useCases: [.audio], - navRoute: "LiveScreen", - liveGenerationConfig: LiveGenerationConfig( - responseModalities: [.audio], - speech: SpeechConfig(voiceName: "Zephyr", languageCode: "en-US"), - outputAudioTranscription: AudioTranscriptionConfig() - ) - ), - Sample( - title: "Live function calling", - description: "Use function calling with the Live API to ask the model to change the background color.", - useCases: [.functionCalling, .audio], - navRoute: "LiveScreen", - tools: [ - .functionDeclarations([ - FunctionDeclaration( - name: "changeBackgroundColor", - description: "Changes the background color to the specified hex color.", - parameters: [ - "color": .string( - description: "Hex code of the color to change to. (eg, #F54927)" - ), + Sample( + title: "Live function calling", + description: "Use function calling with the Live API to ask the model to change the background color.", + useCases: [.functionCalling, .audio], + navRoute: "LiveScreen", + tools: [ + .functionDeclarations([ + FunctionDeclaration( + name: "changeBackgroundColor", + description: "Changes the background color to the specified hex color.", + parameters: [ + "color": .string( + description: "Hex code of the color to change to. 
(eg, #F54927)" + ), + ], + ), + FunctionDeclaration( + name: "clearBackgroundColor", + description: "Removes the background color.", + parameters: [:] + ), + ]), ], - ), - FunctionDeclaration( - name: "clearBackgroundColor", - description: "Removes the background color.", - parameters: [:] - ), - ]), - ], - liveGenerationConfig: LiveGenerationConfig( - responseModalities: [.audio], - speech: SpeechConfig(voiceName: "Zephyr", languageCode: "en-US"), - outputAudioTranscription: AudioTranscriptionConfig() - ), - tip: InlineTip(text: "Try asking the model to change the background color"), - ), - ] + liveGenerationConfig: LiveGenerationConfig( + responseModalities: [.audio], + speech: SpeechConfig(voiceName: "Zephyr", languageCode: "en-US"), + outputAudioTranscription: AudioTranscriptionConfig() + ), + tip: InlineTip(text: "Try asking the model to change the background color"), + ), + ] - public static var sample = samples[0] + static var sample = samples[0] } diff --git a/firebaseai/FirebaseAIExample/Shared/Models/UseCase.swift b/firebaseai/FirebaseAIExample/Shared/Models/UseCase.swift index ee4e80f8a..d57573070 100644 --- a/firebaseai/FirebaseAIExample/Shared/Models/UseCase.swift +++ b/firebaseai/FirebaseAIExample/Shared/Models/UseCase.swift @@ -15,13 +15,13 @@ import Foundation public enum UseCase: String, CaseIterable, Identifiable { - case all = "All" - case text = "Text" - case image = "Image" - case video = "Video" - case audio = "Audio" - case document = "Document" - case functionCalling = "Function Calling" + case all = "All" + case text = "Text" + case image = "Image" + case video = "Video" + case audio = "Audio" + case document = "Document" + case functionCalling = "Function Calling" - public var id: String { rawValue } + public var id: String { rawValue } } diff --git a/firebaseai/FirebaseAIExample/Shared/Util/Color+Hex.swift b/firebaseai/FirebaseAIExample/Shared/Util/Color+Hex.swift index 74e0d1513..1e7f71aa1 100644 --- a/firebaseai/FirebaseAIExample/Shared/Util/Color+Hex.swift +++ b/firebaseai/FirebaseAIExample/Shared/Util/Color+Hex.swift @@ -15,30 +15,30 @@ import SwiftUI extension Color { - /// Creates a new `Color` instance from a hex string. - /// - /// Supports both RGB and RGBA hex strings. - init?(hex: String) { - let hex = hex.replacingOccurrences(of: "#", with: "").uppercased() + /// Creates a new `Color` instance from a hex string. + /// + /// Supports both RGB and RGBA hex strings. 
+ init?(hex: String) { + let hex = hex.replacingOccurrences(of: "#", with: "").uppercased() - var rgb: UInt64 = 0 - guard Scanner(string: hex).scanHexInt64(&rgb) else { return nil } + var rgb: UInt64 = 0 + guard Scanner(string: hex).scanHexInt64(&rgb) else { return nil } - var r: CGFloat = 0, g: CGFloat = 0, b: CGFloat = 0, a: CGFloat = 1 + var r: CGFloat = 0, g: CGFloat = 0, b: CGFloat = 0, a: CGFloat = 1 - if hex.count == 6 { - r = CGFloat((rgb & 0xFF0000) >> 16) / 255.0 - g = CGFloat((rgb & 0x00FF00) >> 8) / 255.0 - b = CGFloat(rgb & 0x0000FF) / 255.0 - } else if hex.count == 8 { - r = CGFloat((rgb & 0xFF00_0000) >> 24) / 255.0 - g = CGFloat((rgb & 0x00FF_0000) >> 16) / 255.0 - b = CGFloat((rgb & 0x0000_FF00) >> 8) / 255.0 - a = CGFloat(rgb & 0x0000_00FF) / 255.0 - } else { - return nil - } + if hex.count == 6 { + r = CGFloat((rgb & 0xFF0000) >> 16) / 255.0 + g = CGFloat((rgb & 0x00FF00) >> 8) / 255.0 + b = CGFloat(rgb & 0x0000FF) / 255.0 + } else if hex.count == 8 { + r = CGFloat((rgb & 0xFF00_0000) >> 24) / 255.0 + g = CGFloat((rgb & 0x00FF_0000) >> 16) / 255.0 + b = CGFloat((rgb & 0x0000_FF00) >> 8) / 255.0 + a = CGFloat(rgb & 0x0000_00FF) / 255.0 + } else { + return nil + } - self.init(red: r, green: g, blue: b, opacity: a) - } + self.init(red: r, green: g, blue: b, opacity: a) + } } diff --git a/firebaseai/FirebaseAIExample/Shared/Views/ErrorDetailsView.swift b/firebaseai/FirebaseAIExample/Shared/Views/ErrorDetailsView.swift index cd1e3f60e..3bd6ae81c 100644 --- a/firebaseai/FirebaseAIExample/Shared/Views/ErrorDetailsView.swift +++ b/firebaseai/FirebaseAIExample/Shared/Views/ErrorDetailsView.swift @@ -13,252 +13,252 @@ // limitations under the License. #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif import MarkdownUI import SwiftUI private extension HarmCategory { - /// Returns a description of the `HarmCategory` suitable for displaying in the UI. - var displayValue: String { - switch self { - case .dangerousContent: "Dangerous content" - case .harassment: "Harassment" - case .hateSpeech: "Hate speech" - case .sexuallyExplicit: "Sexually explicit" - case .civicIntegrity: "Civic integrity" - default: "Unknown HarmCategory: \(rawValue)" + /// Returns a description of the `HarmCategory` suitable for displaying in the UI. + var displayValue: String { + switch self { + case .dangerousContent: "Dangerous content" + case .harassment: "Harassment" + case .hateSpeech: "Hate speech" + case .sexuallyExplicit: "Sexually explicit" + case .civicIntegrity: "Civic integrity" + default: "Unknown HarmCategory: \(rawValue)" + } } - } } private extension SafetyRating.HarmProbability { - /// Returns a description of the `HarmProbability` suitable for displaying in the UI. - var displayValue: String { - switch self { - case .high: "High" - case .low: "Low" - case .medium: "Medium" - case .negligible: "Negligible" - default: "Unknown HarmProbability: \(rawValue)" + /// Returns a description of the `HarmProbability` suitable for displaying in the UI. 
+ var displayValue: String { + switch self { + case .high: "High" + case .low: "Low" + case .medium: "Medium" + case .negligible: "Negligible" + default: "Unknown HarmProbability: \(rawValue)" + } } - } } private struct SubtitleFormRow: View { - var title: String - var value: String + var title: String + var value: String - var body: some View { - VStack(alignment: .leading) { - Text(title) - .font(.subheadline) - Text(value) + var body: some View { + VStack(alignment: .leading) { + Text(title) + .font(.subheadline) + Text(value) + } } - } } private struct SubtitleMarkdownFormRow: View { - var title: String - var value: String + var title: String + var value: String - var body: some View { - VStack(alignment: .leading) { - Text(title) - .font(.subheadline) - Markdown(value) + var body: some View { + VStack(alignment: .leading) { + Text(title) + .font(.subheadline) + Markdown(value) + } } - } } private struct SafetyRatingsSection: View { - var ratings: [SafetyRating] + var ratings: [SafetyRating] - var body: some View { - Section("Safety ratings") { - List(ratings, id: \.self) { rating in - HStack { - Text(rating.category.displayValue).font(.subheadline) - Spacer() - Text(rating.probability.displayValue) + var body: some View { + Section("Safety ratings") { + List(ratings, id: \.self) { rating in + HStack { + Text(rating.category.displayValue).font(.subheadline) + Spacer() + Text(rating.probability.displayValue) + } + } } - } } - } } struct ErrorDetailsView: View { - var error: Error + var error: Error - var body: some View { - NavigationView { - Form { - switch error { - case let GenerateContentError.internalError(underlying: underlyingError): - Section("Error Type") { - Text("Internal error") - } + var body: some View { + NavigationView { + Form { + switch error { + case let GenerateContentError.internalError(underlying: underlyingError): + Section("Error Type") { + Text("Internal error") + } - Section("Details") { - SubtitleFormRow(title: "Error description", - value: underlyingError.localizedDescription) - } + Section("Details") { + SubtitleFormRow(title: "Error description", + value: underlyingError.localizedDescription) + } - case let GenerateContentError.promptBlocked(response: generateContentResponse): - Section("Error Type") { - Text("Your prompt was blocked") - } + case let GenerateContentError.promptBlocked(response: generateContentResponse): + Section("Error Type") { + Text("Your prompt was blocked") + } - Section("Details") { - if let reason = generateContentResponse.promptFeedback?.blockReason { - SubtitleFormRow(title: "Reason for blocking", value: reason.rawValue) - } + Section("Details") { + if let reason = generateContentResponse.promptFeedback?.blockReason { + SubtitleFormRow(title: "Reason for blocking", value: reason.rawValue) + } - if let text = generateContentResponse.text { - SubtitleMarkdownFormRow(title: "Last chunk for the response", value: text) - } - } + if let text = generateContentResponse.text { + SubtitleMarkdownFormRow(title: "Last chunk for the response", value: text) + } + } - if let ratings = generateContentResponse.candidates.first?.safetyRatings { - SafetyRatingsSection(ratings: ratings) - } + if let ratings = generateContentResponse.candidates.first?.safetyRatings { + SafetyRatingsSection(ratings: ratings) + } - case let GenerateContentError.responseStoppedEarly( - reason: finishReason, - response: generateContentResponse - ): + case let GenerateContentError.responseStoppedEarly( + reason: finishReason, + response: generateContentResponse + ): - 
Section("Error Type") { - Text("Response stopped early") - } + Section("Error Type") { + Text("Response stopped early") + } - Section("Details") { - SubtitleFormRow(title: "Reason for finishing early", value: finishReason.rawValue) + Section("Details") { + SubtitleFormRow(title: "Reason for finishing early", value: finishReason.rawValue) - if let text = generateContentResponse.text { - SubtitleMarkdownFormRow(title: "Last chunk for the response", value: text) - } - } + if let text = generateContentResponse.text { + SubtitleMarkdownFormRow(title: "Last chunk for the response", value: text) + } + } - if let ratings = generateContentResponse.candidates.first?.safetyRatings { - SafetyRatingsSection(ratings: ratings) - } + if let ratings = generateContentResponse.candidates.first?.safetyRatings { + SafetyRatingsSection(ratings: ratings) + } - default: - Section("Error Type") { - Text("Some other error") - } + default: + Section("Error Type") { + Text("Some other error") + } - Section("Details") { - SubtitleFormRow(title: "Error description", value: error.localizedDescription) - } + Section("Details") { + SubtitleFormRow(title: "Error description", value: error.localizedDescription) + } + } + } + .navigationTitle("Error details") + .navigationBarTitleDisplayMode(.inline) } - } - .navigationTitle("Error details") - .navigationBarTitleDisplayMode(.inline) } - } } #Preview("Response Stopped Early") { - let error = GenerateContentError.responseStoppedEarly( - reason: .maxTokens, - response: GenerateContentResponse(candidates: [ - Candidate(content: ModelContent(role: "model", parts: - """ - A _hypothetical_ model response. - Cillum ex aliqua amet aliquip labore amet eiusmod consectetur reprehenderit sit commodo. - """), - safetyRatings: [ - SafetyRating( - category: .dangerousContent, - probability: .medium, - probabilityScore: 0.8, - severity: .medium, - severityScore: 0.9, - blocked: false - ), - SafetyRating( - category: .harassment, - probability: .low, - probabilityScore: 0.5, - severity: .low, - severityScore: 0.6, - blocked: false - ), - SafetyRating( - category: .hateSpeech, - probability: .low, - probabilityScore: 0.3, - severity: .medium, - severityScore: 0.2, - blocked: false - ), - SafetyRating( - category: .sexuallyExplicit, - probability: .low, - probabilityScore: 0.2, - severity: .negligible, - severityScore: 0.5, - blocked: false - ), - ], - finishReason: FinishReason.maxTokens, - citationMetadata: nil), - ]) - ) + let error = GenerateContentError.responseStoppedEarly( + reason: .maxTokens, + response: GenerateContentResponse(candidates: [ + Candidate(content: ModelContent(role: "model", parts: + """ + A _hypothetical_ model response. + Cillum ex aliqua amet aliquip labore amet eiusmod consectetur reprehenderit sit commodo. 
+ """), + safetyRatings: [ + SafetyRating( + category: .dangerousContent, + probability: .medium, + probabilityScore: 0.8, + severity: .medium, + severityScore: 0.9, + blocked: false + ), + SafetyRating( + category: .harassment, + probability: .low, + probabilityScore: 0.5, + severity: .low, + severityScore: 0.6, + blocked: false + ), + SafetyRating( + category: .hateSpeech, + probability: .low, + probabilityScore: 0.3, + severity: .medium, + severityScore: 0.2, + blocked: false + ), + SafetyRating( + category: .sexuallyExplicit, + probability: .low, + probabilityScore: 0.2, + severity: .negligible, + severityScore: 0.5, + blocked: false + ), + ], + finishReason: FinishReason.maxTokens, + citationMetadata: nil), + ]) + ) - return ErrorDetailsView(error: error) + return ErrorDetailsView(error: error) } #Preview("Prompt Blocked") { - let error = GenerateContentError.promptBlocked( - response: GenerateContentResponse(candidates: [ - Candidate(content: ModelContent(role: "model", parts: - """ - A _hypothetical_ model response. - Cillum ex aliqua amet aliquip labore amet eiusmod consectetur reprehenderit sit commodo. - """), - safetyRatings: [ - SafetyRating( - category: .dangerousContent, - probability: .low, - probabilityScore: 0.8, - severity: .medium, - severityScore: 0.9, - blocked: false - ), - SafetyRating( - category: .harassment, - probability: .low, - probabilityScore: 0.5, - severity: .low, - severityScore: 0.6, - blocked: false - ), - SafetyRating( - category: .hateSpeech, - probability: .low, - probabilityScore: 0.3, - severity: .medium, - severityScore: 0.2, - blocked: false - ), - SafetyRating( - category: .sexuallyExplicit, - probability: .low, - probabilityScore: 0.2, - severity: .negligible, - severityScore: 0.5, - blocked: false - ), - ], - finishReason: FinishReason.other, - citationMetadata: nil), - ]) - ) + let error = GenerateContentError.promptBlocked( + response: GenerateContentResponse(candidates: [ + Candidate(content: ModelContent(role: "model", parts: + """ + A _hypothetical_ model response. + Cillum ex aliqua amet aliquip labore amet eiusmod consectetur reprehenderit sit commodo. 
+ """), + safetyRatings: [ + SafetyRating( + category: .dangerousContent, + probability: .low, + probabilityScore: 0.8, + severity: .medium, + severityScore: 0.9, + blocked: false + ), + SafetyRating( + category: .harassment, + probability: .low, + probabilityScore: 0.5, + severity: .low, + severityScore: 0.6, + blocked: false + ), + SafetyRating( + category: .hateSpeech, + probability: .low, + probabilityScore: 0.3, + severity: .medium, + severityScore: 0.2, + blocked: false + ), + SafetyRating( + category: .sexuallyExplicit, + probability: .low, + probabilityScore: 0.2, + severity: .negligible, + severityScore: 0.5, + blocked: false + ), + ], + finishReason: FinishReason.other, + citationMetadata: nil), + ]) + ) - return ErrorDetailsView(error: error) + return ErrorDetailsView(error: error) } diff --git a/firebaseai/FirebaseAIExample/Shared/Views/FilterChipView.swift b/firebaseai/FirebaseAIExample/Shared/Views/FilterChipView.swift index 1e6e436d0..a1f831365 100644 --- a/firebaseai/FirebaseAIExample/Shared/Views/FilterChipView.swift +++ b/firebaseai/FirebaseAIExample/Shared/Views/FilterChipView.swift @@ -15,40 +15,40 @@ import SwiftUI struct FilterChipView: View { - let useCase: UseCase - let isSelected: Bool - let action: () -> Void + let useCase: UseCase + let isSelected: Bool + let action: () -> Void - var body: some View { - Button(action: action) { - Text(useCase.rawValue) - .padding(.horizontal) + var body: some View { + Button(action: action) { + Text(useCase.rawValue) + .padding(.horizontal) + } + .filterChipStyle(isSelected: isSelected) } - .filterChipStyle(isSelected: isSelected) - } } private struct FilterChipStyle: ViewModifier { - let isSelected: Bool + let isSelected: Bool - func body(content: Content) -> some View { - if isSelected { - content.buttonStyle(.borderedProminent) - } else { - content.buttonStyle(.bordered) + func body(content: Content) -> some View { + if isSelected { + content.buttonStyle(.borderedProminent) + } else { + content.buttonStyle(.bordered) + } } - } } extension View { - func filterChipStyle(isSelected: Bool) -> some View { - modifier(FilterChipStyle(isSelected: isSelected)) - } + func filterChipStyle(isSelected: Bool) -> some View { + modifier(FilterChipStyle(isSelected: isSelected)) + } } #Preview { - VStack(spacing: 16) { - FilterChipView(useCase: .text, isSelected: true) {} - FilterChipView(useCase: .text, isSelected: false) {} - } + VStack(spacing: 16) { + FilterChipView(useCase: .text, isSelected: true) {} + FilterChipView(useCase: .text, isSelected: false) {} + } } diff --git a/firebaseai/FirebaseAIExample/Shared/Views/InlineTip.swift b/firebaseai/FirebaseAIExample/Shared/Views/InlineTip.swift index ff72d9b5e..8002dde3b 100644 --- a/firebaseai/FirebaseAIExample/Shared/Views/InlineTip.swift +++ b/firebaseai/FirebaseAIExample/Shared/Views/InlineTip.swift @@ -15,43 +15,43 @@ import TipKit public struct InlineTip: Tip { - private let _text: String - private let _title: String - private let _icon: Image + private let _text: String + private let _title: String + private let _icon: Image - public init(text: String, title: String = "Tip", icon: Image = Image(systemName: "info.circle")) { - _text = text - _title = title - _icon = icon - } + public init(text: String, title: String = "Tip", icon: Image = Image(systemName: "info.circle")) { + _text = text + _title = title + _icon = icon + } - public var title: Text { - Text(_title) - } + public var title: Text { + Text(_title) + } - public var message: Text? { - Text(_text) - } + public var message: Text? 
{ + Text(_text) + } - public var image: Image? { - _icon - } + public var image: Image? { + _icon + } } #Preview { - TipView(InlineTip(text: "Try asking the model to change the background color")) - TipView( - InlineTip( - text: "You shouldn't do that.", - title: "Warning", - icon: Image(systemName: "exclamationmark.circle") + TipView(InlineTip(text: "Try asking the model to change the background color")) + TipView( + InlineTip( + text: "You shouldn't do that.", + title: "Warning", + icon: Image(systemName: "exclamationmark.circle") + ) ) - ) - TipView( - InlineTip( - text: "Oops, try again!", - title: "Error", - icon: Image(systemName: "x.circle") + TipView( + InlineTip( + text: "Oops, try again!", + title: "Error", + icon: Image(systemName: "x.circle") + ) ) - ) } diff --git a/firebaseai/FirebaseAIExample/Shared/Views/ProgressOverlay.swift b/firebaseai/FirebaseAIExample/Shared/Views/ProgressOverlay.swift index b2391f77b..bbc96980a 100644 --- a/firebaseai/FirebaseAIExample/Shared/Views/ProgressOverlay.swift +++ b/firebaseai/FirebaseAIExample/Shared/Views/ProgressOverlay.swift @@ -15,26 +15,26 @@ import SwiftUI struct ProgressOverlay: View { - var body: some View { - ZStack { - Color.black.opacity(0.3) - .ignoresSafeArea() + var body: some View { + ZStack { + Color.black.opacity(0.3) + .ignoresSafeArea() - ZStack { - RoundedRectangle(cornerRadius: 16) - .fill(Material.ultraThinMaterial) - .frame(width: 120, height: 100) - .shadow(radius: 8) + ZStack { + RoundedRectangle(cornerRadius: 16) + .fill(Material.ultraThinMaterial) + .frame(width: 120, height: 100) + .shadow(radius: 8) - VStack(spacing: 12) { - ProgressView() - .scaleEffect(1.5) + VStack(spacing: 12) { + ProgressView() + .scaleEffect(1.5) - Text("Loading...") - .font(.subheadline) - .foregroundColor(.secondary) + Text("Loading...") + .font(.subheadline) + .foregroundColor(.secondary) + } + } } - } } - } } diff --git a/firebaseai/FirebaseAIExample/Shared/Views/SampleCardView.swift b/firebaseai/FirebaseAIExample/Shared/Views/SampleCardView.swift index 66bb862d5..85da2ae52 100644 --- a/firebaseai/FirebaseAIExample/Shared/Views/SampleCardView.swift +++ b/firebaseai/FirebaseAIExample/Shared/Views/SampleCardView.swift @@ -15,111 +15,111 @@ import SwiftUI struct SampleCardView: View { - let sample: Sample + let sample: Sample - var body: some View { - GroupBox { - Text(sample.description) - .font(.system(size: 14)) - .foregroundColor(.secondary) - .frame(maxWidth: .infinity, maxHeight: .infinity, alignment: .topLeading) - } label: { - if let useCase = sample.useCases.first { - Label(sample.title, systemImage: systemName(for: useCase)) - .font(.system(size: 17, weight: .medium)) - .foregroundColor(color(for: useCase)) - } else { - Text(sample.title) - .font(.system(size: 17, weight: .medium)) - } + var body: some View { + GroupBox { + Text(sample.description) + .font(.system(size: 14)) + .foregroundColor(.secondary) + .frame(maxWidth: .infinity, maxHeight: .infinity, alignment: .topLeading) + } label: { + if let useCase = sample.useCases.first { + Label(sample.title, systemImage: systemName(for: useCase)) + .font(.system(size: 17, weight: .medium)) + .foregroundColor(color(for: useCase)) + } else { + Text(sample.title) + .font(.system(size: 17, weight: .medium)) + } + } + .groupBoxStyle(CardGroupBoxStyle()) + .frame(maxWidth: .infinity, minHeight: 150, maxHeight: .infinity, alignment: .top) } - .groupBoxStyle(CardGroupBoxStyle()) - .frame(maxWidth: .infinity, minHeight: 150, maxHeight: .infinity, alignment: .top) - } - private func 
systemName(for useCase: UseCase) -> String { - switch useCase { - case .all: "square.grid.2x2.fill" - case .text: "text.bubble.fill" - case .image: "photo.fill" - case .video: "video.fill" - case .audio: "waveform" - case .document: "doc.fill" - case .functionCalling: "gearshape.2.fill" + private func systemName(for useCase: UseCase) -> String { + switch useCase { + case .all: "square.grid.2x2.fill" + case .text: "text.bubble.fill" + case .image: "photo.fill" + case .video: "video.fill" + case .audio: "waveform" + case .document: "doc.fill" + case .functionCalling: "gearshape.2.fill" + } } - } - private func color(for useCase: UseCase) -> Color { - switch useCase { - case .all:.primary - case .text:.blue - case .image:.purple - case .video:.red - case .audio:.orange - case .document:.gray - case .functionCalling:.green + private func color(for useCase: UseCase) -> Color { + switch useCase { + case .all:.primary + case .text:.blue + case .image:.purple + case .video:.red + case .audio:.orange + case .document:.gray + case .functionCalling:.green + } } - } } public struct CardGroupBoxStyle: GroupBoxStyle { - private var cornerRadius: CGFloat { - if #available(iOS 26.0, *) { - return 28 - } else { - return 12 + private var cornerRadius: CGFloat { + if #available(iOS 26.0, *) { + return 28 + } else { + return 12 + } } - } - public func makeBody(configuration: Configuration) -> some View { - VStack(alignment: .leading, spacing: 12) { - configuration.label - configuration.content + public func makeBody(configuration: Configuration) -> some View { + VStack(alignment: .leading, spacing: 12) { + configuration.label + configuration.content + } + .padding() + .background(Color(.secondarySystemGroupedBackground)) + .clipShape(RoundedRectangle(cornerRadius: cornerRadius, style: .continuous)) } - .padding() - .background(Color(.secondarySystemGroupedBackground)) - .clipShape(RoundedRectangle(cornerRadius: cornerRadius, style: .continuous)) - } } #Preview { - let samples = [ - Sample( - title: "Sample 1", - description: "This is the first sample card.", - useCases: [.text], - navRoute: "ConversationScreen" - ), - Sample( - title: "Sample 2", - description: "This is the second sample card.", - useCases: [.image], - navRoute: "PhotoReasoningScreen" - ), - Sample( - title: "Sample 3", - description: "This is the third sample card.", - useCases: [.video], - navRoute: "ConversationScreen" - ), - Sample( - title: "Sample 4", - description: "This is the fourth sample card, which is a bit longer to see how the text wraps and if everything still aligns correctly.", - useCases: [.audio], - navRoute: "ConversationScreen" - ), - ] + let samples = [ + Sample( + title: "Sample 1", + description: "This is the first sample card.", + useCases: [.text], + navRoute: "ConversationScreen" + ), + Sample( + title: "Sample 2", + description: "This is the second sample card.", + useCases: [.image], + navRoute: "PhotoReasoningScreen" + ), + Sample( + title: "Sample 3", + description: "This is the third sample card.", + useCases: [.video], + navRoute: "ConversationScreen" + ), + Sample( + title: "Sample 4", + description: "This is the fourth sample card, which is a bit longer to see how the text wraps and if everything still aligns correctly.", + useCases: [.audio], + navRoute: "ConversationScreen" + ), + ] - ScrollView { - LazyVGrid(columns: [ - GridItem(.flexible()), - GridItem(.flexible()), - ], spacing: 16) { - ForEach(samples) { sample in - SampleCardView(sample: sample) - } + ScrollView { + LazyVGrid(columns: [ + 
GridItem(.flexible()), + GridItem(.flexible()), + ], spacing: 16) { + ForEach(samples) { sample in + SampleCardView(sample: sample) + } + } + .padding() } - .padding() - } - .background(Color(.systemGroupedBackground)) + .background(Color(.systemGroupedBackground)) } From 624de52ef574d2c8908081f1850cb17825873c40 Mon Sep 17 00:00:00 2001 From: Nick Cooke Date: Thu, 11 Dec 2025 17:10:47 -0500 Subject: [PATCH 09/10] Revert "Style" This reverts commit 05fb805f9be1d31f4ceeea1007943dae06a96d46. --- .../FirebaseAIExample/ContentView.swift | 174 +++--- .../Features/Chat/Models/ChatMessage.swift | 191 +++--- .../Features/Chat/Screens/ChatScreen.swift | 85 ++- .../Chat/ViewModels/ChatViewModel.swift | 284 ++++----- .../Features/Chat/Views/BouncingDots.swift | 96 +-- .../Features/Chat/Views/MessageView.swift | 240 ++++---- .../Screens/FunctionCallingScreen.swift | 85 ++- .../Services/WeatherService.swift | 18 +- .../ViewModels/FunctionCallingViewModel.swift | 468 ++++++++------- .../GenerateContentFromTemplateScreen.swift | 142 ++--- ...GenerateContentFromTemplateViewModel.swift | 138 ++--- .../Grounding/Screens/GroundingScreen.swift | 85 ++- .../ViewModels/GroundingViewModel.swift | 266 ++++----- .../Views/GoogleSearchSuggestionView.swift | 101 ++-- .../Views/GroundedResponseView.swift | 102 ++-- .../Imagen/ImagenFromTemplateScreen.swift | 152 ++--- .../Imagen/ImagenFromTemplateViewModel.swift | 132 ++--- .../Features/Imagen/ImagenScreen.swift | 162 ++--- .../Features/Imagen/ImagenViewModel.swift | 146 ++--- .../Features/Live/Screens/LiveScreen.swift | 88 +-- .../Live/ViewModels/LiveViewModel.swift | 474 +++++++-------- .../Live/ViewModels/TypeWriterViewModel.swift | 122 ++-- .../Live/Views/AudioOutputToggle.swift | 30 +- .../Features/Live/Views/ConnectButton.swift | 166 +++--- .../Features/Live/Views/ModelAvatar.swift | 84 +-- .../Features/Live/Views/TranscriptView.swift | 44 +- .../Models/MultimodalAttachment.swift | 475 +++++++-------- .../Multimodal/Screens/MultimodalScreen.swift | 303 +++++----- .../ViewModels/MultimodalViewModel.swift | 356 +++++------ .../Views/AttachmentPreviewCard.swift | 272 ++++----- .../FirebaseAIExampleApp.swift | 61 +- .../Shared/ApplicationError.swift | 18 +- .../Shared/Audio/AudioBufferHelpers.swift | 118 ++-- .../Shared/Audio/AudioPlayer.swift | 108 ++-- .../Shared/Audio/Microphone.swift | 84 +-- .../Shared/Models/BackendOption.swift | 6 +- .../Shared/Models/Sample.swift | 557 +++++++++--------- .../Shared/Models/UseCase.swift | 16 +- .../Shared/Util/Color+Hex.swift | 44 +- .../Shared/Views/ErrorDetailsView.swift | 390 ++++++------ .../Shared/Views/FilterChipView.swift | 46 +- .../Shared/Views/InlineTip.swift | 60 +- .../Shared/Views/ProgressOverlay.swift | 34 +- .../Shared/Views/SampleCardView.swift | 180 +++--- 44 files changed, 3598 insertions(+), 3605 deletions(-) diff --git a/firebaseai/FirebaseAIExample/ContentView.swift b/firebaseai/FirebaseAIExample/ContentView.swift index 390178581..830630f38 100644 --- a/firebaseai/FirebaseAIExample/ContentView.swift +++ b/firebaseai/FirebaseAIExample/ContentView.swift @@ -12,112 +12,112 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-import FirebaseAILogic import SwiftUI +import FirebaseAILogic struct ContentView: View { - @State private var selectedBackend: BackendOption = .googleAI - @State private var selectedUseCase: UseCase = .all + @State private var selectedBackend: BackendOption = .googleAI + @State private var selectedUseCase: UseCase = .all - var filteredSamples: [Sample] { - if selectedUseCase == .all { - return Sample.samples - } else { - return Sample.samples.filter { $0.useCases.contains(selectedUseCase) } - } + var filteredSamples: [Sample] { + if selectedUseCase == .all { + return Sample.samples + } else { + return Sample.samples.filter { $0.useCases.contains(selectedUseCase) } } + } - let columns = [ - GridItem(.adaptive(minimum: 150)), - ] + let columns = [ + GridItem(.adaptive(minimum: 150)), + ] - var body: some View { - NavigationStack { - ScrollView { - VStack(alignment: .leading, spacing: 20) { - // Backend Configuration - VStack(alignment: .leading) { - Text("Backend Configuration") - .font(.system(size: 20, weight: .bold)) - .padding(.horizontal) + var body: some View { + NavigationStack { + ScrollView { + VStack(alignment: .leading, spacing: 20) { + // Backend Configuration + VStack(alignment: .leading) { + Text("Backend Configuration") + .font(.system(size: 20, weight: .bold)) + .padding(.horizontal) - Picker("Backend", selection: $selectedBackend) { - ForEach(BackendOption.allCases) { option in - Text(option.rawValue) - .tag(option) - } - } - .pickerStyle(SegmentedPickerStyle()) - .padding(.horizontal) - } + Picker("Backend", selection: $selectedBackend) { + ForEach(BackendOption.allCases) { option in + Text(option.rawValue) + .tag(option) + } + } + .pickerStyle(SegmentedPickerStyle()) + .padding(.horizontal) + } - // Use Case Filter - VStack(alignment: .leading) { - Text("Filter by use case") - .font(.system(size: 20, weight: .bold)) - .padding(.horizontal) + // Use Case Filter + VStack(alignment: .leading) { + Text("Filter by use case") + .font(.system(size: 20, weight: .bold)) + .padding(.horizontal) - ScrollView(.horizontal, showsIndicators: false) { - HStack(spacing: 10) { - ForEach(UseCase.allCases) { useCase in - FilterChipView(useCase: useCase, isSelected: selectedUseCase == useCase) { - selectedUseCase = useCase - } - } - } - .padding(.horizontal) - } - } + ScrollView(.horizontal, showsIndicators: false) { + HStack(spacing: 10) { + ForEach(UseCase.allCases) { useCase in + FilterChipView(useCase: useCase, isSelected: selectedUseCase == useCase) { + selectedUseCase = useCase + } + } + } + .padding(.horizontal) + } + } - // Samples - VStack(alignment: .leading) { - Text("Samples") - .font(.system(size: 20, weight: .bold)) - .padding(.horizontal) + // Samples + VStack(alignment: .leading) { + Text("Samples") + .font(.system(size: 20, weight: .bold)) + .padding(.horizontal) - LazyVGrid(columns: columns, spacing: 20) { - ForEach(filteredSamples) { sample in - NavigationLink(destination: destinationView(for: sample)) { - SampleCardView(sample: sample) - } - .buttonStyle(PlainButtonStyle()) - } - } - .padding(.horizontal) - } + LazyVGrid(columns: columns, spacing: 20) { + ForEach(filteredSamples) { sample in + NavigationLink(destination: destinationView(for: sample)) { + SampleCardView(sample: sample) } - .padding(.vertical) + .buttonStyle(PlainButtonStyle()) + } } - .background(Color(.systemGroupedBackground)) - .navigationTitle("Firebase AI Logic") + .padding(.horizontal) + } } + .padding(.vertical) + } + .background(Color(.systemGroupedBackground)) + .navigationTitle("Firebase AI 
Logic") } + } - @ViewBuilder - private func destinationView(for sample: Sample) -> some View { - switch sample.navRoute { - case "ChatScreen": - ChatScreen(backendType: selectedBackend, sample: sample) - case "ImagenScreen": - ImagenScreen(backendType: selectedBackend, sample: sample) - case "ImagenFromTemplateScreen": - ImagenFromTemplateScreen(backendType: selectedBackend, sample: sample) - case "GenerateContentFromTemplateScreen": - GenerateContentFromTemplateScreen(backendType: selectedBackend, sample: sample) - case "MultimodalScreen": - MultimodalScreen(backendType: selectedBackend, sample: sample) - case "FunctionCallingScreen": - FunctionCallingScreen(backendType: selectedBackend, sample: sample) - case "GroundingScreen": - GroundingScreen(backendType: selectedBackend, sample: sample) - case "LiveScreen": - LiveScreen(backendType: selectedBackend, sample: sample) - default: - EmptyView() - } + @ViewBuilder + private func destinationView(for sample: Sample) -> some View { + switch sample.navRoute { + case "ChatScreen": + ChatScreen(backendType: selectedBackend, sample: sample) + case "ImagenScreen": + ImagenScreen(backendType: selectedBackend, sample: sample) + case "ImagenFromTemplateScreen": + ImagenFromTemplateScreen(backendType: selectedBackend, sample: sample) + case "GenerateContentFromTemplateScreen": + GenerateContentFromTemplateScreen(backendType: selectedBackend, sample: sample) + case "MultimodalScreen": + MultimodalScreen(backendType: selectedBackend, sample: sample) + case "FunctionCallingScreen": + FunctionCallingScreen(backendType: selectedBackend, sample: sample) + case "GroundingScreen": + GroundingScreen(backendType: selectedBackend, sample: sample) + case "LiveScreen": + LiveScreen(backendType: selectedBackend, sample: sample) + default: + EmptyView() } + } } #Preview { - ContentView() + ContentView() } diff --git a/firebaseai/FirebaseAIExample/Features/Chat/Models/ChatMessage.swift b/firebaseai/FirebaseAIExample/Features/Chat/Models/ChatMessage.swift index 05a5e67c1..b181ca19d 100644 --- a/firebaseai/FirebaseAIExample/Features/Chat/Models/ChatMessage.swift +++ b/firebaseai/FirebaseAIExample/Features/Chat/Models/ChatMessage.swift @@ -13,122 +13,121 @@ // limitations under the License. #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif -import ConversationKit import Foundation +import ConversationKit import UIKit public struct ChatMessage: Message { - public let id: UUID = .init() - public var content: String? - public let participant: Participant - public let error: (any Error)? - public var pending = false - public var groundingMetadata: GroundingMetadata? - public var attachments: [MultimodalAttachment] = [] - public var image: UIImage? - // required by the Message protocol, but not used in this app - public var imageURL: String? - - public init(content: String? = nil, imageURL: String? = nil, participant: Participant, - error: (any Error)? = nil, pending: Bool = false, - attachments: [MultimodalAttachment] = [], image: UIImage? = nil) - { - self.content = content - self.imageURL = imageURL - self.participant = participant - self.error = error - self.pending = pending - self.attachments = attachments - self.image = image - } - - // Protocol-required initializer - public init(content: String?, imageURL: String? 
= nil, participant: Participant) { - self.content = content - self.imageURL = imageURL - self.participant = participant - error = nil - } + public let id: UUID = .init() + public var content: String? + public let participant: Participant + public let error: (any Error)? + public var pending = false + public var groundingMetadata: GroundingMetadata? + public var attachments: [MultimodalAttachment] = [] + public var image: UIImage? + // required by the Message protocol, but not used in this app + public var imageURL: String? + + public init(content: String? = nil, imageURL: String? = nil, participant: Participant, + error: (any Error)? = nil, pending: Bool = false, + attachments: [MultimodalAttachment] = [], image: UIImage? = nil) { + self.content = content + self.imageURL = imageURL + self.participant = participant + self.error = error + self.pending = pending + self.attachments = attachments + self.image = image + } + + // Protocol-required initializer + public init(content: String?, imageURL: String? = nil, participant: Participant) { + self.content = content + self.imageURL = imageURL + self.participant = participant + error = nil + } } -public extension ChatMessage { - static func pending(participant: Participant) -> ChatMessage { - Self(content: "", participant: participant, pending: true) - } +extension ChatMessage { + public static func pending(participant: Participant) -> ChatMessage { + Self(content: "", participant: participant, pending: true) + } } // Implement Equatable and Hashable for ChatMessage (ignore error) +extension ChatMessage { + public static func == (lhs: ChatMessage, rhs: ChatMessage) -> Bool { + lhs.id == rhs.id && + lhs.content == rhs.content && + lhs.participant == rhs.participant && + lhs.image == rhs.image && + lhs.attachments == rhs.attachments + // intentionally ignore `error` + } + + public func hash(into hasher: inout Hasher) { + hasher.combine(id) + hasher.combine(content) + hasher.combine(participant) + hasher.combine(image) + hasher.combine(attachments) + // intentionally ignore `error` + } +} + public extension ChatMessage { - static func == (lhs: ChatMessage, rhs: ChatMessage) -> Bool { - lhs.id == rhs.id && - lhs.content == rhs.content && - lhs.participant == rhs.participant && - lhs.image == rhs.image && - lhs.attachments == rhs.attachments - // intentionally ignore `error` + static var samples: [ChatMessage] = [ + .init(content: "Hello. What can I do for you today?", participant: .other), + .init(content: "Show me a simple loop in Swift.", participant: .user), + .init(content: """ + Sure, here is a simple loop in Swift: + + # Example 1 + ``` + for i in 1...5 { + print("Hello, world!") } + ``` + + This loop will print the string "Hello, world!" five times. The for loop iterates over a range of numbers, + in this case the numbers from 1 to 5. The variable i is assigned each number in the range, and the code inside the loop is executed. - func hash(into hasher: inout Hasher) { - hasher.combine(id) - hasher.combine(content) - hasher.combine(participant) - hasher.combine(image) - hasher.combine(attachments) - // intentionally ignore `error` + **Here is another example of a simple loop in Swift:** + ```swift + var sum = 0 + for i in 1...100 { + sum += i } -} + print("The sum of the numbers from 1 to 100 is \\(sum).") + ``` -public extension ChatMessage { - static var samples: [ChatMessage] = [ - .init(content: "Hello. 
What can I do for you today?", participant: .other), - .init(content: "Show me a simple loop in Swift.", participant: .user), - .init(content: """ - Sure, here is a simple loop in Swift: - - # Example 1 - ``` - for i in 1...5 { - print("Hello, world!") - } - ``` - - This loop will print the string "Hello, world!" five times. The for loop iterates over a range of numbers, - in this case the numbers from 1 to 5. The variable i is assigned each number in the range, and the code inside the loop is executed. - - **Here is another example of a simple loop in Swift:** - ```swift - var sum = 0 - for i in 1...100 { - sum += i - } - print("The sum of the numbers from 1 to 100 is \\(sum).") - ``` - - This loop calculates the sum of the numbers from 1 to 100. The variable sum is initialized to 0, and then the for loop iterates over the range of numbers from 1 to 100. The variable i is assigned each number in the range, and the value of i is added to the sum variable. After the loop has finished executing, the value of sum is printed to the console. - """, participant: .other), - ] - - static var sample = samples[0] + This loop calculates the sum of the numbers from 1 to 100. The variable sum is initialized to 0, and then the for loop iterates over the range of numbers from 1 to 100. The variable i is assigned each number in the range, and the value of i is added to the sum variable. After the loop has finished executing, the value of sum is printed to the console. + """, participant: .other), + ] + + static var sample = samples[0] } public extension ChatMessage { - static func from(_ modelContent: ModelContent) -> ChatMessage? { - // TODO: add non-text parts to message when multi-model support is added - let text = modelContent.parts.compactMap { ($0 as? TextPart)?.text }.joined() - guard !text.isEmpty else { - return nil - } + static func from(_ modelContent: ModelContent) -> ChatMessage? { + // TODO: add non-text parts to message when multi-model support is added + let text = modelContent.parts.compactMap { ($0 as? TextPart)?.text }.joined() + guard !text.isEmpty else { + return nil + } - let participant: Participant = (modelContent.role == "user") ? .user : .other + let participant: Participant = (modelContent.role == "user") ? .user : .other - return ChatMessage(content: text, participant: participant) - } + return ChatMessage(content: text, participant: participant) + } - static func from(_ modelContents: [ModelContent]) -> [ChatMessage] { - return modelContents.compactMap { from($0) } - } + static func from(_ modelContents: [ModelContent]) -> [ChatMessage] { + return modelContents.compactMap { from($0) } + } } diff --git a/firebaseai/FirebaseAIExample/Features/Chat/Screens/ChatScreen.swift b/firebaseai/FirebaseAIExample/Features/Chat/Screens/ChatScreen.swift index 870f90b7f..ac405951f 100644 --- a/firebaseai/FirebaseAIExample/Features/Chat/Screens/ChatScreen.swift +++ b/firebaseai/FirebaseAIExample/Features/Chat/Screens/ChatScreen.swift @@ -13,60 +13,59 @@ // limitations under the License. #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif -import ConversationKit import SwiftUI +import ConversationKit struct ChatScreen: View { - let backendType: BackendOption - @StateObject var viewModel: ChatViewModel + let backendType: BackendOption + @StateObject var viewModel: ChatViewModel - init(backendType: BackendOption, sample: Sample? 
= nil) { - self.backendType = backendType - _viewModel = - StateObject(wrappedValue: ChatViewModel(backendType: backendType, - sample: sample)) - } + init(backendType: BackendOption, sample: Sample? = nil) { + self.backendType = backendType + _viewModel = + StateObject(wrappedValue: ChatViewModel(backendType: backendType, + sample: sample)) + } - var body: some View { - NavigationStack { - ConversationView(messages: $viewModel.messages, - userPrompt: viewModel.initialPrompt) - { message in - MessageView(message: message) - } - .disableAttachments() - .onSendMessage { message in - await viewModel.sendMessage(message.content ?? "", streaming: true) - } - .onError { _ in - viewModel.presentErrorDetails = true - } - .sheet(isPresented: $viewModel.presentErrorDetails) { - if let error = viewModel.error { - ErrorDetailsView(error: error) - } - } - .toolbar { - ToolbarItem(placement: .primaryAction) { - Button(action: newChat) { - Image(systemName: "square.and.pencil") - } - } - } - .navigationTitle(viewModel.title) - .navigationBarTitleDisplayMode(.inline) + var body: some View { + NavigationStack { + ConversationView(messages: $viewModel.messages, + userPrompt: viewModel.initialPrompt) { message in + MessageView(message: message) + } + .disableAttachments() + .onSendMessage { message in + await viewModel.sendMessage(message.content ?? "", streaming: true) + } + .onError { error in + viewModel.presentErrorDetails = true + } + .sheet(isPresented: $viewModel.presentErrorDetails) { + if let error = viewModel.error { + ErrorDetailsView(error: error) } + } + .toolbar { + ToolbarItem(placement: .primaryAction) { + Button(action: newChat) { + Image(systemName: "square.and.pencil") + } + } + } + .navigationTitle(viewModel.title) + .navigationBarTitleDisplayMode(.inline) } + } - private func newChat() { - viewModel.startNewChat() - } + private func newChat() { + viewModel.startNewChat() + } } #Preview { - ChatScreen(backendType: .googleAI) + ChatScreen(backendType: .googleAI) } diff --git a/firebaseai/FirebaseAIExample/Features/Chat/ViewModels/ChatViewModel.swift b/firebaseai/FirebaseAIExample/Features/Chat/ViewModels/ChatViewModel.swift index f6e1e6c60..02d2beebe 100644 --- a/firebaseai/FirebaseAIExample/Features/Chat/ViewModels/ChatViewModel.swift +++ b/firebaseai/FirebaseAIExample/Features/Chat/ViewModels/ChatViewModel.swift @@ -13,177 +13,177 @@ // limitations under the License. #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif -import Combine -import ConversationKit import Foundation import UIKit +import Combine +import ConversationKit @MainActor class ChatViewModel: ObservableObject { - /// This array holds both the user's and the system's chat messages - @Published var messages = [ChatMessage]() - - /// Indicates we're waiting for the model to finish - @Published var busy = false + /// This array holds both the user's and the system's chat messages + @Published var messages = [ChatMessage]() - @Published var error: Error? - var hasError: Bool { - return error != nil - } + /// Indicates we're waiting for the model to finish + @Published var busy = false - @Published var presentErrorDetails: Bool = false + @Published var error: Error? 
+ var hasError: Bool { + return error != nil + } - @Published var initialPrompt: String = "" - @Published var title: String = "" + @Published var presentErrorDetails: Bool = false - private var model: GenerativeModel - private var chat: Chat + @Published var initialPrompt: String = "" + @Published var title: String = "" - private var chatTask: Task? + private var model: GenerativeModel + private var chat: Chat - private var sample: Sample? - private var backendType: BackendOption + private var chatTask: Task? - init(backendType: BackendOption, sample: Sample? = nil) { - self.sample = sample - self.backendType = backendType + private var sample: Sample? + private var backendType: BackendOption - let firebaseService = backendType == .googleAI - ? FirebaseAI.firebaseAI(backend: .googleAI()) - : FirebaseAI.firebaseAI(backend: .vertexAI()) + init(backendType: BackendOption, sample: Sample? = nil) { + self.sample = sample + self.backendType = backendType - model = firebaseService.generativeModel( - modelName: sample?.modelName ?? "gemini-2.5-flash", - generationConfig: sample?.generationConfig, - systemInstruction: sample?.systemInstruction - ) + let firebaseService = backendType == .googleAI + ? FirebaseAI.firebaseAI(backend: .googleAI()) + : FirebaseAI.firebaseAI(backend: .vertexAI()) - if let chatHistory = sample?.chatHistory, !chatHistory.isEmpty { - messages = ChatMessage.from(chatHistory) - chat = model.startChat(history: chatHistory) - } else { - chat = model.startChat() - } + model = firebaseService.generativeModel( + modelName: sample?.modelName ?? "gemini-2.5-flash", + generationConfig: sample?.generationConfig, + systemInstruction: sample?.systemInstruction + ) - initialPrompt = sample?.initialPrompt ?? "" - title = sample?.title ?? "" + if let chatHistory = sample?.chatHistory, !chatHistory.isEmpty { + messages = ChatMessage.from(chatHistory) + chat = model.startChat(history: chatHistory) + } else { + chat = model.startChat() } - func sendMessage(_ text: String, streaming: Bool = true) async { - error = nil - if streaming { - await internalSendMessageStreaming(text) - } else { - await internalSendMessage(text) - } - } + initialPrompt = sample?.initialPrompt ?? "" + title = sample?.title ?? "" + } - func startNewChat() { - stop() - error = nil - chat = model.startChat() - messages.removeAll() - initialPrompt = "" + func sendMessage(_ text: String, streaming: Bool = true) async { + error = nil + if streaming { + await internalSendMessageStreaming(text) + } else { + await internalSendMessage(text) } - - func stop() { - chatTask?.cancel() - error = nil + } + + func startNewChat() { + stop() + error = nil + chat = model.startChat() + messages.removeAll() + initialPrompt = "" + } + + func stop() { + chatTask?.cancel() + error = nil + } + + private func internalSendMessageStreaming(_ text: String) async { + chatTask?.cancel() + + chatTask = Task { + busy = true + defer { + busy = false + } + + // first, add the user's message to the chat + let userMessage = ChatMessage(content: text, participant: .user) + messages.append(userMessage) + + // add a pending message while we're waiting for a response from the backend + let systemMessage = ChatMessage.pending(participant: .other) + messages.append(systemMessage) + + do { + let responseStream = try chat.sendMessageStream(text) + for try await chunk in responseStream { + messages[messages.count - 1].pending = false + if let text = chunk.text { + messages[messages.count - 1] + .content = (messages[messages.count - 1].content ?? 
"") + text + } + + if let inlineDataPart = chunk.inlineDataParts.first { + if let uiImage = UIImage(data: inlineDataPart.data) { + messages[messages.count - 1].image = uiImage + } else { + print("Failed to convert inline data to UIImage") + } + } + } + } catch { + self.error = error + print(error.localizedDescription) + let errorMessage = ChatMessage(content: "An error occurred. Please try again.", + participant: .other, + error: error, + pending: false) + messages[messages.count - 1] = errorMessage + } } + } - private func internalSendMessageStreaming(_ text: String) async { - chatTask?.cancel() + private func internalSendMessage(_ text: String) async { + chatTask?.cancel() - chatTask = Task { - busy = true - defer { - busy = false - } + chatTask = Task { + busy = true + defer { + busy = false + } - // first, add the user's message to the chat - let userMessage = ChatMessage(content: text, participant: .user) - messages.append(userMessage) - - // add a pending message while we're waiting for a response from the backend - let systemMessage = ChatMessage.pending(participant: .other) - messages.append(systemMessage) - - do { - let responseStream = try chat.sendMessageStream(text) - for try await chunk in responseStream { - messages[messages.count - 1].pending = false - if let text = chunk.text { - messages[messages.count - 1] - .content = (messages[messages.count - 1].content ?? "") + text - } - - if let inlineDataPart = chunk.inlineDataParts.first { - if let uiImage = UIImage(data: inlineDataPart.data) { - messages[messages.count - 1].image = uiImage - } else { - print("Failed to convert inline data to UIImage") - } - } - } - } catch { - self.error = error - print(error.localizedDescription) - let errorMessage = ChatMessage(content: "An error occurred. Please try again.", - participant: .other, - error: error, - pending: false) - messages[messages.count - 1] = errorMessage - } - } - } + // first, add the user's message to the chat + let userMessage = ChatMessage(content: text, participant: .user) + messages.append(userMessage) - private func internalSendMessage(_ text: String) async { - chatTask?.cancel() + // add a pending message while we're waiting for a response from the backend + let systemMessage = ChatMessage.pending(participant: .other) + messages.append(systemMessage) - chatTask = Task { - busy = true - defer { - busy = false - } + do { + var response: GenerateContentResponse? + response = try await chat.sendMessage(text) - // first, add the user's message to the chat - let userMessage = ChatMessage(content: text, participant: .user) - messages.append(userMessage) - - // add a pending message while we're waiting for a response from the backend - let systemMessage = ChatMessage.pending(participant: .other) - messages.append(systemMessage) - - do { - var response: GenerateContentResponse? - response = try await chat.sendMessage(text) - - if let responseText = response?.text { - // replace pending message with backend response - messages[messages.count - 1].content = responseText - messages[messages.count - 1].pending = false - } - - if let inlineDataPart = response?.inlineDataParts.first { - if let uiImage = UIImage(data: inlineDataPart.data) { - messages[messages.count - 1].image = uiImage - } else { - print("Failed to convert inline data to UIImage") - } - } - } catch { - self.error = error - print(error.localizedDescription) - let errorMessage = ChatMessage(content: "An error occurred. 
Please try again.", - participant: .other, - error: error, - pending: false) - messages[messages.count - 1] = errorMessage - } + if let responseText = response?.text { + // replace pending message with backend response + messages[messages.count - 1].content = responseText + messages[messages.count - 1].pending = false + } + + if let inlineDataPart = response?.inlineDataParts.first { + if let uiImage = UIImage(data: inlineDataPart.data) { + messages[messages.count - 1].image = uiImage + } else { + print("Failed to convert inline data to UIImage") + } } + } catch { + self.error = error + print(error.localizedDescription) + let errorMessage = ChatMessage(content: "An error occurred. Please try again.", + participant: .other, + error: error, + pending: false) + messages[messages.count - 1] = errorMessage + } } + } } diff --git a/firebaseai/FirebaseAIExample/Features/Chat/Views/BouncingDots.swift b/firebaseai/FirebaseAIExample/Features/Chat/Views/BouncingDots.swift index ea4d080ae..6895e6723 100644 --- a/firebaseai/FirebaseAIExample/Features/Chat/Views/BouncingDots.swift +++ b/firebaseai/FirebaseAIExample/Features/Chat/Views/BouncingDots.swift @@ -15,63 +15,63 @@ import SwiftUI struct BouncingDots: View { - @State - private var dot1YOffset: CGFloat = 0.0 + @State + private var dot1YOffset: CGFloat = 0.0 - @State - private var dot2YOffset: CGFloat = 0.0 + @State + private var dot2YOffset: CGFloat = 0.0 - @State - private var dot3YOffset: CGFloat = 0.0 + @State + private var dot3YOffset: CGFloat = 0.0 - let animation = Animation.easeInOut(duration: 0.8) - .repeatForever(autoreverses: true) + let animation = Animation.easeInOut(duration: 0.8) + .repeatForever(autoreverses: true) - var body: some View { - HStack(spacing: 8) { - Circle() - .fill(Color.white) - .frame(width: 10, height: 10) - .offset(y: dot1YOffset) - .onAppear { - withAnimation(self.animation.delay(0.0)) { - self.dot1YOffset = -5 - } - } - Circle() - .fill(Color.white) - .frame(width: 10, height: 10) - .offset(y: dot2YOffset) - .onAppear { - withAnimation(self.animation.delay(0.2)) { - self.dot2YOffset = -5 - } - } - Circle() - .fill(Color.white) - .frame(width: 10, height: 10) - .offset(y: dot3YOffset) - .onAppear { - withAnimation(self.animation.delay(0.4)) { - self.dot3YOffset = -5 - } - } + var body: some View { + HStack(spacing: 8) { + Circle() + .fill(Color.white) + .frame(width: 10, height: 10) + .offset(y: dot1YOffset) + .onAppear { + withAnimation(self.animation.delay(0.0)) { + self.dot1YOffset = -5 + } } + Circle() + .fill(Color.white) + .frame(width: 10, height: 10) + .offset(y: dot2YOffset) .onAppear { - let baseOffset: CGFloat = -2 - - self.dot1YOffset = baseOffset - self.dot2YOffset = baseOffset - self.dot3YOffset = baseOffset + withAnimation(self.animation.delay(0.2)) { + self.dot2YOffset = -5 + } + } + Circle() + .fill(Color.white) + .frame(width: 10, height: 10) + .offset(y: dot3YOffset) + .onAppear { + withAnimation(self.animation.delay(0.4)) { + self.dot3YOffset = -5 + } } } + .onAppear { + let baseOffset: CGFloat = -2 + + self.dot1YOffset = baseOffset + self.dot2YOffset = baseOffset + self.dot3YOffset = baseOffset + } + } } struct BouncingDots_Previews: PreviewProvider { - static var previews: some View { - BouncingDots() - .frame(width: 200, height: 50) - .background(.blue) - .roundedCorner(10, corners: [.allCorners]) - } + static var previews: some View { + BouncingDots() + .frame(width: 200, height: 50) + .background(.blue) + .roundedCorner(10, corners: [.allCorners]) + } } diff --git 
a/firebaseai/FirebaseAIExample/Features/Chat/Views/MessageView.swift b/firebaseai/FirebaseAIExample/Features/Chat/Views/MessageView.swift index 99c2ad435..9b06b4d90 100644 --- a/firebaseai/FirebaseAIExample/Features/Chat/Views/MessageView.swift +++ b/firebaseai/FirebaseAIExample/Features/Chat/Views/MessageView.swift @@ -16,156 +16,156 @@ import ConversationKit import MarkdownUI import SwiftUI #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif struct RoundedCorner: Shape { - var radius: CGFloat = .infinity - var corners: UIRectCorner = .allCorners + var radius: CGFloat = .infinity + var corners: UIRectCorner = .allCorners - func path(in rect: CGRect) -> Path { - let path = UIBezierPath( - roundedRect: rect, - byRoundingCorners: corners, - cornerRadii: CGSize(width: radius, height: radius) - ) - return Path(path.cgPath) - } + func path(in rect: CGRect) -> Path { + let path = UIBezierPath( + roundedRect: rect, + byRoundingCorners: corners, + cornerRadii: CGSize(width: radius, height: radius) + ) + return Path(path.cgPath) + } } extension View { - func roundedCorner(_ radius: CGFloat, corners: UIRectCorner) -> some View { - clipShape(RoundedCorner(radius: radius, corners: corners)) - } + func roundedCorner(_ radius: CGFloat, corners: UIRectCorner) -> some View { + clipShape(RoundedCorner(radius: radius, corners: corners)) + } } struct MessageContentView: View { - @Environment(\.presentErrorAction) var presentErrorAction - var message: ChatMessage + @Environment(\.presentErrorAction) var presentErrorAction + var message: ChatMessage - var body: some View { - if message.pending { - BouncingDots() - } else { - // Error Message - if let error = message.error { - HStack { - Text("An error occurred.") - Button("More information", systemImage: "info.circle") { - presentErrorAction?(error) - } - .labelStyle(.iconOnly) - } - } else { - VStack(alignment: .leading, spacing: 8) { - if message.participant == .user && !message.attachments.isEmpty { - AttachmentPreviewScrollView(attachments: message.attachments) - } + var body: some View { + if message.pending { + BouncingDots() + } else { + // Error Message + if let error = message.error { + HStack { + Text("An error occurred.") + Button("More information", systemImage: "info.circle") { + presentErrorAction?(error) + } + .labelStyle(.iconOnly) + } + } else { + VStack(alignment: .leading, spacing: 8) { + if message.participant == .user && !message.attachments.isEmpty { + AttachmentPreviewScrollView(attachments: message.attachments) + } - if let image = message.image { - Image(uiImage: image) - .resizable() - .aspectRatio(contentMode: .fit) - .frame(maxWidth: 300, maxHeight: 300) - .clipShape(RoundedRectangle(cornerRadius: 8)) - } + if let image = message.image { + Image(uiImage: image) + .resizable() + .aspectRatio(contentMode: .fit) + .frame(maxWidth: 300, maxHeight: 300) + .clipShape(RoundedRectangle(cornerRadius: 8)) + } - // Grounded Response - if let groundingMetadata = message.groundingMetadata { - GroundedResponseView(message: message, groundingMetadata: groundingMetadata) - } else { - // Non-grounded response - ResponseTextView(message: message) - } - } - } + // Grounded Response + if let groundingMetadata = message.groundingMetadata { + GroundedResponseView(message: message, groundingMetadata: groundingMetadata) + } else { + // Non-grounded response + ResponseTextView(message: message) + } } + } } + } } struct ResponseTextView: View { - var message: ChatMessage + var 
message: ChatMessage - var body: some View { - Markdown(message.content ?? "") - .markdownTextStyle { - FontFamilyVariant(.normal) - FontSize(.em(0.85)) - ForegroundColor(message.participant == .other ? Color(UIColor.label) : .white) - } - .markdownBlockStyle(\.codeBlock) { configuration in - configuration.label - .relativeLineSpacing(.em(0.25)) - .markdownTextStyle { - FontFamilyVariant(.monospaced) - FontSize(.em(0.85)) - ForegroundColor(Color(.label)) - } - .padding() - .background(Color(.secondarySystemBackground)) - .clipShape(RoundedRectangle(cornerRadius: 8)) - .markdownMargin(top: .zero, bottom: .em(0.8)) - } - } + var body: some View { + Markdown(message.content ?? "") + .markdownTextStyle { + FontFamilyVariant(.normal) + FontSize(.em(0.85)) + ForegroundColor(message.participant == .other ? Color(UIColor.label) : .white) + } + .markdownBlockStyle(\.codeBlock) { configuration in + configuration.label + .relativeLineSpacing(.em(0.25)) + .markdownTextStyle { + FontFamilyVariant(.monospaced) + FontSize(.em(0.85)) + ForegroundColor(Color(.label)) + } + .padding() + .background(Color(.secondarySystemBackground)) + .clipShape(RoundedRectangle(cornerRadius: 8)) + .markdownMargin(top: .zero, bottom: .em(0.8)) + } + } } struct MessageView: View { - var message: ChatMessage + var message: ChatMessage - private var participantLabel: String { - message.participant == .user ? "User" : "Model" - } + private var participantLabel: String { + message.participant == .user ? "User" : "Model" + } - var body: some View { - VStack(alignment: message.participant == .user ? .trailing : .leading, spacing: 4) { - // Sender label - Text(participantLabel) - .font(.caption2) - .fontWeight(.medium) - .foregroundColor(.secondary) - .textCase(.uppercase) - .padding(.horizontal, 8) - .padding(.vertical, 2) - .frame(maxWidth: .infinity, alignment: message.participant == .user ? .trailing : .leading) + var body: some View { + VStack(alignment: message.participant == .user ? .trailing : .leading, spacing: 4) { + // Sender label + Text(participantLabel) + .font(.caption2) + .fontWeight(.medium) + .foregroundColor(.secondary) + .textCase(.uppercase) + .padding(.horizontal, 8) + .padding(.vertical, 2) + .frame(maxWidth: .infinity, alignment: message.participant == .user ? .trailing : .leading) - // Message content - HStack { - if message.participant == .user { - Spacer() - } - MessageContentView(message: message) - .padding(10) - .background(message.participant == .other - ? Color(UIColor.systemFill) - : Color(UIColor.systemBlue)) - .roundedCorner(10, - corners: [ - .topLeft, - .topRight, - message.participant == .other ? .bottomRight : .bottomLeft, - ]) - if message.participant == .other { - Spacer() - } - } + // Message content + HStack { + if message.participant == .user { + Spacer() + } + MessageContentView(message: message) + .padding(10) + .background(message.participant == .other + ? Color(UIColor.systemFill) + : Color(UIColor.systemBlue)) + .roundedCorner(10, + corners: [ + .topLeft, + .topRight, + message.participant == .other ? 
.bottomRight : .bottomLeft, + ]) + if message.participant == .other { + Spacer() } - .listRowSeparator(.hidden) + } } + .listRowSeparator(.hidden) + } } struct MessageView_Previews: PreviewProvider { - static var previews: some View { - NavigationView { - List { - MessageView(message: ChatMessage.samples[0]) - MessageView(message: ChatMessage.samples[1]) - MessageView(message: ChatMessage.samples[2]) - MessageView(message: ChatMessage(content: "Hello!", participant: .other, pending: true)) - } - .listStyle(.plain) - .navigationTitle("Chat example") - } + static var previews: some View { + NavigationView { + List { + MessageView(message: ChatMessage.samples[0]) + MessageView(message: ChatMessage.samples[1]) + MessageView(message: ChatMessage.samples[2]) + MessageView(message: ChatMessage(content: "Hello!", participant: .other, pending: true)) + } + .listStyle(.plain) + .navigationTitle("Chat example") } + } } diff --git a/firebaseai/FirebaseAIExample/Features/FunctionCalling/Screens/FunctionCallingScreen.swift b/firebaseai/FirebaseAIExample/Features/FunctionCalling/Screens/FunctionCallingScreen.swift index ad9a4288e..97d26081e 100644 --- a/firebaseai/FirebaseAIExample/Features/FunctionCalling/Screens/FunctionCallingScreen.swift +++ b/firebaseai/FirebaseAIExample/Features/FunctionCalling/Screens/FunctionCallingScreen.swift @@ -13,60 +13,59 @@ // limitations under the License. #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif -import ConversationKit import SwiftUI +import ConversationKit struct FunctionCallingScreen: View { - let backendType: BackendOption - @StateObject var viewModel: FunctionCallingViewModel + let backendType: BackendOption + @StateObject var viewModel: FunctionCallingViewModel - init(backendType: BackendOption, sample: Sample? = nil) { - self.backendType = backendType - _viewModel = - StateObject(wrappedValue: FunctionCallingViewModel(backendType: backendType, - sample: sample)) - } + init(backendType: BackendOption, sample: Sample? = nil) { + self.backendType = backendType + _viewModel = + StateObject(wrappedValue: FunctionCallingViewModel(backendType: backendType, + sample: sample)) + } - var body: some View { - NavigationStack { - ConversationView(messages: $viewModel.messages, - userPrompt: viewModel.initialPrompt) - { message in - MessageView(message: message) - } - .disableAttachments() - .onSendMessage { message in - await viewModel.sendMessage(message.content ?? "", streaming: true) - } - .onError { _ in - viewModel.presentErrorDetails = true - } - .sheet(isPresented: $viewModel.presentErrorDetails) { - if let error = viewModel.error { - ErrorDetailsView(error: error) - } - } - .toolbar { - ToolbarItem(placement: .primaryAction) { - Button(action: newChat) { - Image(systemName: "square.and.pencil") - } - } - } - .navigationTitle(viewModel.title) - .navigationBarTitleDisplayMode(.inline) + var body: some View { + NavigationStack { + ConversationView(messages: $viewModel.messages, + userPrompt: viewModel.initialPrompt) { message in + MessageView(message: message) + } + .disableAttachments() + .onSendMessage { message in + await viewModel.sendMessage(message.content ?? 
"", streaming: true) + } + .onError { error in + viewModel.presentErrorDetails = true + } + .sheet(isPresented: $viewModel.presentErrorDetails) { + if let error = viewModel.error { + ErrorDetailsView(error: error) } + } + .toolbar { + ToolbarItem(placement: .primaryAction) { + Button(action: newChat) { + Image(systemName: "square.and.pencil") + } + } + } + .navigationTitle(viewModel.title) + .navigationBarTitleDisplayMode(.inline) } + } - private func newChat() { - viewModel.startNewChat() - } + private func newChat() { + viewModel.startNewChat() + } } #Preview { - FunctionCallingScreen(backendType: .googleAI) + FunctionCallingScreen(backendType: .googleAI) } diff --git a/firebaseai/FirebaseAIExample/Features/FunctionCalling/Services/WeatherService.swift b/firebaseai/FirebaseAIExample/Features/FunctionCalling/Services/WeatherService.swift index c9379b98e..8b257af1c 100644 --- a/firebaseai/FirebaseAIExample/Features/FunctionCalling/Services/WeatherService.swift +++ b/firebaseai/FirebaseAIExample/Features/FunctionCalling/Services/WeatherService.swift @@ -13,19 +13,19 @@ // limitations under the License. #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif import Foundation import UIKit class WeatherService { - public static func fetchWeather(city _: String, state _: String, date _: String) -> JSONObject { - return [ - "temperature": .number(38), - "chancePrecipitation": .string("56%"), - "cloudCover": .string("partlyCloudy"), - ] - } + public static func fetchWeather(city: String, state: String, date: String) -> JSONObject { + return [ + "temperature": .number(38), + "chancePrecipitation": .string("56%"), + "cloudCover": .string("partlyCloudy"), + ] + } } diff --git a/firebaseai/FirebaseAIExample/Features/FunctionCalling/ViewModels/FunctionCallingViewModel.swift b/firebaseai/FirebaseAIExample/Features/FunctionCalling/ViewModels/FunctionCallingViewModel.swift index a567cbc0d..215d513b2 100644 --- a/firebaseai/FirebaseAIExample/Features/FunctionCalling/ViewModels/FunctionCallingViewModel.swift +++ b/firebaseai/FirebaseAIExample/Features/FunctionCalling/ViewModels/FunctionCallingViewModel.swift @@ -13,278 +13,276 @@ // limitations under the License. #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif -import Combine -import ConversationKit import Foundation import UIKit +import Combine +import ConversationKit @MainActor class FunctionCallingViewModel: ObservableObject { - /// This array holds both the user's and the system's chat messages - @Published var messages = [ChatMessage]() + /// This array holds both the user's and the system's chat messages + @Published var messages = [ChatMessage]() - /// Indicates we're waiting for the model to finish - @Published var busy = false + /// Indicates we're waiting for the model to finish + @Published var busy = false - @Published var error: Error? - var hasError: Bool { - return error != nil - } + @Published var error: Error? + var hasError: Bool { + return error != nil + } - @Published var presentErrorDetails: Bool = false + @Published var presentErrorDetails: Bool = false - @Published var initialPrompt: String = "" - @Published var title: String = "" + @Published var initialPrompt: String = "" + @Published var title: String = "" - private var model: GenerativeModel - private var chat: Chat + private var model: GenerativeModel + private var chat: Chat - private var chatTask: Task? 
+ private var chatTask: Task? - private var sample: Sample? - private var backendType: BackendOption + private var sample: Sample? + private var backendType: BackendOption - init(backendType: BackendOption, sample: Sample? = nil) { - self.sample = sample - self.backendType = backendType + init(backendType: BackendOption, sample: Sample? = nil) { + self.sample = sample + self.backendType = backendType - let firebaseService = backendType == .googleAI - ? FirebaseAI.firebaseAI(backend: .googleAI()) - : FirebaseAI.firebaseAI(backend: .vertexAI()) + let firebaseService = backendType == .googleAI + ? FirebaseAI.firebaseAI(backend: .googleAI()) + : FirebaseAI.firebaseAI(backend: .vertexAI()) - // create a generative model with sample data - model = firebaseService.generativeModel( - modelName: sample?.modelName ?? "gemini-2.5-flash-lite", - tools: sample?.tools, - systemInstruction: sample?.systemInstruction - ) + // create a generative model with sample data + model = firebaseService.generativeModel( + modelName: sample?.modelName ?? "gemini-2.5-flash-lite", + tools: sample?.tools, + systemInstruction: sample?.systemInstruction + ) + + chat = model.startChat() - chat = model.startChat() + initialPrompt = sample?.initialPrompt ?? "" + title = sample?.title ?? "" + } - initialPrompt = sample?.initialPrompt ?? "" - title = sample?.title ?? "" + func sendMessage(_ text: String, streaming: Bool = true) async { + error = nil + if streaming { + await internalSendMessageStreaming(text) + } else { + await internalSendMessage(text) } + } + + func startNewChat() { + stop() + error = nil + chat = model.startChat() + messages.removeAll() + initialPrompt = "" + } + + func stop() { + chatTask?.cancel() + error = nil + } + + private func internalSendMessageStreaming(_ text: String) async { + chatTask?.cancel() + + chatTask = Task { + busy = true + defer { + busy = false + } + + // first, add the user's message to the chat + let userMessage = ChatMessage(content: text, participant: .user) + messages.append(userMessage) + + // add a pending message while we're waiting for a response from the backend + let systemMessage = ChatMessage.pending(participant: .other) + messages.append(systemMessage) + + do { + let responseStream = try chat.sendMessageStream(text) + + var functionCalls = [FunctionCallPart]() + + for try await chunk in responseStream { + if !chunk.functionCalls.isEmpty { + functionCalls.append(contentsOf: chunk.functionCalls) + } + if let text = chunk.text { + messages[messages.count - 1] + .content = (messages[messages.count - 1].content ?? "") + text + messages[messages.count - 1].pending = false + } + } - func sendMessage(_ text: String, streaming: Bool = true) async { - error = nil - if streaming { - await internalSendMessageStreaming(text) - } else { - await internalSendMessage(text) + // On functionCalls, never keep reading the old stream or call the second API inside the first for-loop. + // Start a NEW stream only after the function response turn is sent. + if !functionCalls.isEmpty { + try await handleFunctionCallsStreaming(functionCalls) } + } catch { + self.error = error + print(error.localizedDescription) + let errorMessage = ChatMessage(content: "An error occurred. 
Please try again.", + participant: .other, + error: error, + pending: false) + messages[messages.count - 1] = errorMessage + } } + } - func startNewChat() { - stop() - error = nil - chat = model.startChat() - messages.removeAll() - initialPrompt = "" - } + private func internalSendMessage(_ text: String) async { + chatTask?.cancel() - func stop() { - chatTask?.cancel() - error = nil - } + chatTask = Task { + busy = true + defer { + busy = false + } + + // first, add the user's message to the chat + let userMessage = ChatMessage(content: text, participant: .user) + messages.append(userMessage) + + // add a pending message while we're waiting for a response from the backend + let systemMessage = ChatMessage.pending(participant: .other) + messages.append(systemMessage) - private func internalSendMessageStreaming(_ text: String) async { - chatTask?.cancel() - - chatTask = Task { - busy = true - defer { - busy = false - } - - // first, add the user's message to the chat - let userMessage = ChatMessage(content: text, participant: .user) - messages.append(userMessage) - - // add a pending message while we're waiting for a response from the backend - let systemMessage = ChatMessage.pending(participant: .other) - messages.append(systemMessage) - - do { - let responseStream = try chat.sendMessageStream(text) - - var functionCalls = [FunctionCallPart]() - - for try await chunk in responseStream { - if !chunk.functionCalls.isEmpty { - functionCalls.append(contentsOf: chunk.functionCalls) - } - if let text = chunk.text { - messages[messages.count - 1] - .content = (messages[messages.count - 1].content ?? "") + text - messages[messages.count - 1].pending = false - } - } - - // On functionCalls, never keep reading the old stream or call the second API inside the first for-loop. - // Start a NEW stream only after the function response turn is sent. - if !functionCalls.isEmpty { - try await handleFunctionCallsStreaming(functionCalls) - } - } catch { - self.error = error - print(error.localizedDescription) - let errorMessage = ChatMessage(content: "An error occurred. Please try again.", - participant: .other, - error: error, - pending: false) - messages[messages.count - 1] = errorMessage - } + do { + let response = try await chat.sendMessage(text) + + if !response.functionCalls.isEmpty { + try await handleFunctionCalls(response) + } else { + if let responseText = response.text { + // replace pending message with backend response + messages[messages.count - 1].content = responseText + messages[messages.count - 1].pending = false + } } + } catch { + self.error = error + print(error.localizedDescription) + let errorMessage = ChatMessage(content: "An error occurred. 
Please try again.", + participant: .other, + error: error, + pending: false) + messages[messages.count - 1] = errorMessage + } } - - private func internalSendMessage(_ text: String) async { - chatTask?.cancel() - - chatTask = Task { - busy = true - defer { - busy = false - } - - // first, add the user's message to the chat - let userMessage = ChatMessage(content: text, participant: .user) - messages.append(userMessage) - - // add a pending message while we're waiting for a response from the backend - let systemMessage = ChatMessage.pending(participant: .other) - messages.append(systemMessage) - - do { - let response = try await chat.sendMessage(text) - - if !response.functionCalls.isEmpty { - try await handleFunctionCalls(response) - } else { - if let responseText = response.text { - // replace pending message with backend response - messages[messages.count - 1].content = responseText - messages[messages.count - 1].pending = false - } - } - } catch { - self.error = error - print(error.localizedDescription) - let errorMessage = ChatMessage(content: "An error occurred. Please try again.", - participant: .other, - error: error, - pending: false) - messages[messages.count - 1] = errorMessage - } + } + + private func handleFunctionCallsStreaming(_ functionCalls: [FunctionCallPart]) async throws { + var functionResponses = [FunctionResponsePart]() + + for functionCall in functionCalls { + switch functionCall.name { + case "fetchWeather": + guard case let .string(city) = functionCall.args["city"], + case let .string(state) = functionCall.args["state"], + case let .string(date) = functionCall.args["date"] else { + throw NSError( + domain: "FunctionCallingError", + code: 0, + userInfo: [ + NSLocalizedDescriptionKey: "Malformed arguments for fetchWeather: \(functionCall.args)", + ] + ) } + + functionResponses.append( + FunctionResponsePart( + name: functionCall.name, + response: WeatherService.fetchWeather(city: city, state: state, date: date) + ) + ) + default: + print("Unknown function named \"\(functionCall.name)\".") + } } - private func handleFunctionCallsStreaming(_ functionCalls: [FunctionCallPart]) async throws { - var functionResponses = [FunctionResponsePart]() - - for functionCall in functionCalls { - switch functionCall.name { - case "fetchWeather": - guard case let .string(city) = functionCall.args["city"], - case let .string(state) = functionCall.args["state"], - case let .string(date) = functionCall.args["date"] - else { - throw NSError( - domain: "FunctionCallingError", - code: 0, - userInfo: [ - NSLocalizedDescriptionKey: "Malformed arguments for fetchWeather: \(functionCall.args)", - ] - ) - } - - functionResponses.append( - FunctionResponsePart( - name: functionCall.name, - response: WeatherService.fetchWeather(city: city, state: state, date: date) - ) - ) - default: - print("Unknown function named \"\(functionCall.name)\".") - } + if !functionResponses.isEmpty { + let finalResponse = try chat + .sendMessageStream([ModelContent(role: "function", parts: functionResponses)]) + + for try await chunk in finalResponse { + guard let candidate = chunk.candidates.first else { + throw NSError( + domain: "FunctionCallingError", + code: 1, + userInfo: [NSLocalizedDescriptionKey: "No candidate in response chunk"] + ) } - if !functionResponses.isEmpty { - let finalResponse = try chat - .sendMessageStream([ModelContent(role: "function", parts: functionResponses)]) - - for try await chunk in finalResponse { - guard let candidate = chunk.candidates.first else { - throw NSError( - domain: 
"FunctionCallingError", - code: 1, - userInfo: [NSLocalizedDescriptionKey: "No candidate in response chunk"] - ) - } - - for part in candidate.content.parts { - if let textPart = part as? TextPart { - messages[messages.count - 1] - .content = (messages[messages.count - 1].content ?? "") + textPart.text - messages[messages.count - 1].pending = false - } - } - } + for part in candidate.content.parts { + if let textPart = part as? TextPart { + messages[messages.count - 1] + .content = (messages[messages.count - 1].content ?? "") + textPart.text + messages[messages.count - 1].pending = false + } } + } } - - private func handleFunctionCalls(_ response: GenerateContentResponse) async throws { - var functionResponses = [FunctionResponsePart]() - - for functionCall in response.functionCalls { - switch functionCall.name { - case "fetchWeather": - guard case let .string(city) = functionCall.args["city"], - case let .string(state) = functionCall.args["state"], - case let .string(date) = functionCall.args["date"] - else { - throw NSError( - domain: "FunctionCallingError", - code: 0, - userInfo: [ - NSLocalizedDescriptionKey: "Malformed arguments for fetchWeather: \(functionCall.args)", - ] - ) - } - - functionResponses.append( - FunctionResponsePart( - name: functionCall.name, - response: WeatherService.fetchWeather(city: city, state: state, date: date) - ) - ) - default: - print("Unknown function named \"\(functionCall.name)\".") - } + } + + private func handleFunctionCalls(_ response: GenerateContentResponse) async throws { + var functionResponses = [FunctionResponsePart]() + + for functionCall in response.functionCalls { + switch functionCall.name { + case "fetchWeather": + guard case let .string(city) = functionCall.args["city"], + case let .string(state) = functionCall.args["state"], + case let .string(date) = functionCall.args["date"] else { + throw NSError( + domain: "FunctionCallingError", + code: 0, + userInfo: [ + NSLocalizedDescriptionKey: "Malformed arguments for fetchWeather: \(functionCall.args)", + ] + ) } - if !functionResponses.isEmpty { - let finalResponse = try await chat - .sendMessage([ModelContent(role: "function", parts: functionResponses)]) - - guard let candidate = finalResponse.candidates.first else { - throw NSError( - domain: "FunctionCallingError", - code: 1, - userInfo: [NSLocalizedDescriptionKey: "No candidate in response"] - ) - } - - for part in candidate.content.parts { - if let textPart = part as? TextPart { - messages[messages.count - 1] - .content = (messages[messages.count - 1].content ?? "") + textPart.text - messages[messages.count - 1].pending = false - } - } + functionResponses.append( + FunctionResponsePart( + name: functionCall.name, + response: WeatherService.fetchWeather(city: city, state: state, date: date) + ) + ) + default: + print("Unknown function named \"\(functionCall.name)\".") + } + } + + if !functionResponses.isEmpty { + let finalResponse = try await chat + .sendMessage([ModelContent(role: "function", parts: functionResponses)]) + + guard let candidate = finalResponse.candidates.first else { + throw NSError( + domain: "FunctionCallingError", + code: 1, + userInfo: [NSLocalizedDescriptionKey: "No candidate in response"] + ) + } + + for part in candidate.content.parts { + if let textPart = part as? TextPart { + messages[messages.count - 1] + .content = (messages[messages.count - 1].content ?? 
"") + textPart.text + messages[messages.count - 1].pending = false } + } } + } } diff --git a/firebaseai/FirebaseAIExample/Features/GenerativeAIText/Screens/GenerateContentFromTemplateScreen.swift b/firebaseai/FirebaseAIExample/Features/GenerativeAIText/Screens/GenerateContentFromTemplateScreen.swift index 43f591b2c..539cf95f7 100644 --- a/firebaseai/FirebaseAIExample/Features/GenerativeAIText/Screens/GenerateContentFromTemplateScreen.swift +++ b/firebaseai/FirebaseAIExample/Features/GenerativeAIText/Screens/GenerateContentFromTemplateScreen.swift @@ -16,95 +16,95 @@ import ConversationKit import MarkdownUI import SwiftUI #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif struct GenerateContentFromTemplateScreen: View { - let backendType: BackendOption - @StateObject var viewModel: GenerateContentFromTemplateViewModel + let backendType: BackendOption + @StateObject var viewModel: GenerateContentFromTemplateViewModel - init(backendType: BackendOption, sample: Sample? = nil) { - self.backendType = backendType - _viewModel = - StateObject(wrappedValue: GenerateContentFromTemplateViewModel(backendType: backendType, - sample: sample)) - } + init(backendType: BackendOption, sample: Sample? = nil) { + self.backendType = backendType + _viewModel = + StateObject(wrappedValue: GenerateContentFromTemplateViewModel(backendType: backendType, + sample: sample)) + } - enum FocusedField: Hashable { - case message - } + enum FocusedField: Hashable { + case message + } - @FocusState - var focusedField: FocusedField? + @FocusState + var focusedField: FocusedField? - var body: some View { - ZStack { - ScrollView { - VStack { - MessageComposerView(message: $viewModel.userInput) - .padding(.bottom, 10) - .focused($focusedField, equals: .message) - .disableAttachments() - .onSubmitAction { sendOrStop() } + var body: some View { + ZStack { + ScrollView { + VStack { + MessageComposerView(message: $viewModel.userInput) + .padding(.bottom, 10) + .focused($focusedField, equals: .message) + .disableAttachments() + .onSubmitAction { sendOrStop() } - if viewModel.error != nil { - HStack { - Text("An error occurred.") - Button("More information", systemImage: "info.circle") { - viewModel.presentErrorDetails = true - } - .labelStyle(.iconOnly) - } - } + if viewModel.error != nil { + HStack { + Text("An error occurred.") + Button("More information", systemImage: "info.circle") { + viewModel.presentErrorDetails = true + } + .labelStyle(.iconOnly) + } + } - HStack(alignment: .top) { - Image(systemName: "text.bubble.fill") - .font(.title2) + HStack(alignment: .top) { + Image(systemName: "text.bubble.fill") + .font(.title2) - Markdown(viewModel.content) - } - .padding() - } - } - if viewModel.inProgress { - ProgressOverlay() - } - } - .onTapGesture { - focusedField = nil - } - .sheet(isPresented: $viewModel.presentErrorDetails) { - if let error = viewModel.error { - ErrorDetailsView(error: error) - } - } - .navigationTitle("Story teller") - .navigationBarTitleDisplayMode(.inline) - .onAppear { - focusedField = .message + Markdown(viewModel.content) + } + .padding() } + } + if viewModel.inProgress { + ProgressOverlay() + } + } + .onTapGesture { + focusedField = nil + } + .sheet(isPresented: $viewModel.presentErrorDetails) { + if let error = viewModel.error { + ErrorDetailsView(error: error) + } + } + .navigationTitle("Story teller") + .navigationBarTitleDisplayMode(.inline) + .onAppear { + focusedField = .message } + } - private func 
sendMessage() { - Task { - await viewModel.generateContent(prompt: viewModel.userInput) - focusedField = .message - } + private func sendMessage() { + Task { + await viewModel.generateContent(prompt: viewModel.userInput) + focusedField = .message } + } - private func sendOrStop() { - if viewModel.inProgress { - viewModel.stop() - } else { - sendMessage() - } + private func sendOrStop() { + if viewModel.inProgress { + viewModel.stop() + } else { + sendMessage() } + } } #Preview { - NavigationStack { - GenerateContentFromTemplateScreen(backendType: .googleAI) - } + NavigationStack { + GenerateContentFromTemplateScreen(backendType: .googleAI) + } } diff --git a/firebaseai/FirebaseAIExample/Features/GenerativeAIText/ViewModels/GenerateContentFromTemplateViewModel.swift b/firebaseai/FirebaseAIExample/Features/GenerativeAIText/ViewModels/GenerateContentFromTemplateViewModel.swift index f0ced4875..642428417 100644 --- a/firebaseai/FirebaseAIExample/Features/GenerativeAIText/ViewModels/GenerateContentFromTemplateViewModel.swift +++ b/firebaseai/FirebaseAIExample/Features/GenerativeAIText/ViewModels/GenerateContentFromTemplateViewModel.swift @@ -13,98 +13,98 @@ // limitations under the License. #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif -import Combine import Foundation import OSLog import SwiftUI +import Combine @MainActor class GenerateContentFromTemplateViewModel: ObservableObject { - private var logger = Logger(subsystem: Bundle.main.bundleIdentifier!, category: "generative-ai") + private var logger = Logger(subsystem: Bundle.main.bundleIdentifier!, category: "generative-ai") - @Published - var userInput: String = "" + @Published + var userInput: String = "" - @Published - var content: String = "" + @Published + var content: String = "" - @Published - var error: Error? - var hasError: Bool { - return error != nil - } + @Published + var error: Error? + var hasError: Bool { + return error != nil + } - @Published - var presentErrorDetails: Bool = false + @Published + var presentErrorDetails: Bool = false - @Published - var inProgress = false + @Published + var inProgress = false - private let model: TemplateGenerativeModel - private var backendType: BackendOption + private let model: TemplateGenerativeModel + private var backendType: BackendOption - private var generateContentTask: Task? + private var generateContentTask: Task? - private var sample: Sample? + private var sample: Sample? - init(backendType: BackendOption, sample: Sample? = nil) { - self.sample = sample - self.backendType = backendType + init(backendType: BackendOption, sample: Sample? = nil) { + self.sample = sample + self.backendType = backendType - let firebaseService = backendType == .googleAI - ? FirebaseAI.firebaseAI(backend: .googleAI()) - : FirebaseAI.firebaseAI(backend: .vertexAI()) + let firebaseService = backendType == .googleAI + ? FirebaseAI.firebaseAI(backend: .googleAI()) + : FirebaseAI.firebaseAI(backend: .vertexAI()) - model = firebaseService.templateGenerativeModel() + model = firebaseService.templateGenerativeModel() - if let sample { - userInput = sample.initialPrompt ?? "" - } + if let sample { + userInput = sample.initialPrompt ?? 
"" } - - func generateContent(prompt: String) async { - stop() - - generateContentTask = Task { - inProgress = true - defer { - inProgress = false - } - - // Clear previous content before generating new content - content = "" - - do { - let responseStream = try model.generateContentStream( - templateID: "apple-qs-greeting", - inputs: [ - "name": prompt, - "language": "Spanish", - ] - ) - - for try await chunk in responseStream { - if let text = chunk.text { - if !Task.isCancelled { - content += text - } - } - } - } catch { - if !Task.isCancelled { - self.error = error - logger.error("Error generating content from template: \(error)") - } + } + + func generateContent(prompt: String) async { + stop() + + generateContentTask = Task { + inProgress = true + defer { + inProgress = false + } + + // Clear previous content before generating new content + content = "" + + do { + let responseStream = try model.generateContentStream( + templateID: "apple-qs-greeting", + inputs: [ + "name": prompt, + "language": "Spanish", + ] + ) + + for try await chunk in responseStream { + if let text = chunk.text { + if !Task.isCancelled { + content += text } + } } + } catch { + if !Task.isCancelled { + self.error = error + logger.error("Error generating content from template: \(error)") + } + } } + } - func stop() { - generateContentTask?.cancel() - generateContentTask = nil - } + func stop() { + generateContentTask?.cancel() + generateContentTask = nil + } } diff --git a/firebaseai/FirebaseAIExample/Features/Grounding/Screens/GroundingScreen.swift b/firebaseai/FirebaseAIExample/Features/Grounding/Screens/GroundingScreen.swift index bc3f451fa..77bc414da 100644 --- a/firebaseai/FirebaseAIExample/Features/Grounding/Screens/GroundingScreen.swift +++ b/firebaseai/FirebaseAIExample/Features/Grounding/Screens/GroundingScreen.swift @@ -13,60 +13,59 @@ // limitations under the License. #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif -import ConversationKit import SwiftUI +import ConversationKit struct GroundingScreen: View { - let backendType: BackendOption - @StateObject var viewModel: GroundingViewModel + let backendType: BackendOption + @StateObject var viewModel: GroundingViewModel - init(backendType: BackendOption, sample: Sample? = nil) { - self.backendType = backendType - _viewModel = - StateObject(wrappedValue: GroundingViewModel(backendType: backendType, - sample: sample)) - } + init(backendType: BackendOption, sample: Sample? = nil) { + self.backendType = backendType + _viewModel = + StateObject(wrappedValue: GroundingViewModel(backendType: backendType, + sample: sample)) + } - var body: some View { - NavigationStack { - ConversationView(messages: $viewModel.messages, - userPrompt: viewModel.initialPrompt) - { message in - MessageView(message: message) - } - .disableAttachments() - .onSendMessage { message in - await viewModel.sendMessage(message.content ?? 
"", streaming: true) - } - .onError { _ in - viewModel.presentErrorDetails = true - } - .sheet(isPresented: $viewModel.presentErrorDetails) { - if let error = viewModel.error { - ErrorDetailsView(error: error) - } - } - .toolbar { - ToolbarItem(placement: .primaryAction) { - Button(action: newChat) { - Image(systemName: "square.and.pencil") - } - } - } - .navigationTitle(viewModel.title) - .navigationBarTitleDisplayMode(.inline) + var body: some View { + NavigationStack { + ConversationView(messages: $viewModel.messages, + userPrompt: viewModel.initialPrompt) { message in + MessageView(message: message) + } + .disableAttachments() + .onSendMessage { message in + await viewModel.sendMessage(message.content ?? "", streaming: true) + } + .onError { error in + viewModel.presentErrorDetails = true + } + .sheet(isPresented: $viewModel.presentErrorDetails) { + if let error = viewModel.error { + ErrorDetailsView(error: error) } + } + .toolbar { + ToolbarItem(placement: .primaryAction) { + Button(action: newChat) { + Image(systemName: "square.and.pencil") + } + } + } + .navigationTitle(viewModel.title) + .navigationBarTitleDisplayMode(.inline) } + } - private func newChat() { - viewModel.startNewChat() - } + private func newChat() { + viewModel.startNewChat() + } } #Preview { - GroundingScreen(backendType: .googleAI) + GroundingScreen(backendType: .googleAI) } diff --git a/firebaseai/FirebaseAIExample/Features/Grounding/ViewModels/GroundingViewModel.swift b/firebaseai/FirebaseAIExample/Features/Grounding/ViewModels/GroundingViewModel.swift index ec327647d..7085cd8b5 100644 --- a/firebaseai/FirebaseAIExample/Features/Grounding/ViewModels/GroundingViewModel.swift +++ b/firebaseai/FirebaseAIExample/Features/Grounding/ViewModels/GroundingViewModel.swift @@ -13,171 +13,171 @@ // limitations under the License. #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif -import Combine -import ConversationKit import Foundation +import Combine import UIKit +import ConversationKit @MainActor class GroundingViewModel: ObservableObject { - /// This array holds both the user's and the system's chat messages - @Published var messages = [ChatMessage]() + /// This array holds both the user's and the system's chat messages + @Published var messages = [ChatMessage]() - /// Indicates we're waiting for the model to finish - @Published var busy = false + /// Indicates we're waiting for the model to finish + @Published var busy = false - @Published var error: Error? - var hasError: Bool { - return error != nil - } + @Published var error: Error? + var hasError: Bool { + return error != nil + } - @Published var presentErrorDetails: Bool = false + @Published var presentErrorDetails: Bool = false - @Published var initialPrompt: String = "" - @Published var title: String = "" + @Published var initialPrompt: String = "" + @Published var title: String = "" - private var model: GenerativeModel - private var chat: Chat + private var model: GenerativeModel + private var chat: Chat - private var chatTask: Task? + private var chatTask: Task? - private var sample: Sample? + private var sample: Sample? - private var backendType: BackendOption + private var backendType: BackendOption - init(backendType: BackendOption, sample: Sample? = nil) { - self.sample = sample - self.backendType = backendType + init(backendType: BackendOption, sample: Sample? 
= nil) { + self.sample = sample + self.backendType = backendType - let firebaseService = backendType == .googleAI - ? FirebaseAI.firebaseAI(backend: .googleAI()) - : FirebaseAI.firebaseAI(backend: .vertexAI()) + let firebaseService = backendType == .googleAI + ? FirebaseAI.firebaseAI(backend: .googleAI()) + : FirebaseAI.firebaseAI(backend: .vertexAI()) - model = firebaseService.generativeModel( - modelName: sample?.modelName ?? "gemini-2.5-flash", - tools: sample?.tools, - systemInstruction: sample?.systemInstruction - ) + model = firebaseService.generativeModel( + modelName: sample?.modelName ?? "gemini-2.5-flash", + tools: sample?.tools, + systemInstruction: sample?.systemInstruction + ) - chat = model.startChat() + chat = model.startChat() - initialPrompt = sample?.initialPrompt ?? "" - title = sample?.title ?? "" - } + initialPrompt = sample?.initialPrompt ?? "" + title = sample?.title ?? "" + } - func sendMessage(_ text: String, streaming: Bool = true) async { - error = nil - if streaming { - await internalSendMessageStreaming(text) - } else { - await internalSendMessage(text) - } + func sendMessage(_ text: String, streaming: Bool = true) async { + error = nil + if streaming { + await internalSendMessageStreaming(text) + } else { + await internalSendMessage(text) } + } + + func startNewChat() { + stop() + error = nil + chat = model.startChat() + messages.removeAll() + initialPrompt = "" + } + + func stop() { + chatTask?.cancel() + error = nil + } + + private func internalSendMessageStreaming(_ text: String) async { + chatTask?.cancel() + + chatTask = Task { + busy = true + defer { + busy = false + } + + // first, add the user's message to the chat + let userMessage = ChatMessage(content: text, participant: .user) + messages.append(userMessage) + + // add a pending message while we're waiting for a response from the backend + let systemMessage = ChatMessage.pending(participant: .other) + messages.append(systemMessage) + + do { + let responseStream = try chat.sendMessageStream(text) + for try await chunk in responseStream { + messages[messages.count - 1].pending = false + if let text = chunk.text { + messages[messages.count - 1] + .content = (messages[messages.count - 1].content ?? "") + text + } + + if let candidate = chunk.candidates.first { + if let groundingMetadata = candidate.groundingMetadata { + self.messages[self.messages.count - 1].groundingMetadata = groundingMetadata + } + } + } - func startNewChat() { - stop() - error = nil - chat = model.startChat() - messages.removeAll() - initialPrompt = "" + } catch { + self.error = error + print(error.localizedDescription) + let errorMessage = ChatMessage(content: "An error occurred. 
Please try again.", + participant: .other, + error: error, + pending: false) + messages[messages.count - 1] = errorMessage + } } + } - func stop() { - chatTask?.cancel() - error = nil - } + private func internalSendMessage(_ text: String) async { + chatTask?.cancel() - private func internalSendMessageStreaming(_ text: String) async { - chatTask?.cancel() + chatTask = Task { + busy = true + defer { + busy = false + } - chatTask = Task { - busy = true - defer { - busy = false - } + // first, add the user's message to the chat + let userMessage = ChatMessage(content: text, participant: .user) + messages.append(userMessage) - // first, add the user's message to the chat - let userMessage = ChatMessage(content: text, participant: .user) - messages.append(userMessage) - - // add a pending message while we're waiting for a response from the backend - let systemMessage = ChatMessage.pending(participant: .other) - messages.append(systemMessage) - - do { - let responseStream = try chat.sendMessageStream(text) - for try await chunk in responseStream { - messages[messages.count - 1].pending = false - if let text = chunk.text { - messages[messages.count - 1] - .content = (messages[messages.count - 1].content ?? "") + text - } - - if let candidate = chunk.candidates.first { - if let groundingMetadata = candidate.groundingMetadata { - self.messages[self.messages.count - 1].groundingMetadata = groundingMetadata - } - } - } - - } catch { - self.error = error - print(error.localizedDescription) - let errorMessage = ChatMessage(content: "An error occurred. Please try again.", - participant: .other, - error: error, - pending: false) - messages[messages.count - 1] = errorMessage - } - } - } + // add a pending message while we're waiting for a response from the backend + let systemMessage = ChatMessage.pending(participant: .other) + messages.append(systemMessage) - private func internalSendMessage(_ text: String) async { - chatTask?.cancel() + do { + var response: GenerateContentResponse? + response = try await chat.sendMessage(text) - chatTask = Task { - busy = true - defer { - busy = false - } + if let responseText = response?.text { + // replace pending message with backend response + messages[messages.count - 1].content = responseText + messages[messages.count - 1].pending = false - // first, add the user's message to the chat - let userMessage = ChatMessage(content: text, participant: .user) - messages.append(userMessage) - - // add a pending message while we're waiting for a response from the backend - let systemMessage = ChatMessage.pending(participant: .other) - messages.append(systemMessage) - - do { - var response: GenerateContentResponse? - response = try await chat.sendMessage(text) - - if let responseText = response?.text { - // replace pending message with backend response - messages[messages.count - 1].content = responseText - messages[messages.count - 1].pending = false - - if let candidate = response?.candidates.first { - if let groundingMetadata = candidate.groundingMetadata { - self.messages[self.messages.count - 1].groundingMetadata = groundingMetadata - } - } - } - - } catch { - self.error = error - print(error.localizedDescription) - let errorMessage = ChatMessage(content: "An error occurred. 
Please try again.", - participant: .other, - error: error, - pending: false) - messages[messages.count - 1] = errorMessage + if let candidate = response?.candidates.first { + if let groundingMetadata = candidate.groundingMetadata { + self.messages[self.messages.count - 1].groundingMetadata = groundingMetadata } + } } + + } catch { + self.error = error + print(error.localizedDescription) + let errorMessage = ChatMessage(content: "An error occurred. Please try again.", + participant: .other, + error: error, + pending: false) + messages[messages.count - 1] = errorMessage + } } + } } diff --git a/firebaseai/FirebaseAIExample/Features/Grounding/Views/GoogleSearchSuggestionView.swift b/firebaseai/FirebaseAIExample/Features/Grounding/Views/GoogleSearchSuggestionView.swift index 7302f5fdb..eaf66c076 100644 --- a/firebaseai/FirebaseAIExample/Features/Grounding/Views/GoogleSearchSuggestionView.swift +++ b/firebaseai/FirebaseAIExample/Features/Grounding/Views/GoogleSearchSuggestionView.swift @@ -20,61 +20,60 @@ import WebKit /// This is added to the bottom of chat messages containing results grounded /// in Google Search. struct GoogleSearchSuggestionView: UIViewRepresentable { - let htmlString: String + let htmlString: String - // This Coordinator class will act as the web view's navigation delegate. - class Coordinator: NSObject, WKNavigationDelegate { - func webView(_: WKWebView, - decidePolicyFor navigationAction: WKNavigationAction, - decisionHandler: @escaping (WKNavigationActionPolicy) -> Void) - { - // Check if the navigation was triggered by a user clicking a link. - if navigationAction.navigationType == .linkActivated { - if let url = navigationAction.request.url { - // Open the URL in the system's default browser (e.g., Safari). - UIApplication.shared.open(url) - } - // Cancel the navigation inside our small web view. - decisionHandler(.cancel) - return - } - // For all other navigation types (like the initial HTML load), allow it. - decisionHandler(.allow) + // This Coordinator class will act as the web view's navigation delegate. + class Coordinator: NSObject, WKNavigationDelegate { + func webView(_ webView: WKWebView, + decidePolicyFor navigationAction: WKNavigationAction, + decisionHandler: @escaping (WKNavigationActionPolicy) -> Void) { + // Check if the navigation was triggered by a user clicking a link. + if navigationAction.navigationType == .linkActivated { + if let url = navigationAction.request.url { + // Open the URL in the system's default browser (e.g., Safari). + UIApplication.shared.open(url) } + // Cancel the navigation inside our small web view. + decisionHandler(.cancel) + return + } + // For all other navigation types (like the initial HTML load), allow it. + decisionHandler(.allow) } + } - func makeCoordinator() -> Coordinator { - Coordinator() - } + func makeCoordinator() -> Coordinator { + Coordinator() + } - func makeUIView(context: Context) -> WKWebView { - let webView = WKWebView() - webView.isOpaque = false - webView.backgroundColor = .clear - webView.scrollView.backgroundColor = .clear - webView.scrollView.isScrollEnabled = false - // Set the coordinator as the navigation delegate. - webView.navigationDelegate = context.coordinator - return webView - } + func makeUIView(context: Context) -> WKWebView { + let webView = WKWebView() + webView.isOpaque = false + webView.backgroundColor = .clear + webView.scrollView.backgroundColor = .clear + webView.scrollView.isScrollEnabled = false + // Set the coordinator as the navigation delegate. 
+ webView.navigationDelegate = context.coordinator + return webView + } - func updateUIView(_ uiView: WKWebView, context _: Context) { - // The renderedContent is an HTML snippet with CSS. - // For it to render correctly, we wrap it in a basic HTML document structure. - let fullHTML = """ - - - - - - - - \(htmlString) - - - """ - uiView.loadHTMLString(fullHTML, baseURL: nil) - } + func updateUIView(_ uiView: WKWebView, context: Context) { + // The renderedContent is an HTML snippet with CSS. + // For it to render correctly, we wrap it in a basic HTML document structure. + let fullHTML = """ + + + + + + + + \(htmlString) + + + """ + uiView.loadHTMLString(fullHTML, baseURL: nil) + } } diff --git a/firebaseai/FirebaseAIExample/Features/Grounding/Views/GroundedResponseView.swift b/firebaseai/FirebaseAIExample/Features/Grounding/Views/GroundedResponseView.swift index 9d10f56ca..ea0501926 100644 --- a/firebaseai/FirebaseAIExample/Features/Grounding/Views/GroundedResponseView.swift +++ b/firebaseai/FirebaseAIExample/Features/Grounding/Views/GroundedResponseView.swift @@ -13,73 +13,73 @@ // limitations under the License. #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif import SwiftUI /// A view that displays a chat message that is grounded in Google Search. struct GroundedResponseView: View { - var message: ChatMessage - var groundingMetadata: GroundingMetadata + var message: ChatMessage + var groundingMetadata: GroundingMetadata - var body: some View { - // We can only display a response grounded in Google Search if the searchEntrypoint is non-nil. - let isCompliant = (groundingMetadata.groundingChunks.isEmpty || groundingMetadata - .searchEntryPoint != nil) - if isCompliant { - HStack(alignment: .top, spacing: 8) { - VStack(alignment: .leading, spacing: 8) { - // Message text - ResponseTextView(message: message) + var body: some View { + // We can only display a response grounded in Google Search if the searchEntrypoint is non-nil. + let isCompliant = (groundingMetadata.groundingChunks.isEmpty || groundingMetadata + .searchEntryPoint != nil) + if isCompliant { + HStack(alignment: .top, spacing: 8) { + VStack(alignment: .leading, spacing: 8) { + // Message text + ResponseTextView(message: message) - if !groundingMetadata.groundingChunks.isEmpty { - Divider() - // Source links - ForEach(0 ..< groundingMetadata.groundingChunks.count, id: \.self) { index in - if let webChunk = groundingMetadata.groundingChunks[index].web { - SourceLinkView( - title: webChunk.title ?? "Untitled Source", - uri: webChunk.uri - ) - } - } - } - // Search suggestions - if let searchEntryPoint = groundingMetadata.searchEntryPoint { - Divider() - GoogleSearchSuggestionView(htmlString: searchEntryPoint.renderedContent) - .frame(height: 44) - .clipShape(RoundedRectangle(cornerRadius: 22)) - } - } + if !groundingMetadata.groundingChunks.isEmpty { + Divider() + // Source links + ForEach(0 ..< groundingMetadata.groundingChunks.count, id: \.self) { index in + if let webChunk = groundingMetadata.groundingChunks[index].web { + SourceLinkView( + title: webChunk.title ?? 
"Untitled Source", + uri: webChunk.uri + ) + } } - .frame(maxWidth: .infinity, alignment: .leading) + } + // Search suggestions + if let searchEntryPoint = groundingMetadata.searchEntryPoint { + Divider() + GoogleSearchSuggestionView(htmlString: searchEntryPoint.renderedContent) + .frame(height: 44) + .clipShape(RoundedRectangle(cornerRadius: 22)) + } } + } + .frame(maxWidth: .infinity, alignment: .leading) } + } } /// A view for a single, clickable source link. struct SourceLinkView: View { - let title: String - let uri: String? + let title: String + let uri: String? - var body: some View { - if let uri, let url = URL(string: uri) { - Link(destination: url) { - HStack(spacing: 4) { - Image(systemName: "link") - .font(.caption) - .foregroundColor(.secondary) - Text(title) - .font(.footnote) - .underline() - .lineLimit(1) - .multilineTextAlignment(.leading) - } - } - .buttonStyle(.plain) + var body: some View { + if let uri, let url = URL(string: uri) { + Link(destination: url) { + HStack(spacing: 4) { + Image(systemName: "link") + .font(.caption) + .foregroundColor(.secondary) + Text(title) + .font(.footnote) + .underline() + .lineLimit(1) + .multilineTextAlignment(.leading) } + } + .buttonStyle(.plain) } + } } diff --git a/firebaseai/FirebaseAIExample/Features/Imagen/ImagenFromTemplateScreen.swift b/firebaseai/FirebaseAIExample/Features/Imagen/ImagenFromTemplateScreen.swift index 2ed016ad5..7e762dbc0 100644 --- a/firebaseai/FirebaseAIExample/Features/Imagen/ImagenFromTemplateScreen.swift +++ b/firebaseai/FirebaseAIExample/Features/Imagen/ImagenFromTemplateScreen.swift @@ -14,101 +14,101 @@ import SwiftUI #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif import ConversationKit struct ImagenFromTemplateScreen: View { - let backendType: BackendOption - @StateObject var viewModel: ImagenFromTemplateViewModel + let backendType: BackendOption + @StateObject var viewModel: ImagenFromTemplateViewModel - init(backendType: BackendOption, sample: Sample? = nil) { - self.backendType = backendType - _viewModel = - StateObject(wrappedValue: ImagenFromTemplateViewModel(backendType: backendType, - sample: sample)) - } - - enum FocusedField: Hashable { - case message - } + init(backendType: BackendOption, sample: Sample? = nil) { + self.backendType = backendType + _viewModel = + StateObject(wrappedValue: ImagenFromTemplateViewModel(backendType: backendType, + sample: sample)) + } - @FocusState - var focusedField: FocusedField? + enum FocusedField: Hashable { + case message + } - var body: some View { - ZStack { - ScrollView { - VStack { - MessageComposerView(message: $viewModel.userInput) - .padding(.bottom, 10) - .focused($focusedField, equals: .message) - .disableAttachments() - .onSubmitAction { sendOrStop() } + @FocusState + var focusedField: FocusedField? 
- if viewModel.error != nil { - HStack { - Text("An error occurred.") - Button("More information", systemImage: "info.circle") { - viewModel.presentErrorDetails = true - } - .labelStyle(.iconOnly) - } - } + var body: some View { + ZStack { + ScrollView { + VStack { + MessageComposerView(message: $viewModel.userInput) + .padding(.bottom, 10) + .focused($focusedField, equals: .message) + .disableAttachments() + .onSubmitAction { sendOrStop() } - let spacing: CGFloat = 10 - LazyVGrid(columns: [ - GridItem(.flexible(), spacing: spacing), - GridItem(.flexible(), spacing: spacing), - ], spacing: spacing) { - ForEach(viewModel.images, id: \.self) { image in - Image(uiImage: image) - .resizable() - .aspectRatio(1, contentMode: .fill) - .cornerRadius(12) - .clipped() - } - } - .padding(.horizontal, spacing) - } - } - if viewModel.inProgress { - ProgressOverlay() + if viewModel.error != nil { + HStack { + Text("An error occurred.") + Button("More information", systemImage: "info.circle") { + viewModel.presentErrorDetails = true + } + .labelStyle(.iconOnly) } - } - .onTapGesture { - focusedField = nil - } - .sheet(isPresented: $viewModel.presentErrorDetails) { - if let error = viewModel.error { - ErrorDetailsView(error: error) + } + + let spacing: CGFloat = 10 + LazyVGrid(columns: [ + GridItem(.flexible(), spacing: spacing), + GridItem(.flexible(), spacing: spacing), + ], spacing: spacing) { + ForEach(viewModel.images, id: \.self) { image in + Image(uiImage: image) + .resizable() + .aspectRatio(1, contentMode: .fill) + .cornerRadius(12) + .clipped() } + } + .padding(.horizontal, spacing) } - .navigationTitle("Imagen Template") - .navigationBarTitleDisplayMode(.inline) - .onAppear { - focusedField = .message - } + } + if viewModel.inProgress { + ProgressOverlay() + } + } + .onTapGesture { + focusedField = nil + } + .sheet(isPresented: $viewModel.presentErrorDetails) { + if let error = viewModel.error { + ErrorDetailsView(error: error) + } } + .navigationTitle("Imagen Template") + .navigationBarTitleDisplayMode(.inline) + .onAppear { + focusedField = .message + } + } - private func sendMessage() { - Task { - await viewModel.generateImageFromTemplate(prompt: viewModel.userInput) - focusedField = .message - } + private func sendMessage() { + Task { + await viewModel.generateImageFromTemplate(prompt: viewModel.userInput) + focusedField = .message } + } - private func sendOrStop() { - if viewModel.inProgress { - viewModel.stop() - } else { - sendMessage() - } + private func sendOrStop() { + if viewModel.inProgress { + viewModel.stop() + } else { + sendMessage() } + } } #Preview { - ImagenFromTemplateScreen(backendType: .googleAI) + ImagenFromTemplateScreen(backendType: .googleAI) } diff --git a/firebaseai/FirebaseAIExample/Features/Imagen/ImagenFromTemplateViewModel.swift b/firebaseai/FirebaseAIExample/Features/Imagen/ImagenFromTemplateViewModel.swift index d5d72ead4..50c821d0c 100644 --- a/firebaseai/FirebaseAIExample/Features/Imagen/ImagenFromTemplateViewModel.swift +++ b/firebaseai/FirebaseAIExample/Features/Imagen/ImagenFromTemplateViewModel.swift @@ -13,14 +13,14 @@ // limitations under the License. 
#if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif -import Combine import Foundation import OSLog import SwiftUI +import Combine // Template Details // @@ -37,82 +37,82 @@ import SwiftUI @MainActor class ImagenFromTemplateViewModel: ObservableObject { - private var logger = Logger(subsystem: Bundle.main.bundleIdentifier!, category: "generative-ai") + private var logger = Logger(subsystem: Bundle.main.bundleIdentifier!, category: "generative-ai") - @Published - var userInput: String = "" + @Published + var userInput: String = "" - @Published - var images = [UIImage]() + @Published + var images = [UIImage]() - @Published - var error: Error? - var hasError: Bool { - return error != nil - } + @Published + var error: Error? + var hasError: Bool { + return error != nil + } - @Published - var presentErrorDetails: Bool = false + @Published + var presentErrorDetails: Bool = false - @Published - var inProgress = false + @Published + var inProgress = false - private let model: TemplateImagenModel - private var backendType: BackendOption + private let model: TemplateImagenModel + private var backendType: BackendOption - private var generateImagesTask: Task? + private var generateImagesTask: Task? - private var sample: Sample? + private var sample: Sample? - init(backendType: BackendOption, sample: Sample? = nil) { - self.sample = sample - self.backendType = backendType + init(backendType: BackendOption, sample: Sample? = nil) { + self.sample = sample + self.backendType = backendType - let firebaseService = backendType == .googleAI - ? FirebaseAI.firebaseAI(backend: .googleAI()) - : FirebaseAI.firebaseAI(backend: .vertexAI()) + let firebaseService = backendType == .googleAI + ? FirebaseAI.firebaseAI(backend: .googleAI()) + : FirebaseAI.firebaseAI(backend: .vertexAI()) - model = firebaseService.templateImagenModel() - } + model = firebaseService.templateImagenModel() + } + + func generateImageFromTemplate(prompt: String) async { + stop() + + generateImagesTask = Task { + inProgress = true + defer { + inProgress = false + } - func generateImageFromTemplate(prompt: String) async { - stop() - - generateImagesTask = Task { - inProgress = true - defer { - inProgress = false - } - - do { - // 1. Call generateImages with the text prompt - let response = try await model.generateImages( - templateID: "imagen-generation-basic", - inputs: [ - "prompt": prompt, - ] - ) - - // 2. Print the reason images were filtered out, if any. - if let filteredReason = response.filteredReason { - print("Image(s) Blocked: \(filteredReason)") - } - - if !Task.isCancelled { - // 3. Convert the image data to UIImage for display in the UI - images = response.images.compactMap { UIImage(data: $0.data) } - } - } catch { - if !Task.isCancelled { - self.error = error - logger.error("Error generating images from template: \(error)") - } - } + do { + // 1. Call generateImages with the text prompt + let response = try await model.generateImages( + templateID: "imagen-generation-basic", + inputs: [ + "prompt": prompt, + ] + ) + + // 2. Print the reason images were filtered out, if any. + if let filteredReason = response.filteredReason { + print("Image(s) Blocked: \(filteredReason)") } - } - func stop() { - generateImagesTask?.cancel() - generateImagesTask = nil + if !Task.isCancelled { + // 3. 
Convert the image data to UIImage for display in the UI + images = response.images.compactMap { UIImage(data: $0.data) } + } + } catch { + if !Task.isCancelled { + self.error = error + logger.error("Error generating images from template: \(error)") + } + } } + } + + func stop() { + generateImagesTask?.cancel() + generateImagesTask = nil + } } diff --git a/firebaseai/FirebaseAIExample/Features/Imagen/ImagenScreen.swift b/firebaseai/FirebaseAIExample/Features/Imagen/ImagenScreen.swift index dd527fd6e..76a2bbb2e 100644 --- a/firebaseai/FirebaseAIExample/Features/Imagen/ImagenScreen.swift +++ b/firebaseai/FirebaseAIExample/Features/Imagen/ImagenScreen.swift @@ -14,107 +14,107 @@ import SwiftUI #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif import ConversationKit struct ImagenScreen: View { - let backendType: BackendOption - @StateObject var viewModel: ImagenViewModel + let backendType: BackendOption + @StateObject var viewModel: ImagenViewModel - @State - private var userPrompt = "" + @State + private var userPrompt = "" - init(backendType: BackendOption, sample: Sample? = nil) { - self.backendType = backendType - _viewModel = - StateObject(wrappedValue: ImagenViewModel(backendType: backendType, - sample: sample)) - } - - enum FocusedField: Hashable { - case message - } + init(backendType: BackendOption, sample: Sample? = nil) { + self.backendType = backendType + _viewModel = + StateObject(wrappedValue: ImagenViewModel(backendType: backendType, + sample: sample)) + } - @FocusState - var focusedField: FocusedField? + enum FocusedField: Hashable { + case message + } - var body: some View { - ZStack { - ScrollView { - VStack { - MessageComposerView(message: $userPrompt) - .padding(.bottom, 10) - .focused($focusedField, equals: .message) - .disableAttachments() - .onSubmitAction { sendOrStop() } + @FocusState + var focusedField: FocusedField? 
- if viewModel.error != nil { - HStack { - Text("An error occurred.") - Button("More information", systemImage: "info.circle") { - viewModel.presentErrorDetails = true - } - .labelStyle(.iconOnly) - } - } + var body: some View { + ZStack { + ScrollView { + VStack { + MessageComposerView(message: $userPrompt) + .padding(.bottom, 10) + .focused($focusedField, equals: .message) + .disableAttachments() + .onSubmitAction { sendOrStop() } - let spacing: CGFloat = 10 - LazyVGrid(columns: [ - GridItem(.flexible(), spacing: spacing), - GridItem(.flexible(), spacing: spacing), - ], spacing: spacing) { - ForEach(viewModel.images, id: \.self) { image in - Image(uiImage: image) - .resizable() - .aspectRatio(1, contentMode: .fill) - .cornerRadius(12) - .clipped() - } - } - .padding(.horizontal, spacing) - } - } - if viewModel.inProgress { - ProgressOverlay() - } - } - .onTapGesture { - focusedField = nil - } - .sheet(isPresented: $viewModel.presentErrorDetails) { - if let error = viewModel.error { - ErrorDetailsView(error: error) + if viewModel.error != nil { + HStack { + Text("An error occurred.") + Button("More information", systemImage: "info.circle") { + viewModel.presentErrorDetails = true + } + .labelStyle(.iconOnly) } - } - .navigationTitle("Imagen example") - .navigationBarTitleDisplayMode(.inline) - .onAppear { - focusedField = .message - if userPrompt.isEmpty && !viewModel.initialPrompt.isEmpty { - userPrompt = viewModel.initialPrompt + } + + let spacing: CGFloat = 10 + LazyVGrid(columns: [ + GridItem(.flexible(), spacing: spacing), + GridItem(.flexible(), spacing: spacing), + ], spacing: spacing) { + ForEach(viewModel.images, id: \.self) { image in + Image(uiImage: image) + .resizable() + .aspectRatio(1, contentMode: .fill) + .cornerRadius(12) + .clipped() } + } + .padding(.horizontal, spacing) } + } + if viewModel.inProgress { + ProgressOverlay() + } + } + .onTapGesture { + focusedField = nil + } + .sheet(isPresented: $viewModel.presentErrorDetails) { + if let error = viewModel.error { + ErrorDetailsView(error: error) + } } + .navigationTitle("Imagen example") + .navigationBarTitleDisplayMode(.inline) + .onAppear { + focusedField = .message + if userPrompt.isEmpty && !viewModel.initialPrompt.isEmpty { + userPrompt = viewModel.initialPrompt + } + } + } - private func sendMessage() { - Task { - await viewModel.generateImage(prompt: userPrompt) - focusedField = .message - } + private func sendMessage() { + Task { + await viewModel.generateImage(prompt: userPrompt) + focusedField = .message } + } - private func sendOrStop() { - if viewModel.inProgress { - viewModel.stop() - } else { - sendMessage() - } + private func sendOrStop() { + if viewModel.inProgress { + viewModel.stop() + } else { + sendMessage() } + } } #Preview { - ImagenScreen(backendType: .googleAI) + ImagenScreen(backendType: .googleAI) } diff --git a/firebaseai/FirebaseAIExample/Features/Imagen/ImagenViewModel.swift b/firebaseai/FirebaseAIExample/Features/Imagen/ImagenViewModel.swift index 144fa3a0d..2328f83fe 100644 --- a/firebaseai/FirebaseAIExample/Features/Imagen/ImagenViewModel.swift +++ b/firebaseai/FirebaseAIExample/Features/Imagen/ImagenViewModel.swift @@ -13,102 +13,102 @@ // limitations under the License. 
#if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif -import Combine import Foundation +import Combine import OSLog import SwiftUI @MainActor class ImagenViewModel: ObservableObject { - private var logger = Logger(subsystem: Bundle.main.bundleIdentifier!, category: "generative-ai") + private var logger = Logger(subsystem: Bundle.main.bundleIdentifier!, category: "generative-ai") - @Published - var initialPrompt: String = "" + @Published + var initialPrompt: String = "" - @Published - var images = [UIImage]() + @Published + var images = [UIImage]() - @Published - var error: Error? - var hasError: Bool { - return error != nil - } + @Published + var error: Error? + var hasError: Bool { + return error != nil + } - @Published - var presentErrorDetails: Bool = false + @Published + var presentErrorDetails: Bool = false - @Published - var inProgress = false + @Published + var inProgress = false - private let model: ImagenModel - private var backendType: BackendOption + private let model: ImagenModel + private var backendType: BackendOption - private var generateImagesTask: Task? + private var generateImagesTask: Task? - private var sample: Sample? + private var sample: Sample? - init(backendType: BackendOption, sample: Sample? = nil) { - self.sample = sample - self.backendType = backendType + init(backendType: BackendOption, sample: Sample? = nil) { + self.sample = sample + self.backendType = backendType - let firebaseService = backendType == .googleAI - ? FirebaseAI.firebaseAI(backend: .googleAI()) - : FirebaseAI.firebaseAI(backend: .vertexAI()) + let firebaseService = backendType == .googleAI + ? FirebaseAI.firebaseAI(backend: .googleAI()) + : FirebaseAI.firebaseAI(backend: .vertexAI()) - let modelName = "imagen-4.0-generate-001" - let safetySettings = ImagenSafetySettings( - safetyFilterLevel: .blockLowAndAbove - ) - var generationConfig = ImagenGenerationConfig() - generationConfig.numberOfImages = 4 - generationConfig.aspectRatio = .square1x1 + let modelName = "imagen-4.0-generate-001" + let safetySettings = ImagenSafetySettings( + safetyFilterLevel: .blockLowAndAbove + ) + var generationConfig = ImagenGenerationConfig() + generationConfig.numberOfImages = 4 + generationConfig.aspectRatio = .square1x1 - model = firebaseService.imagenModel( - modelName: modelName, - generationConfig: generationConfig, - safetySettings: safetySettings - ) + model = firebaseService.imagenModel( + modelName: modelName, + generationConfig: generationConfig, + safetySettings: safetySettings + ) - initialPrompt = sample?.initialPrompt ?? "" - } + initialPrompt = sample?.initialPrompt ?? "" + } + + func generateImage(prompt: String) async { + stop() + + generateImagesTask = Task { + inProgress = true + defer { + inProgress = false + } - func generateImage(prompt: String) async { - stop() - - generateImagesTask = Task { - inProgress = true - defer { - inProgress = false - } - - do { - // 1. Call generateImages with the text prompt - let response = try await model.generateImages(prompt: prompt) - - // 2. Print the reason images were filtered out, if any. - if let filteredReason = response.filteredReason { - print("Image(s) Blocked: \(filteredReason)") - } - - if !Task.isCancelled { - // 3. 
Convert the image data to UIImage for display in the UI - images = response.images.compactMap { UIImage(data: $0.data) } - } - } catch { - if !Task.isCancelled { - self.error = error - logger.error("Error generating images: \(error)") - } - } + do { + // 1. Call generateImages with the text prompt + let response = try await model.generateImages(prompt: prompt) + + // 2. Print the reason images were filtered out, if any. + if let filteredReason = response.filteredReason { + print("Image(s) Blocked: \(filteredReason)") } - } - func stop() { - generateImagesTask?.cancel() - generateImagesTask = nil + if !Task.isCancelled { + // 3. Convert the image data to UIImage for display in the UI + images = response.images.compactMap { UIImage(data: $0.data) } + } + } catch { + if !Task.isCancelled { + self.error = error + logger.error("Error generating images: \(error)") + } + } } + } + + func stop() { + generateImagesTask?.cancel() + generateImagesTask = nil + } } diff --git a/firebaseai/FirebaseAIExample/Features/Live/Screens/LiveScreen.swift b/firebaseai/FirebaseAIExample/Features/Live/Screens/LiveScreen.swift index c00dd4efb..941a38a7c 100644 --- a/firebaseai/FirebaseAIExample/Features/Live/Screens/LiveScreen.swift +++ b/firebaseai/FirebaseAIExample/Features/Live/Screens/LiveScreen.swift @@ -13,62 +13,62 @@ // limitations under the License. #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif import SwiftUI import TipKit struct LiveScreen: View { - let backendType: BackendOption - @StateObject var viewModel: LiveViewModel + let backendType: BackendOption + @StateObject var viewModel: LiveViewModel - init(backendType: BackendOption, sample: Sample? = nil) { - self.backendType = backendType - _viewModel = - StateObject(wrappedValue: LiveViewModel(backendType: backendType, - sample: sample)) - } + init(backendType: BackendOption, sample: Sample? = nil) { + self.backendType = backendType + _viewModel = + StateObject(wrappedValue: LiveViewModel(backendType: backendType, + sample: sample)) + } - var body: some View { - VStack(spacing: 20) { - ModelAvatar(isConnected: viewModel.state == .connected) - TranscriptView(typewriter: viewModel.transcriptTypewriter) + var body: some View { + VStack(spacing: 20) { + ModelAvatar(isConnected: viewModel.state == .connected) + TranscriptView(typewriter: viewModel.transcriptTypewriter) - Spacer() - if let error = viewModel.error { - ErrorDetailsView(error: error) - } - if let tip = viewModel.tip, !viewModel.hasTranscripts { - TipView(tip) - } - ConnectButton( - state: viewModel.state, - onConnect: viewModel.connect, - onDisconnect: viewModel.disconnect - ) + Spacer() + if let error = viewModel.error { + ErrorDetailsView(error: error) + } + if let tip = viewModel.tip, !viewModel.hasTranscripts { + TipView(tip) + } + ConnectButton( + state: viewModel.state, + onConnect: viewModel.connect, + onDisconnect: viewModel.disconnect + ) - #if targetEnvironment(simulator) - AudioOutputToggle(isEnabled: $viewModel.isAudioOutputEnabled, onChange: { - Task { - await viewModel.onAudioPlaybackChanged() - } - }) - #endif - } - .padding() - .navigationTitle(viewModel.title) - .navigationBarTitleDisplayMode(.inline) - .background(viewModel.backgroundColor ?? 
.clear) - .onDisappear { - Task { - await viewModel.disconnect() - } - } + #if targetEnvironment(simulator) + AudioOutputToggle(isEnabled: $viewModel.isAudioOutputEnabled, onChange: { + Task { + await viewModel.onAudioPlaybackChanged() + } + }) + #endif + } + .padding() + .navigationTitle(viewModel.title) + .navigationBarTitleDisplayMode(.inline) + .background(viewModel.backgroundColor ?? .clear) + .onDisappear { + Task { + await viewModel.disconnect() + } } + } } #Preview { - LiveScreen(backendType: .googleAI) + LiveScreen(backendType: .googleAI) } diff --git a/firebaseai/FirebaseAIExample/Features/Live/ViewModels/LiveViewModel.swift b/firebaseai/FirebaseAIExample/Features/Live/ViewModels/LiveViewModel.swift index a3e041cd1..8910a27c8 100644 --- a/firebaseai/FirebaseAIExample/Features/Live/ViewModels/LiveViewModel.swift +++ b/firebaseai/FirebaseAIExample/Features/Live/ViewModels/LiveViewModel.swift @@ -12,286 +12,286 @@ // See the License for the specific language governing permissions and // limitations under the License. -import AVFoundation -import AVKit -import Combine import FirebaseAILogic import Foundation import OSLog +import AVFoundation import SwiftUI +import AVKit +import Combine enum LiveViewModelState { - case idle - case connecting - case connected + case idle + case connecting + case connected } @MainActor class LiveViewModel: ObservableObject { - private var logger = Logger(subsystem: Bundle.main.bundleIdentifier!, category: "generative-ai") - - @Published - var error: Error? - - @Published - var state: LiveViewModelState = .idle - - @Published - var transcriptTypewriter: TypeWriterViewModel = .init() - - @Published - var backgroundColor: Color? = nil - - @Published - var hasTranscripts: Bool = false - - @Published - var title: String - - @Published - var tip: InlineTip? - - @Published - var isAudioOutputEnabled: Bool = { - #if targetEnvironment(simulator) - return false - #else - return true - #endif - }() - - private var model: LiveGenerativeModel? - private var liveSession: LiveSession? - - private var audioController: AudioController? - private var microphoneTask = Task {} - - init(backendType: BackendOption, sample: Sample? = nil) { - let firebaseService = backendType == .googleAI - ? FirebaseAI.firebaseAI(backend: .googleAI()) - : FirebaseAI.firebaseAI(backend: .vertexAI()) - - model = firebaseService.liveModel( - modelName: (backendType == .googleAI) ? "gemini-2.5-flash-native-audio-preview-09-2025" : - "gemini-live-2.5-flash-preview-native-audio-09-2025", - generationConfig: sample?.liveGenerationConfig, - tools: sample?.tools, - systemInstruction: sample?.systemInstruction - ) - title = sample?.title ?? "" - tip = sample?.tip + private var logger = Logger(subsystem: Bundle.main.bundleIdentifier!, category: "generative-ai") + + @Published + var error: Error? + + @Published + var state: LiveViewModelState = .idle + + @Published + var transcriptTypewriter: TypeWriterViewModel = TypeWriterViewModel() + + @Published + var backgroundColor: Color? = nil + + @Published + var hasTranscripts: Bool = false + + @Published + var title: String + + @Published + var tip: InlineTip? + + @Published + var isAudioOutputEnabled: Bool = { + #if targetEnvironment(simulator) + return false + #else + return true + #endif + }() + + private var model: LiveGenerativeModel? + private var liveSession: LiveSession? + + private var audioController: AudioController? + private var microphoneTask = Task {} + + init(backendType: BackendOption, sample: Sample? 
= nil) { + let firebaseService = backendType == .googleAI + ? FirebaseAI.firebaseAI(backend: .googleAI()) + : FirebaseAI.firebaseAI(backend: .vertexAI()) + + model = firebaseService.liveModel( + modelName: (backendType == .googleAI) ? "gemini-2.5-flash-native-audio-preview-09-2025" : + "gemini-live-2.5-flash-preview-native-audio-09-2025", + generationConfig: sample?.liveGenerationConfig, + tools: sample?.tools, + systemInstruction: sample?.systemInstruction + ) + title = sample?.title ?? "" + tip = sample?.tip + } + + /// Start a connection to the model. + /// + /// If a connection is already active, you'll need to call ``LiveViewModel/disconnect()`` first. + func connect() async { + guard let model, state == .idle else { + return } - /// Start a connection to the model. - /// - /// If a connection is already active, you'll need to call ``LiveViewModel/disconnect()`` first. - func connect() async { - guard let model, state == .idle else { - return - } - - if !isAudioOutputEnabled { - logger.warning("Playback audio is disabled.") - } - - guard await requestRecordPermission() else { - logger.warning("The user denied us permission to record the microphone.") - isAudioOutputEnabled = false - return - } + if !isAudioOutputEnabled { + logger.warning("Playback audio is disabled.") + } - state = .connecting - transcriptTypewriter.restart() - hasTranscripts = false + guard await requestRecordPermission() else { + logger.warning("The user denied us permission to record the microphone.") + isAudioOutputEnabled = false + return + } - do { - liveSession = try await model.connect() - audioController = try await AudioController() + state = .connecting + transcriptTypewriter.restart() + hasTranscripts = false - try await startRecording() + do { + liveSession = try await model.connect() + audioController = try await AudioController() - state = .connected - try await startProcessingResponses() - } catch { - logger.error("\(String(describing: error))") - self.error = error - await disconnect() - } - } + try await startRecording() - func onAudioPlaybackChanged() async { - if isAudioOutputEnabled { - guard await requestRecordPermission() else { - logger.warning("The user denied us permission to record the microphone.") - isAudioOutputEnabled = false - return - } - } + state = .connected + try await startProcessingResponses() + } catch { + logger.error("\(String(describing: error))") + self.error = error + await disconnect() } - - /// Disconnects the model. - /// - /// Will stop any pending playback, and the recording of the mic. - func disconnect() async { - await audioController?.stop() - await liveSession?.close() - microphoneTask.cancel() - state = .idle - liveSession = nil - transcriptTypewriter.clearPending() - - withAnimation { - backgroundColor = nil - } + } + + func onAudioPlaybackChanged() async { + if isAudioOutputEnabled { + guard await requestRecordPermission() else { + logger.warning("The user denied us permission to record the microphone.") + isAudioOutputEnabled = false + return + } } - - /// Starts recording data from the user's microphone, and sends it to the model. 
- private func startRecording() async throws { - guard let audioController, let liveSession else { return } - - let stream = try await audioController.listenToMic() - microphoneTask = Task { - do { - for await audioBuffer in stream { - try await liveSession.sendAudioRealtime(audioBuffer.int16Data()) - } - } catch { - logger.error("\(String(describing: error))") - self.error = error - await disconnect() - } - } + } + + /// Disconnects the model. + /// + /// Will stop any pending playback, and the recording of the mic. + func disconnect() async { + await audioController?.stop() + await liveSession?.close() + microphoneTask.cancel() + state = .idle + liveSession = nil + transcriptTypewriter.clearPending() + + withAnimation { + backgroundColor = nil } + } - /// Starts queuing responses from the model for parsing. - private func startProcessingResponses() async throws { - guard let liveSession else { return } + /// Starts recording data from the user's microphone, and sends it to the model. + private func startRecording() async throws { + guard let audioController, let liveSession else { return } - for try await response in liveSession.responses { - try await processServerMessage(response) + let stream = try await audioController.listenToMic() + microphoneTask = Task { + do { + for await audioBuffer in stream { + await liveSession.sendAudioRealtime(try audioBuffer.int16Data()) } + } catch { + logger.error("\(String(describing: error))") + self.error = error + await disconnect() + } } + } - /// Requests permission to record the user's microphone, returning the result. - /// - /// This is a requirement on iOS devices, on top of needing the proper recording - /// intents. - private func requestRecordPermission() async -> Bool { - await withCheckedContinuation { cont in - if #available(iOS 17.0, *) { - Task { - let ok = await AVAudioApplication.requestRecordPermission() - cont.resume(with: .success(ok)) - } - } else { - AVAudioSession.sharedInstance().requestRecordPermission { ok in - cont.resume(with: .success(ok)) - } - } - } - } + /// Starts queuing responses from the model for parsing. + private func startProcessingResponses() async throws { + guard let liveSession else { return } - private func processServerMessage(_ message: LiveServerMessage) async throws { - switch message.payload { - case let .content(content): - try await processServerContent(content) - case let .toolCall(toolCall): - try await processFunctionCalls(functionCalls: toolCall.functionCalls ?? []) - case .toolCallCancellation: - // we don't have any long running functions to cancel - return - case let .goingAwayNotice(goingAwayNotice): - let time = goingAwayNotice.timeLeft?.description ?? "soon" - logger.warning("Going away in: \(time)") - } + for try await response in liveSession.responses { + try await processServerMessage(response) } - - private func processServerContent(_ content: LiveServerContent) async throws { - if let message = content.modelTurn { - try await processAudioMessages(message) + } + + /// Requests permission to record the user's microphone, returning the result. + /// + /// This is a requirement on iOS devices, on top of needing the proper recording + /// intents. 
+ private func requestRecordPermission() async -> Bool { + await withCheckedContinuation { cont in + if #available(iOS 17.0, *) { + Task { + let ok = await AVAudioApplication.requestRecordPermission() + cont.resume(with: .success(ok)) } - - if content.isTurnComplete { - // add a space, so the next time a transcript comes in, it's not squished with the previous one - transcriptTypewriter.appendText(" ") + } else { + AVAudioSession.sharedInstance().requestRecordPermission { ok in + cont.resume(with: .success(ok)) } + } + } + } + + private func processServerMessage(_ message: LiveServerMessage) async throws { + switch message.payload { + case let .content(content): + try await processServerContent(content) + case let .toolCall(toolCall): + try await processFunctionCalls(functionCalls: toolCall.functionCalls ?? []) + case .toolCallCancellation: + // we don't have any long running functions to cancel + return + case let .goingAwayNotice(goingAwayNotice): + let time = goingAwayNotice.timeLeft?.description ?? "soon" + logger.warning("Going away in: \(time)") + } + } - if content.wasInterrupted { - logger.warning("Model was interrupted") - await audioController?.interrupt() - transcriptTypewriter.clearPending() - // adds an em dash to indicate that the model was cutoff - transcriptTypewriter.appendText("— ") - } else if let transcript = content.outputAudioTranscription?.text { - appendAudioTranscript(transcript) - } + private func processServerContent(_ content: LiveServerContent) async throws { + if let message = content.modelTurn { + try await processAudioMessages(message) } - private func processAudioMessages(_ content: ModelContent) async throws { - for part in content.parts { - if let part = part as? InlineDataPart { - if part.mimeType.starts(with: "audio/pcm") { - if isAudioOutputEnabled { - try await audioController?.playAudio(audio: part.data) - } - } else { - logger.warning("Received non audio inline data part: \(part.mimeType)") - } - } - } + if content.isTurnComplete { + // add a space, so the next time a transcript comes in, it's not squished with the previous one + transcriptTypewriter.appendText(" ") } - private func processFunctionCalls(functionCalls: [FunctionCallPart]) async throws { - let responses = try functionCalls.map { functionCall in - switch functionCall.name { - case "changeBackgroundColor": - return try changeBackgroundColor(args: functionCall.args, id: functionCall.functionId) - case "clearBackgroundColor": - return clearBackgroundColor(id: functionCall.functionId) - default: - logger.debug("Function call: \(String(describing: functionCall))") - throw ApplicationError("Unknown function named \"\(functionCall.name)\".") - } + if content.wasInterrupted { + logger.warning("Model was interrupted") + await audioController?.interrupt() + transcriptTypewriter.clearPending() + // adds an em dash to indicate that the model was cutoff + transcriptTypewriter.appendText("— ") + } else if let transcript = content.outputAudioTranscription?.text { + appendAudioTranscript(transcript) + } + } + + private func processAudioMessages(_ content: ModelContent) async throws { + for part in content.parts { + if let part = part as? 
InlineDataPart { + if part.mimeType.starts(with: "audio/pcm") { + if isAudioOutputEnabled { + try await audioController?.playAudio(audio: part.data) + } + } else { + logger.warning("Received non audio inline data part: \(part.mimeType)") } - - await liveSession?.sendFunctionResponses(responses) + } } - - private func appendAudioTranscript(_ transcript: String) { - hasTranscripts = true - transcriptTypewriter.appendText(transcript) + } + + private func processFunctionCalls(functionCalls: [FunctionCallPart]) async throws { + let responses = try functionCalls.map { functionCall in + switch functionCall.name { + case "changeBackgroundColor": + return try changeBackgroundColor(args: functionCall.args, id: functionCall.functionId) + case "clearBackgroundColor": + return clearBackgroundColor(id: functionCall.functionId) + default: + logger.debug("Function call: \(String(describing: functionCall))") + throw ApplicationError("Unknown function named \"\(functionCall.name)\".") + } } - private func changeBackgroundColor(args: JSONObject, id: String?) throws -> FunctionResponsePart { - guard case let .string(color) = args["color"] else { - logger.debug("Function arguments: \(String(describing: args))") - throw ApplicationError("Missing `color` parameter.") - } + await liveSession?.sendFunctionResponses(responses) + } - withAnimation { - backgroundColor = Color(hex: color) - } + private func appendAudioTranscript(_ transcript: String) { + hasTranscripts = true + transcriptTypewriter.appendText(transcript) + } - if backgroundColor == nil { - logger.warning("The model sent us an invalid hex color: \(color)") - } + private func changeBackgroundColor(args: JSONObject, id: String?) throws -> FunctionResponsePart { + guard case let .string(color) = args["color"] else { + logger.debug("Function arguments: \(String(describing: args))") + throw ApplicationError("Missing `color` parameter.") + } - return FunctionResponsePart( - name: "changeBackgroundColor", - response: JSONObject(), - functionId: id - ) + withAnimation { + backgroundColor = Color(hex: color) } - private func clearBackgroundColor(id: String?) -> FunctionResponsePart { - withAnimation { - backgroundColor = nil - } + if backgroundColor == nil { + logger.warning("The model sent us an invalid hex color: \(color)") + } - return FunctionResponsePart( - name: "clearBackgroundColor", - response: JSONObject(), - functionId: id - ) + return FunctionResponsePart( + name: "changeBackgroundColor", + response: JSONObject(), + functionId: id + ) + } + + private func clearBackgroundColor(id: String?) -> FunctionResponsePart { + withAnimation { + backgroundColor = nil } + + return FunctionResponsePart( + name: "clearBackgroundColor", + response: JSONObject(), + functionId: id + ) + } } diff --git a/firebaseai/FirebaseAIExample/Features/Live/ViewModels/TypeWriterViewModel.swift b/firebaseai/FirebaseAIExample/Features/Live/ViewModels/TypeWriterViewModel.swift index 03621d6fc..ab607fd4b 100644 --- a/firebaseai/FirebaseAIExample/Features/Live/ViewModels/TypeWriterViewModel.swift +++ b/firebaseai/FirebaseAIExample/Features/Live/ViewModels/TypeWriterViewModel.swift @@ -12,85 +12,85 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-import Combine -import Foundation import SwiftUI +import Foundation +import Combine @MainActor class TypeWriterViewModel: ObservableObject { - @Published - var text: String = "" + @Published + var text: String = "" - /// How long to wait (in milliseconds) between showing the next character. - var delay: Int = 65 + /// How long to wait (in milliseconds) between showing the next character. + var delay: Int = 65 - private var pendingText = [Character]() - private var processTextTask: Task? + private var pendingText = [Character]() + private var processTextTask: Task? - init() { - processTask() - } + init() { + processTask() + } - deinit { - processTextTask?.cancel() - } + deinit { + processTextTask?.cancel() + } - /// Queues text to show. - /// - /// Since the text is queued, the text wont be displayed until the previous - /// pending text is populated. - func appendText(_ text: String) { - pendingText.append(contentsOf: text) - } + /// Queues text to show. + /// + /// Since the text is queued, the text wont be displayed until the previous + /// pending text is populated. + func appendText(_ text: String) { + pendingText.append(contentsOf: text) + } - /// Clears any text from the queue that is pending being added to the text. - func clearPending() { - pendingText.removeAll() - } + /// Clears any text from the queue that is pending being added to the text. + func clearPending() { + pendingText.removeAll() + } - /// Restarts the class to be a fresh instance. - /// - /// Effectively, this removes all the currently tracked text, - /// and any pending text. - func restart() { - clearPending() - text = "" - } + /// Restarts the class to be a fresh instance. + /// + /// Effectively, this removes all the currently tracked text, + /// and any pending text. + func restart() { + clearPending() + text = "" + } - /// Long running task for processing characters. - private func processTask() { - processTextTask = Task { - var delay = delay - while !Task.isCancelled { - try? await Task.sleep(for: .milliseconds(delay)) + /// Long running task for processing characters. + private func processTask() { + processTextTask = Task { + var delay = delay + while !Task.isCancelled { + try? await Task.sleep(for: .milliseconds(delay)) - delay = processNextCharacter() - } - } + delay = processNextCharacter() + } } + } - /// Determines the delay for the next character, adding pending text as needed. - /// - /// We don't have a delay when outputting whitespace or the end of a sentence. - /// - /// - Returns: The MS delay before working on the next character in the queue. - private func processNextCharacter() -> Int { - guard !pendingText.isEmpty else { - return delay // Default delay if no text is pending - } + /// Determines the delay for the next character, adding pending text as needed. + /// + /// We don't have a delay when outputting whitespace or the end of a sentence. + /// + /// - Returns: The MS delay before working on the next character in the queue. + private func processNextCharacter() -> Int { + guard !pendingText.isEmpty else { + return delay // Default delay if no text is pending + } - let char = pendingText.removeFirst() - text.append(char) + let char = pendingText.removeFirst() + text.append(char) - return (char.isWhitespace || char.isEndOfSentence) ? 0 : delay - } + return (char.isWhitespace || char.isEndOfSentence) ? 0 : delay + } } extension Character { - /// Marker for punctuation that dictates the end of a sentence. - /// - /// Namely, this checks for `.`, `!` and `?`. 
- var isEndOfSentence: Bool { - self == "." || self == "!" || self == "?" - } + /// Marker for punctuation that dictates the end of a sentence. + /// + /// Namely, this checks for `.`, `!` and `?`. + var isEndOfSentence: Bool { + self == "." || self == "!" || self == "?" + } } diff --git a/firebaseai/FirebaseAIExample/Features/Live/Views/AudioOutputToggle.swift b/firebaseai/FirebaseAIExample/Features/Live/Views/AudioOutputToggle.swift index 68109cf9a..02c8d40f3 100644 --- a/firebaseai/FirebaseAIExample/Features/Live/Views/AudioOutputToggle.swift +++ b/firebaseai/FirebaseAIExample/Features/Live/Views/AudioOutputToggle.swift @@ -15,25 +15,25 @@ import SwiftUI struct AudioOutputToggle: View { - @Binding var isEnabled: Bool - var onChange: () -> Void = {} + @Binding var isEnabled: Bool + var onChange: () -> Void = {} - var body: some View { - VStack(alignment: .leading, spacing: 5) { - Toggle("Audio Output", isOn: $isEnabled).onChange(of: isEnabled) { _, _ in - onChange() - } + var body: some View { + VStack(alignment: .leading, spacing: 5) { + Toggle("Audio Output", isOn: $isEnabled).onChange(of: isEnabled) { _, _ in + onChange() + } - Text(""" - Audio output works best on physical devices. Enable this to test playback in the \ - simulator. Headphones recommended. - """) - .font(.caption) - .foregroundStyle(.secondary) - } + Text(""" + Audio output works best on physical devices. Enable this to test playback in the \ + simulator. Headphones recommended. + """) + .font(.caption) + .foregroundStyle(.secondary) } + } } #Preview { - AudioOutputToggle(isEnabled: .constant(false)) + AudioOutputToggle(isEnabled: .constant(false)) } diff --git a/firebaseai/FirebaseAIExample/Features/Live/Views/ConnectButton.swift b/firebaseai/FirebaseAIExample/Features/Live/Views/ConnectButton.swift index 20eb341eb..e4ed9ef05 100644 --- a/firebaseai/FirebaseAIExample/Features/Live/Views/ConnectButton.swift +++ b/firebaseai/FirebaseAIExample/Features/Live/Views/ConnectButton.swift @@ -15,111 +15,111 @@ import SwiftUI struct ConnectButton: View { - var state: LiveViewModelState - var onConnect: () async -> Void - var onDisconnect: () async -> Void + var state: LiveViewModelState + var onConnect: () async -> Void + var onDisconnect: () async -> Void - @State private var gradientAngle: Angle = .zero + @State private var gradientAngle: Angle = .zero - private var isConnected: Bool { state == .connected } + private var isConnected: Bool { state == .connected } - private var title: String { - switch state { - case .connected: "Stop" - case .connecting: "Connecting..." - case .idle: "Start" - } + private var title: String { + switch state { + case .connected: "Stop" + case .connecting: "Connecting..." 
+ case .idle: "Start" } + } - private var image: String { - switch state { - case .connected: "stop" - case .connecting: "wifi" - case .idle: "play" - } + private var image: String { + switch state { + case .connected: "stop" + case .connecting: "wifi" + case .idle: "play" } + } - var body: some View { - Button(action: onClick) { - Label(title, systemImage: image) - .frame(maxWidth: .infinity) - .padding() - } - .buttonStyle(.connect(state: state, gradientAngle: gradientAngle)) - .onAppear { - withAnimation(.linear(duration: 5).repeatForever(autoreverses: false)) { - self.gradientAngle = .degrees(360) - } - } + var body: some View { + Button(action: onClick) { + Label(title, systemImage: image) + .frame(maxWidth: .infinity) + .padding() } + .buttonStyle(.connect(state: state, gradientAngle: gradientAngle)) + .onAppear { + withAnimation(.linear(duration: 5).repeatForever(autoreverses: false)) { + self.gradientAngle = .degrees(360) + } + } + } - private func onClick() { - Task { - if isConnected { - await onDisconnect() - } else { - await onConnect() - } - } + private func onClick() { + Task { + if isConnected { + await onDisconnect() + } else { + await onConnect() + } } + } } struct ConnectButtonStyle: ButtonStyle { - var state: LiveViewModelState - var gradientAngle: Angle + var state: LiveViewModelState + var gradientAngle: Angle - private var color: Color { - switch state { - case .connected: Color(.systemRed) - case .connecting: Color.secondary - case .idle: Color.accentColor - } + private var color: Color { + switch state { + case .connected: Color(.systemRed) + case .connecting: Color.secondary + case .idle: Color.accentColor } + } - private var gradientColors: [Color] { - switch state { - case .connected: [Color(.systemRed)] - case .connecting: [.secondary, .white] - case .idle: [ - Color(.systemRed), - Color(.systemBlue), - Color(.systemGreen), - Color(.systemYellow), - Color(.systemRed), - ] - } + private var gradientColors: [Color] { + switch state { + case .connected: [Color(.systemRed)] + case .connecting: [.secondary, .white] + case .idle: [ + Color(.systemRed), + Color(.systemBlue), + Color(.systemGreen), + Color(.systemYellow), + Color(.systemRed), + ] } + } - func makeBody(configuration: Configuration) -> some View { - configuration.label - .disabled(state == .connecting) - .overlay( - RoundedRectangle(cornerRadius: 35) - .stroke( - AngularGradient( - gradient: Gradient(colors: gradientColors), - center: .center, - startAngle: gradientAngle, - endAngle: gradientAngle + .degrees(360) - ), - lineWidth: 3 - ) - ) - .foregroundStyle(color) - } + func makeBody(configuration: Configuration) -> some View { + configuration.label + .disabled(state == .connecting) + .overlay( + RoundedRectangle(cornerRadius: 35) + .stroke( + AngularGradient( + gradient: Gradient(colors: gradientColors), + center: .center, + startAngle: gradientAngle, + endAngle: gradientAngle + .degrees(360) + ), + lineWidth: 3 + ) + ) + .foregroundStyle(color) + } } extension ButtonStyle where Self == ConnectButtonStyle { - static func connect(state: LiveViewModelState, gradientAngle: Angle) -> ConnectButtonStyle { - ConnectButtonStyle(state: state, gradientAngle: gradientAngle) - } + static func connect(state: LiveViewModelState, gradientAngle: Angle) -> ConnectButtonStyle { + ConnectButtonStyle(state: state, gradientAngle: gradientAngle) + } } #Preview { - VStack(spacing: 30) { - ConnectButton(state: .idle, onConnect: {}, onDisconnect: {}) - ConnectButton(state: .connecting, onConnect: {}, onDisconnect: {}) - 
ConnectButton(state: .connected, onConnect: {}, onDisconnect: {}) - } - .padding(.horizontal) + VStack(spacing: 30) { + ConnectButton(state: .idle, onConnect: {}, onDisconnect: {}) + ConnectButton(state: .connecting, onConnect: {}, onDisconnect: {}) + ConnectButton(state: .connected, onConnect: {}, onDisconnect: {}) + } + .padding(.horizontal) } diff --git a/firebaseai/FirebaseAIExample/Features/Live/Views/ModelAvatar.swift b/firebaseai/FirebaseAIExample/Features/Live/Views/ModelAvatar.swift index 7825fbeb1..1c36733a4 100644 --- a/firebaseai/FirebaseAIExample/Features/Live/Views/ModelAvatar.swift +++ b/firebaseai/FirebaseAIExample/Features/Live/Views/ModelAvatar.swift @@ -15,57 +15,57 @@ import SwiftUI struct ModelAvatar: View { - var isConnected = false + var isConnected = false - @State private var gradientAngle: Angle = .zero + @State private var gradientAngle: Angle = .zero - var colors: [Color] { - if isConnected { - [.red, .blue, .green, .yellow, .red] - } else { - [Color(red: 0.5, green: 0.5, blue: 0.5, opacity: 0.3)] - } + var colors: [Color] { + if isConnected { + [.red, .blue, .green, .yellow, .red] + } else { + [Color(red: 0.5, green: 0.5, blue: 0.5, opacity: 0.3)] } + } - var body: some View { - Image("gemini-logo") - .resizable() - .aspectRatio(contentMode: .fit) - .padding() - .colorMultiply(.black) - .maskedOverlay { - AngularGradient( - gradient: Gradient(colors: colors), - center: .leading, - startAngle: gradientAngle, - endAngle: gradientAngle + .degrees(360) - ) - } - .onAppear { - withAnimation(.linear(duration: 10).repeatForever(autoreverses: false)) { - self.gradientAngle = .degrees(360) - } - } - } + var body: some View { + Image("gemini-logo") + .resizable() + .aspectRatio(contentMode: .fit) + .padding() + .colorMultiply(.black) + .maskedOverlay { + AngularGradient( + gradient: Gradient(colors: colors), + center: .leading, + startAngle: gradientAngle, + endAngle: gradientAngle + .degrees(360) + ) + } + .onAppear { + withAnimation(.linear(duration: 10).repeatForever(autoreverses: false)) { + self.gradientAngle = .degrees(360) + } + } + } } extension View { - /// Creates an overlay which takes advantage of a mask to respect the size of the view. - /// - /// Especially useful when you want to create an overlay of an view with a non standard - /// size. - @ViewBuilder - func maskedOverlay(mask: () -> some View) -> some View { - overlay { - mask() - .mask { self } - } + /// Creates an overlay which takes advantage of a mask to respect the size of the view. + /// + /// Especially useful when you want to create an overlay of an view with a non standard + /// size. 
+ @ViewBuilder + func maskedOverlay(mask: () -> some View) -> some View { + overlay { + mask() + .mask { self } } + } } #Preview { - VStack { - ModelAvatar(isConnected: true) - ModelAvatar(isConnected: false) - } + VStack { + ModelAvatar(isConnected: true) + ModelAvatar(isConnected: false) + } } diff --git a/firebaseai/FirebaseAIExample/Features/Live/Views/TranscriptView.swift b/firebaseai/FirebaseAIExample/Features/Live/Views/TranscriptView.swift index 3df7f5d89..134d44df5 100644 --- a/firebaseai/FirebaseAIExample/Features/Live/Views/TranscriptView.swift +++ b/firebaseai/FirebaseAIExample/Features/Live/Views/TranscriptView.swift @@ -15,31 +15,31 @@ import SwiftUI struct TranscriptView: View { - @ObservedObject var typewriter: TypeWriterViewModel + @ObservedObject var typewriter: TypeWriterViewModel - var body: some View { - ScrollViewReader { proxy in - ScrollView { - Text(typewriter.text) - .font(.title3) - .frame(maxWidth: .infinity, alignment: .leading) - .transition(.opacity) - .padding(.horizontal) - .id("transcript") - } - .onChange(of: typewriter.text, initial: false) { _, _ in - proxy.scrollTo("transcript", anchor: .bottom) - } - } + var body: some View { + ScrollViewReader { proxy in + ScrollView { + Text(typewriter.text) + .font(.title3) + .frame(maxWidth: .infinity, alignment: .leading) + .transition(.opacity) + .padding(.horizontal) + .id("transcript") + } + .onChange(of: typewriter.text, initial: false) { old, new in + proxy.scrollTo("transcript", anchor: .bottom) + } } + } } #Preview { - let vm = TypeWriterViewModel() - TranscriptView(typewriter: vm) - .onAppear { - vm.appendText( - "The sky is blue primarily because of a phenomenon called Rayleigh scattering, where tiny molecules of gas (mainly nitrogen and oxygen) in Earth's atmosphere scatter sunlight in all directions." - ) - } + let vm = TypeWriterViewModel() + TranscriptView(typewriter: vm) + .onAppear { + vm.appendText( + "The sky is blue primarily because of a phenomenon called Rayleigh scattering, where tiny molecules of gas (mainly nitrogen and oxygen) in Earth's atmosphere scatter sunlight in all directions." + ) + } } diff --git a/firebaseai/FirebaseAIExample/Features/Multimodal/Models/MultimodalAttachment.swift b/firebaseai/FirebaseAIExample/Features/Multimodal/Models/MultimodalAttachment.swift index 3a7dcf109..c4502a567 100644 --- a/firebaseai/FirebaseAIExample/Features/Multimodal/Models/MultimodalAttachment.swift +++ b/firebaseai/FirebaseAIExample/Features/Multimodal/Models/MultimodalAttachment.swift @@ -13,280 +13,283 @@ // limitations under the License. import Foundation -import PhotosUI import SwiftUI +import PhotosUI #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif import ConversationKit public enum MultimodalAttachmentError: LocalizedError { - case unsupportedFileType(extension: String) - case noDataAvailable - case loadingFailed(Error) - case mimeTypeMismatch(expected: String, provided: String, extension: String) - - public var errorDescription: String? { - switch self { - case let .unsupportedFileType(ext): - return "Unsupported file format: .\(ext). Please select a supported format file." 
- case .noDataAvailable: - return "File data is not available" - case let .loadingFailed(error): - return "File loading failed: \(error.localizedDescription)" - case let .mimeTypeMismatch(expected, provided, ext): - return "MIME type mismatch for .\(ext) file: expected '\(expected)', got '\(provided)'" - } + case unsupportedFileType(extension: String) + case noDataAvailable + case loadingFailed(Error) + case mimeTypeMismatch(expected: String, provided: String, extension: String) + + public var errorDescription: String? { + switch self { + case let .unsupportedFileType(ext): + return "Unsupported file format: .\(ext). Please select a supported format file." + case .noDataAvailable: + return "File data is not available" + case let .loadingFailed(error): + return "File loading failed: \(error.localizedDescription)" + case let .mimeTypeMismatch(expected, provided, ext): + return "MIME type mismatch for .\(ext) file: expected '\(expected)', got '\(provided)'" } + } } // MultimodalAttachment is a struct used for transporting data between ViewModels and AttachmentPreviewCard public struct MultimodalAttachment: Attachment, Equatable { - public let id = UUID() - public let mimeType: String - public let data: Data? - public let url: URL? - public var isCloudStorage: Bool = false - - public static func == (lhs: MultimodalAttachment, rhs: MultimodalAttachment) -> Bool { - return lhs.id == rhs.id - } - - public func hash(into hasher: inout Hasher) { - hasher.combine(id) - } - - public init(mimeType: String, data: Data? = nil, url: URL? = nil) { - self.mimeType = mimeType - self.data = data - self.url = url - } - - public init(fileDataPart: FileDataPart) { - mimeType = fileDataPart.mimeType - data = nil - url = URL(string: fileDataPart.uri) - isCloudStorage = true - } + public let id = UUID() + public let mimeType: String + public let data: Data? + public let url: URL? + public var isCloudStorage: Bool = false + + public static func == (lhs: MultimodalAttachment, rhs: MultimodalAttachment) -> Bool { + return lhs.id == rhs.id + } + + public func hash(into hasher: inout Hasher) { + hasher.combine(id) + } + + public init(mimeType: String, data: Data? = nil, url: URL? 
= nil) { + self.mimeType = mimeType + self.data = data + self.url = url + } + + public init(fileDataPart: FileDataPart) { + mimeType = fileDataPart.mimeType + data = nil + url = URL(string: fileDataPart.uri) + isCloudStorage = true + } } extension MultimodalAttachment: View { - public var body: some View { - AttachmentPreviewCard(attachment: self) - } + public var body: some View { + AttachmentPreviewCard(attachment: self) + } } // validate file type & mime type -public extension MultimodalAttachment { - static let supportedFileExtensions: Set = [ - // Images - "png", "jpeg", "webp", - // Video - "flv", "mov", "mpeg", "mpegps", "mpg", "mp4", "webm", "wmv", "3gpp", - // Audio - "aac", "flac", "mp3", "mpa", "mpeg", "mpga", "mp4", "opus", "pcm", "wav", "webm", - // Documents - "pdf", "txt", - ] - - static func validateFileType(url: URL) throws { - let fileExtension = url.pathExtension.lowercased() - guard !fileExtension.isEmpty else { - throw MultimodalAttachmentError.unsupportedFileType(extension: "No extension") - } - - guard supportedFileExtensions.contains(fileExtension) else { - throw MultimodalAttachmentError.unsupportedFileType(extension: fileExtension) - } +extension MultimodalAttachment { + public static let supportedFileExtensions: Set = [ + // Images + "png", "jpeg", "webp", + // Video + "flv", "mov", "mpeg", "mpegps", "mpg", "mp4", "webm", "wmv", "3gpp", + // Audio + "aac", "flac", "mp3", "mpa", "mpeg", "mpga", "mp4", "opus", "pcm", "wav", "webm", + // Documents + "pdf", "txt", + ] + + public static func validateFileType(url: URL) throws { + let fileExtension = url.pathExtension.lowercased() + guard !fileExtension.isEmpty else { + throw MultimodalAttachmentError.unsupportedFileType(extension: "No extension") } - static func validateMimeTypeMatch(url: URL, mimeType: String) throws { - let expectedMimeType = getMimeType(for: url) - - guard mimeType == expectedMimeType else { - throw MultimodalAttachmentError.mimeTypeMismatch( - expected: expectedMimeType, - provided: mimeType, - extension: url.pathExtension - ) - } + guard supportedFileExtensions.contains(fileExtension) else { + throw MultimodalAttachmentError.unsupportedFileType(extension: fileExtension) } + } - static func validatePhotoType(_ item: PhotosPickerItem) throws -> String { - guard let fileExtension = item.supportedContentTypes.first?.preferredFilenameExtension else { - throw MultimodalAttachmentError.unsupportedFileType(extension: "No extension") - } + public static func validateMimeTypeMatch(url: URL, mimeType: String) throws { + let expectedMimeType = getMimeType(for: url) - guard supportedFileExtensions.contains(fileExtension) else { - throw MultimodalAttachmentError.unsupportedFileType(extension: fileExtension) - } + guard mimeType == expectedMimeType else { + throw MultimodalAttachmentError.mimeTypeMismatch( + expected: expectedMimeType, + provided: mimeType, + extension: url.pathExtension + ) + } + } - guard let fileMimeType = item.supportedContentTypes.first?.preferredMIMEType else { - throw MultimodalAttachmentError.unsupportedFileType(extension: "No MIME type") - } + public static func validatePhotoType(_ item: PhotosPickerItem) throws -> String { + guard let fileExtension = item.supportedContentTypes.first?.preferredFilenameExtension else { + throw MultimodalAttachmentError.unsupportedFileType(extension: "No extension") + } - return fileMimeType + guard supportedFileExtensions.contains(fileExtension) else { + throw MultimodalAttachmentError.unsupportedFileType(extension: fileExtension) } + + guard let 
fileMimeType = item.supportedContentTypes.first?.preferredMIMEType else { + throw MultimodalAttachmentError.unsupportedFileType(extension: "No MIME type") + } + + return fileMimeType + } } // load data from picker item or url -public extension MultimodalAttachment { - static func fromPhotosPickerItem(_ item: PhotosPickerItem) async throws - -> MultimodalAttachment - { - let fileMimeType = try validatePhotoType(item) - - do { - guard let data = try await item.loadTransferable(type: Data.self) else { - throw MultimodalAttachmentError.noDataAvailable - } - - return MultimodalAttachment( - mimeType: fileMimeType, - data: data - ) - } catch let error as MultimodalAttachmentError { - throw error - } catch { - throw MultimodalAttachmentError.loadingFailed(error) - } +extension MultimodalAttachment { + public static func fromPhotosPickerItem(_ item: PhotosPickerItem) async throws + -> MultimodalAttachment { + let fileMimeType = try validatePhotoType(item) + + do { + guard let data = try await item.loadTransferable(type: Data.self) else { + throw MultimodalAttachmentError.noDataAvailable + } + + return MultimodalAttachment( + mimeType: fileMimeType, + data: data + ) + } catch let error as MultimodalAttachmentError { + throw error + } catch { + throw MultimodalAttachmentError.loadingFailed(error) } + } - static func fromFilePickerItem(from url: URL) async throws -> MultimodalAttachment { - try validateFileType(url: url) + public static func fromFilePickerItem(from url: URL) async throws -> MultimodalAttachment { + try validateFileType(url: url) - do { - let data = try await Task.detached(priority: .utility) { - try Data(contentsOf: url) - }.value + do { + let data = try await Task.detached(priority: .utility) { + try Data(contentsOf: url) + }.value - let mimeType = Self.getMimeType(for: url) + let mimeType = Self.getMimeType(for: url) - return MultimodalAttachment( - mimeType: mimeType, - data: data, - url: url - ) - } catch { - throw MultimodalAttachmentError.loadingFailed(error) - } + return MultimodalAttachment( + mimeType: mimeType, + data: data, + url: url + ) + } catch { + throw MultimodalAttachmentError.loadingFailed(error) } - - static func fromURL(_ url: URL, mimeType: String) async throws -> MultimodalAttachment { - try validateFileType(url: url) - try validateMimeTypeMatch(url: url, mimeType: mimeType) - - do { - let data = try await Task.detached(priority: .utility) { - try Data(contentsOf: url) - }.value - - return MultimodalAttachment( - mimeType: mimeType, - data: data, - url: url - ) - } catch { - throw MultimodalAttachmentError.loadingFailed(error) - } + } + + public static func fromURL(_ url: URL, mimeType: String) async throws -> MultimodalAttachment { + try validateFileType(url: url) + try validateMimeTypeMatch(url: url, mimeType: mimeType) + + do { + let data = try await Task.detached(priority: .utility) { + try Data(contentsOf: url) + }.value + + return MultimodalAttachment( + mimeType: mimeType, + data: data, + url: url + ) + } catch { + throw MultimodalAttachmentError.loadingFailed(error) } + } - func toInlineDataPart() async -> InlineDataPart? { - if let data = data, !data.isEmpty { - return InlineDataPart(data: data, mimeType: mimeType) - } - - // If the data is not available, try to read it from the url. 
- guard let url = url else { return nil } - do { - let data = try await Task.detached(priority: .utility) { - try Data(contentsOf: url) - }.value - - guard !data.isEmpty else { return nil } - return InlineDataPart(data: data, mimeType: mimeType) - } catch { - return nil - } + public func toInlineDataPart() async -> InlineDataPart? { + if let data = data, !data.isEmpty { + return InlineDataPart(data: data, mimeType: mimeType) } - private static func getMimeType(for url: URL) -> String { - let fileExtension = url.pathExtension.lowercased() - - switch fileExtension { - // Images - case "png": - return "image/png" - case "jpeg": - return "image/jpeg" - case "webp": - return "image/webp" - // Video - case "flv": - return "video/x-flv" - case "mov": - return "video/quicktime" - case "mpeg": - return "video/mpeg" - case "mpegps": - return "video/mpegps" - case "mpg": - return "video/mpg" - case "mp4": - return "video/mp4" - case "webm": - return "video/webm" - case "wmv": - return "video/wmv" - case "3gpp": - return "video/3gpp" - // Audio - case "aac": - return "audio/aac" - case "flac": - return "audio/flac" - case "mp3": - return "audio/mp3" - case "mpa": - return "audio/m4a" - // TODO: Find a more accurate way to determine the MIME type. - // Commented out to silence the warning "Literal value is already handled by previous pattern; - // consider removing it". - // Context: .mpeg files are more likely to be video since MP3 files are more likely to use the - // .mp3 file extension. - // case "mpeg": - // return "audio/mpeg" - case "mpga": - return "audio/mpga" - // TODO: Find a more accurate way to determine the MIME type. - // Commented out to silence the warning "Literal value is already handled by previous pattern; - // consider removing it". - // Context: .mp4 files are potentially more likely to be video since AAC and ALAC files - // frequently use the .m4a file extension within the Apple ecosystem, though it is - // still ambiguous whether it is audio or video from the file extension alone. - // case "mp4": - // return "audio/mp4" - case "opus": - return "audio/opus" - case "wav": - return "audio/wav" - // TODO: Find a more accurate way to determine the MIME type. - // Commented out to silence the warning "Literal value is already handled by previous pattern; - // consider removing it". - // Context: .webm files are potentially more likely to be video since WebM files frequently use - // the .weba file extension when they only contain audio (Ogg Vorbis / Opus), though it - // is still ambiguous whether it is audio or video based on the file extension alone. - // case "webm": - // return "audio/webm" - // Documents / text - case "pdf": - return "application/pdf" - case "txt": - return "text/plain" - default: - return "application/octet-stream" - } + // If the data is not available, try to read it from the url. 
+ guard let url = url else { return nil } + do { + let data = try await Task.detached(priority: .utility) { + try Data(contentsOf: url) + }.value + + guard !data.isEmpty else { return nil } + return InlineDataPart(data: data, mimeType: mimeType) + } catch { + return nil + } + } + + private static func getMimeType(for url: URL) -> String { + let fileExtension = url.pathExtension.lowercased() + + switch fileExtension { + // Images + case "png": + return "image/png" + case "jpeg": + return "image/jpeg" + case "webp": + return "image/webp" + + // Video + case "flv": + return "video/x-flv" + case "mov": + return "video/quicktime" + case "mpeg": + return "video/mpeg" + case "mpegps": + return "video/mpegps" + case "mpg": + return "video/mpg" + case "mp4": + return "video/mp4" + case "webm": + return "video/webm" + case "wmv": + return "video/wmv" + case "3gpp": + return "video/3gpp" + + // Audio + case "aac": + return "audio/aac" + case "flac": + return "audio/flac" + case "mp3": + return "audio/mp3" + case "mpa": + return "audio/m4a" + // TODO: Find a more accurate way to determine the MIME type. + // Commented out to silence the warning "Literal value is already handled by previous pattern; + // consider removing it". + // Context: .mpeg files are more likely to be video since MP3 files are more likely to use the + // .mp3 file extension. + // case "mpeg": + // return "audio/mpeg" + case "mpga": + return "audio/mpga" + // TODO: Find a more accurate way to determine the MIME type. + // Commented out to silence the warning "Literal value is already handled by previous pattern; + // consider removing it". + // Context: .mp4 files are potentially more likely to be video since AAC and ALAC files + // frequently use the .m4a file extension within the Apple ecosystem, though it is + // still ambiguous whether it is audio or video from the file extension alone. + // case "mp4": + // return "audio/mp4" + case "opus": + return "audio/opus" + case "wav": + return "audio/wav" + // TODO: Find a more accurate way to determine the MIME type. + // Commented out to silence the warning "Literal value is already handled by previous pattern; + // consider removing it". + // Context: .webm files are potentially more likely to be video since WebM files frequently use + // the .weba file extension when they only contain audio (Ogg Vorbis / Opus), though it + // is still ambiguous whether it is audio or video based on the file extension alone. + // case "webm": + // return "audio/webm" + + // Documents / text + case "pdf": + return "application/pdf" + case "txt": + return "text/plain" + + default: + return "application/octet-stream" } + } } diff --git a/firebaseai/FirebaseAIExample/Features/Multimodal/Screens/MultimodalScreen.swift b/firebaseai/FirebaseAIExample/Features/Multimodal/Screens/MultimodalScreen.swift index 0a9fee931..c3623cc77 100644 --- a/firebaseai/FirebaseAIExample/Features/Multimodal/Screens/MultimodalScreen.swift +++ b/firebaseai/FirebaseAIExample/Features/Multimodal/Screens/MultimodalScreen.swift @@ -13,188 +13,187 @@ // limitations under the License. 
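The TODO comments in getMimeType(for:) above note that the hand-written extension-to-MIME table cannot disambiguate containers such as .mp4 or .webm. Purely as a hedged illustration (the helper name below is hypothetical and not part of this sample), UniformTypeIdentifiers could resolve the extension through the system type database instead, with the same generic fallback:

import UniformTypeIdentifiers

// Hypothetical alternative to the manual switch in getMimeType(for:).
// Note: this still cannot tell audio-only from video containers; it only
// delegates the lookup to the system's type database.
private static func systemMimeType(for url: URL) -> String {
  UTType(filenameExtension: url.pathExtension.lowercased())?.preferredMIMEType
    ?? "application/octet-stream"
}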
#if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif -import ConversationKit -import PhotosUI import SwiftUI +import PhotosUI +import ConversationKit struct MultimodalScreen: View { - let backendType: BackendOption - @StateObject var viewModel: MultimodalViewModel + let backendType: BackendOption + @StateObject var viewModel: MultimodalViewModel - @State private var showingPhotoPicker = false - @State private var showingFilePicker = false - @State private var showingLinkDialog = false - @State private var linkText = "" - @State private var linkMimeType = "" - @State private var selectedPhotoItems = [PhotosPickerItem]() + @State private var showingPhotoPicker = false + @State private var showingFilePicker = false + @State private var showingLinkDialog = false + @State private var linkText = "" + @State private var linkMimeType = "" + @State private var selectedPhotoItems = [PhotosPickerItem]() - init(backendType: BackendOption, sample: Sample? = nil) { - self.backendType = backendType - _viewModel = - StateObject(wrappedValue: MultimodalViewModel(backendType: backendType, - sample: sample)) - } + init(backendType: BackendOption, sample: Sample? = nil) { + self.backendType = backendType + _viewModel = + StateObject(wrappedValue: MultimodalViewModel(backendType: backendType, + sample: sample)) + } - var body: some View { - NavigationStack { - ConversationView(messages: $viewModel.messages, - attachments: $viewModel.attachments, - userPrompt: viewModel.initialPrompt) - { message in - MessageView(message: message) - } - .attachmentActions { - Button(action: showLinkDialog) { - Label("Link", systemImage: "link") - } - Button(action: showFilePicker) { - Label("File", systemImage: "doc.text") - } - Button(action: showPhotoPicker) { - Label("Photo", systemImage: "photo.on.rectangle.angled") - } - } - .onSendMessage { message in - await viewModel.sendMessage(message.content ?? 
"", streaming: true) - } - .onError { _ in - viewModel.presentErrorDetails = true - } - .sheet(isPresented: $viewModel.presentErrorDetails) { - if let error = viewModel.error { - ErrorDetailsView(error: error) - } - } - .photosPicker( - isPresented: $showingPhotoPicker, - selection: $selectedPhotoItems, - maxSelectionCount: 5, - matching: .any(of: [.images, .videos]) - ) - .fileImporter( - isPresented: $showingFilePicker, - allowedContentTypes: [.pdf, .audio], - allowsMultipleSelection: true - ) { result in - handleFileImport(result) - } - .alert("Add Web URL", isPresented: $showingLinkDialog) { - TextField("Enter URL", text: $linkText) - TextField("Enter mimeType", text: $linkMimeType) - Button("Add") { - handleLinkAttachment() - } - Button("Cancel", role: .cancel) { - linkText = "" - linkMimeType = "" - } - } + var body: some View { + NavigationStack { + ConversationView(messages: $viewModel.messages, + attachments: $viewModel.attachments, + userPrompt: viewModel.initialPrompt) { message in + MessageView(message: message) + } + .attachmentActions { + Button(action: showLinkDialog) { + Label("Link", systemImage: "link") } - .onChange(of: selectedPhotoItems) { _, newItems in - handlePhotoSelection(newItems) + Button(action: showFilePicker) { + Label("File", systemImage: "doc.text") } - .toolbar { - ToolbarItem(placement: .primaryAction) { - Button(action: newChat) { - Image(systemName: "square.and.pencil") - } - } + Button(action: showPhotoPicker) { + Label("Photo", systemImage: "photo.on.rectangle.angled") + } + } + .onSendMessage { message in + await viewModel.sendMessage(message.content ?? "", streaming: true) + } + .onError { error in + viewModel.presentErrorDetails = true + } + .sheet(isPresented: $viewModel.presentErrorDetails) { + if let error = viewModel.error { + ErrorDetailsView(error: error) } - .navigationTitle(viewModel.title) - .navigationBarTitleDisplayMode(.inline) + } + .photosPicker( + isPresented: $showingPhotoPicker, + selection: $selectedPhotoItems, + maxSelectionCount: 5, + matching: .any(of: [.images, .videos]) + ) + .fileImporter( + isPresented: $showingFilePicker, + allowedContentTypes: [.pdf, .audio], + allowsMultipleSelection: true + ) { result in + handleFileImport(result) + } + .alert("Add Web URL", isPresented: $showingLinkDialog) { + TextField("Enter URL", text: $linkText) + TextField("Enter mimeType", text: $linkMimeType) + Button("Add") { + handleLinkAttachment() + } + Button("Cancel", role: .cancel) { + linkText = "" + linkMimeType = "" + } + } } - - private func newChat() { - viewModel.startNewChat() + .onChange(of: selectedPhotoItems) { _, newItems in + handlePhotoSelection(newItems) } - - private func showPhotoPicker() { - showingPhotoPicker = true + .toolbar { + ToolbarItem(placement: .primaryAction) { + Button(action: newChat) { + Image(systemName: "square.and.pencil") + } + } } + .navigationTitle(viewModel.title) + .navigationBarTitleDisplayMode(.inline) + } - private func showFilePicker() { - showingFilePicker = true - } + private func newChat() { + viewModel.startNewChat() + } + + private func showPhotoPicker() { + showingPhotoPicker = true + } + + private func showFilePicker() { + showingFilePicker = true + } + + private func showLinkDialog() { + showingLinkDialog = true + } - private func showLinkDialog() { - showingLinkDialog = true + private func handlePhotoSelection(_ items: [PhotosPickerItem]) { + Task { + for item in items { + do { + let attachment = try await MultimodalAttachment.fromPhotosPickerItem(item) + await MainActor.run { + 
viewModel.addAttachment(attachment) + } + } catch { + await MainActor.run { + viewModel.error = error + viewModel.presentErrorDetails = true + } + } + } + await MainActor.run { + selectedPhotoItems = [] + } } + } - private func handlePhotoSelection(_ items: [PhotosPickerItem]) { - Task { - for item in items { - do { - let attachment = try await MultimodalAttachment.fromPhotosPickerItem(item) - await MainActor.run { - viewModel.addAttachment(attachment) - } - } catch { - await MainActor.run { - viewModel.error = error - viewModel.presentErrorDetails = true - } - } + private func handleFileImport(_ result: Result<[URL], Error>) { + switch result { + case let .success(urls): + Task { + for url in urls { + do { + let attachment = try await MultimodalAttachment.fromFilePickerItem(from: url) + await MainActor.run { + viewModel.addAttachment(attachment) } + } catch { await MainActor.run { - selectedPhotoItems = [] + viewModel.error = error + viewModel.presentErrorDetails = true } + } } + } + case let .failure(error): + viewModel.error = error + viewModel.presentErrorDetails = true } + } - private func handleFileImport(_ result: Result<[URL], Error>) { - switch result { - case let .success(urls): - Task { - for url in urls { - do { - let attachment = try await MultimodalAttachment.fromFilePickerItem(from: url) - await MainActor.run { - viewModel.addAttachment(attachment) - } - } catch { - await MainActor.run { - viewModel.error = error - viewModel.presentErrorDetails = true - } - } - } - } - case let .failure(error): - viewModel.error = error - viewModel.presentErrorDetails = true - } + private func handleLinkAttachment() { + guard !linkText.isEmpty, let url = URL(string: linkText) else { + return } - private func handleLinkAttachment() { - guard !linkText.isEmpty, let url = URL(string: linkText) else { - return + let trimmedMime = linkMimeType.lowercased().trimmingCharacters(in: .whitespacesAndNewlines) + Task { + do { + let attachment = try await MultimodalAttachment.fromURL(url, mimeType: trimmedMime) + await MainActor.run { + viewModel.addAttachment(attachment) } - - let trimmedMime = linkMimeType.lowercased().trimmingCharacters(in: .whitespacesAndNewlines) - Task { - do { - let attachment = try await MultimodalAttachment.fromURL(url, mimeType: trimmedMime) - await MainActor.run { - viewModel.addAttachment(attachment) - } - } catch { - await MainActor.run { - viewModel.error = error - viewModel.presentErrorDetails = true - } - } - await MainActor.run { - linkText = "" - linkMimeType = "" - } + } catch { + await MainActor.run { + viewModel.error = error + viewModel.presentErrorDetails = true } + } + await MainActor.run { + linkText = "" + linkMimeType = "" + } } + } } #Preview { - MultimodalScreen(backendType: .googleAI) + MultimodalScreen(backendType: .googleAI) } diff --git a/firebaseai/FirebaseAIExample/Features/Multimodal/ViewModels/MultimodalViewModel.swift b/firebaseai/FirebaseAIExample/Features/Multimodal/ViewModels/MultimodalViewModel.swift index b5a861ee2..cb13cb694 100644 --- a/firebaseai/FirebaseAIExample/Features/Multimodal/ViewModels/MultimodalViewModel.swift +++ b/firebaseai/FirebaseAIExample/Features/Multimodal/ViewModels/MultimodalViewModel.swift @@ -13,211 +13,211 @@ // limitations under the License. 
#if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif -import AVFoundation -import Combine -import ConversationKit import Foundation import OSLog import PhotosUI import SwiftUI +import AVFoundation +import Combine +import ConversationKit @MainActor class MultimodalViewModel: ObservableObject { - @Published var messages = [ChatMessage]() - @Published var initialPrompt: String = "" - @Published var title: String = "" - @Published var error: Error? - @Published var inProgress = false - - @Published var presentErrorDetails: Bool = false - - @Published var attachments = [MultimodalAttachment]() - - private var model: GenerativeModel - private var chat: Chat - private var chatTask: Task? - private let logger = Logger(subsystem: "com.example.firebaseai", category: "MultimodalViewModel") - - private var sample: Sample? - private var backendType: BackendOption - private var fileDataParts: [FileDataPart]? - - init(backendType: BackendOption, sample: Sample? = nil) { - self.sample = sample - self.backendType = backendType - - let firebaseService = backendType == .googleAI - ? FirebaseAI.firebaseAI(backend: .googleAI()) - : FirebaseAI.firebaseAI(backend: .vertexAI()) + @Published var messages = [ChatMessage]() + @Published var initialPrompt: String = "" + @Published var title: String = "" + @Published var error: Error? + @Published var inProgress = false + + @Published var presentErrorDetails: Bool = false + + @Published var attachments = [MultimodalAttachment]() + + private var model: GenerativeModel + private var chat: Chat + private var chatTask: Task? + private let logger = Logger(subsystem: "com.example.firebaseai", category: "MultimodalViewModel") + + private var sample: Sample? + private var backendType: BackendOption + private var fileDataParts: [FileDataPart]? + + init(backendType: BackendOption, sample: Sample? = nil) { + self.sample = sample + self.backendType = backendType + + let firebaseService = backendType == .googleAI + ? FirebaseAI.firebaseAI(backend: .googleAI()) + : FirebaseAI.firebaseAI(backend: .vertexAI()) + + model = firebaseService.generativeModel( + modelName: sample?.modelName ?? "gemini-2.5-flash", + systemInstruction: sample?.systemInstruction + ) + + if let chatHistory = sample?.chatHistory, !chatHistory.isEmpty { + messages = ChatMessage.from(chatHistory) + chat = model.startChat(history: chatHistory) + } else { + chat = model.startChat() + } - model = firebaseService.generativeModel( - modelName: sample?.modelName ?? "gemini-2.5-flash", - systemInstruction: sample?.systemInstruction - ) + initialPrompt = sample?.initialPrompt ?? "" + title = sample?.title ?? 
"" - if let chatHistory = sample?.chatHistory, !chatHistory.isEmpty { - messages = ChatMessage.from(chatHistory) - chat = model.startChat(history: chatHistory) + fileDataParts = sample?.fileDataParts + if let fileDataParts = fileDataParts, !fileDataParts.isEmpty { + for fileDataPart in fileDataParts { + attachments.append(MultimodalAttachment(fileDataPart: fileDataPart)) + } + } + } + + func sendMessage(_ text: String, streaming: Bool = true) async { + error = nil + if streaming { + await internalSendMessageStreaming(text) + } else { + await internalSendMessage(text) + } + } + + func startNewChat() { + stop() + error = nil + chat = model.startChat() + messages.removeAll() + attachments.removeAll() + initialPrompt = "" + } + + func stop() { + chatTask?.cancel() + error = nil + } + + private func internalSendMessageStreaming(_ text: String) async { + chatTask?.cancel() + + chatTask = Task { + inProgress = true + defer { + inProgress = false + } + + let userMessage = ChatMessage(content: text, participant: .user, attachments: attachments) + messages.append(userMessage) + let systemMessage = ChatMessage.pending(participant: .other) + messages.append(systemMessage) + + do { + var parts: [any PartsRepresentable] = [text] + + if backendType == .vertexAI, let fileDataParts = fileDataParts { + // This is a patch for Cloud Storage support. Only available when using Vertex AI Gemini API. + // For non-text inputs (e.g., media files), you can attach files from Cloud Storage to the request. + // if you do not want to use Cloud Storage, you can remove this `if` statement. + // Reference: https://firebase.google.com/docs/ai-logic/solutions/cloud-storage + for fileDataPart in fileDataParts { + parts.append(fileDataPart) + } } else { - chat = model.startChat() + for attachment in attachments { + if let inlineDataPart = await attachment.toInlineDataPart() { + parts.append(inlineDataPart) + } + } } - initialPrompt = sample?.initialPrompt ?? "" - title = sample?.title ?? "" + attachments.removeAll() - fileDataParts = sample?.fileDataParts - if let fileDataParts = fileDataParts, !fileDataParts.isEmpty { - for fileDataPart in fileDataParts { - attachments.append(MultimodalAttachment(fileDataPart: fileDataPart)) - } + let responseStream = try chat.sendMessageStream(parts) + for try await chunk in responseStream { + messages[messages.count - 1].pending = false + if let text = chunk.text { + messages[messages.count - 1] + .content = (messages[messages.count - 1].content ?? "") + text + } } + } catch { + self.error = error + logger.error("\(error.localizedDescription)") + let errorMessage = ChatMessage(content: "An error occurred. Please try again.", + participant: .other, + error: error, + pending: false) + messages[messages.count - 1] = errorMessage + } } - - func sendMessage(_ text: String, streaming: Bool = true) async { - error = nil - if streaming { - await internalSendMessageStreaming(text) + } + + private func internalSendMessage(_ text: String) async { + chatTask?.cancel() + + chatTask = Task { + inProgress = true + defer { + inProgress = false + } + let userMessage = ChatMessage(content: text, participant: .user, attachments: attachments) + messages.append(userMessage) + + let systemMessage = ChatMessage.pending(participant: .other) + messages.append(systemMessage) + + do { + var parts: [any PartsRepresentable] = [text] + + if backendType == .vertexAI, let fileDataParts = fileDataParts { + // This is a patch for Cloud Storage support. Only available when using Vertex AI Gemini API. 
+ // For non-text inputs (e.g., media files), you can attach files from Cloud Storage to the request. + // if you do not want to use Cloud Storage, you can remove this `if` statement. + // Reference: https://firebase.google.com/docs/ai-logic/solutions/cloud-storage + for fileDataPart in fileDataParts { + parts.append(fileDataPart) + } } else { - await internalSendMessage(text) + for attachment in attachments { + if let inlineDataPart = await attachment.toInlineDataPart() { + parts.append(inlineDataPart) + } + } } - } - func startNewChat() { - stop() - error = nil - chat = model.startChat() - messages.removeAll() attachments.removeAll() - initialPrompt = "" - } - func stop() { - chatTask?.cancel() - error = nil - } + let response = try await chat.sendMessage(parts) - private func internalSendMessageStreaming(_ text: String) async { - chatTask?.cancel() - - chatTask = Task { - inProgress = true - defer { - inProgress = false - } - - let userMessage = ChatMessage(content: text, participant: .user, attachments: attachments) - messages.append(userMessage) - let systemMessage = ChatMessage.pending(participant: .other) - messages.append(systemMessage) - - do { - var parts: [any PartsRepresentable] = [text] - - if backendType == .vertexAI, let fileDataParts = fileDataParts { - // This is a patch for Cloud Storage support. Only available when using Vertex AI Gemini API. - // For non-text inputs (e.g., media files), you can attach files from Cloud Storage to the request. - // if you do not want to use Cloud Storage, you can remove this `if` statement. - // Reference: https://firebase.google.com/docs/ai-logic/solutions/cloud-storage - for fileDataPart in fileDataParts { - parts.append(fileDataPart) - } - } else { - for attachment in attachments { - if let inlineDataPart = await attachment.toInlineDataPart() { - parts.append(inlineDataPart) - } - } - } - - attachments.removeAll() - - let responseStream = try chat.sendMessageStream(parts) - for try await chunk in responseStream { - messages[messages.count - 1].pending = false - if let text = chunk.text { - messages[messages.count - 1] - .content = (messages[messages.count - 1].content ?? "") + text - } - } - } catch { - self.error = error - logger.error("\(error.localizedDescription)") - let errorMessage = ChatMessage(content: "An error occurred. Please try again.", - participant: .other, - error: error, - pending: false) - messages[messages.count - 1] = errorMessage - } + if let responseText = response.text { + messages[messages.count - 1].content = responseText + messages[messages.count - 1].pending = false } + } catch { + self.error = error + logger.error("\(error.localizedDescription)") + let errorMessage = ChatMessage(content: "An error occurred. Please try again.", + participant: .other, + error: error, + pending: false) + messages[messages.count - 1] = errorMessage + } } + } - private func internalSendMessage(_ text: String) async { - chatTask?.cancel() + func addAttachment(_ attachment: MultimodalAttachment) { + attachments.append(attachment) + } - chatTask = Task { - inProgress = true - defer { - inProgress = false - } - let userMessage = ChatMessage(content: text, participant: .user, attachments: attachments) - messages.append(userMessage) - - let systemMessage = ChatMessage.pending(participant: .other) - messages.append(systemMessage) - - do { - var parts: [any PartsRepresentable] = [text] - - if backendType == .vertexAI, let fileDataParts = fileDataParts { - // This is a patch for Cloud Storage support. 
Only available when using Vertex AI Gemini API. - // For non-text inputs (e.g., media files), you can attach files from Cloud Storage to the request. - // if you do not want to use Cloud Storage, you can remove this `if` statement. - // Reference: https://firebase.google.com/docs/ai-logic/solutions/cloud-storage - for fileDataPart in fileDataParts { - parts.append(fileDataPart) - } - } else { - for attachment in attachments { - if let inlineDataPart = await attachment.toInlineDataPart() { - parts.append(inlineDataPart) - } - } - } - - attachments.removeAll() - - let response = try await chat.sendMessage(parts) - - if let responseText = response.text { - messages[messages.count - 1].content = responseText - messages[messages.count - 1].pending = false - } - } catch { - self.error = error - logger.error("\(error.localizedDescription)") - let errorMessage = ChatMessage(content: "An error occurred. Please try again.", - participant: .other, - error: error, - pending: false) - messages[messages.count - 1] = errorMessage - } - } - } - - func addAttachment(_ attachment: MultimodalAttachment) { - attachments.append(attachment) + func removeAttachment(_ attachment: MultimodalAttachment) { + if attachment.isCloudStorage { + // Remove corresponding fileDataPart when attachment is deleted. + fileDataParts?.removeAll { $0.uri == attachment.url?.absoluteString } } - func removeAttachment(_ attachment: MultimodalAttachment) { - if attachment.isCloudStorage { - // Remove corresponding fileDataPart when attachment is deleted. - fileDataParts?.removeAll { $0.uri == attachment.url?.absoluteString } - } - - attachments.removeAll { $0.id == attachment.id } - } + attachments.removeAll { $0.id == attachment.id } + } } diff --git a/firebaseai/FirebaseAIExample/Features/Multimodal/Views/AttachmentPreviewCard.swift b/firebaseai/FirebaseAIExample/Features/Multimodal/Views/AttachmentPreviewCard.swift index fa922a6d5..5ba537de0 100644 --- a/firebaseai/FirebaseAIExample/Features/Multimodal/Views/AttachmentPreviewCard.swift +++ b/firebaseai/FirebaseAIExample/Features/Multimodal/Views/AttachmentPreviewCard.swift @@ -15,157 +15,157 @@ import SwiftUI private enum AttachmentType: String { - case image, video, audio, pdf, other - - init(mimeType: String) { - let mt = mimeType.lowercased() - if mt.hasPrefix("image/") { self = .image } - else if mt.hasPrefix("video/") { self = .video } - else if mt.hasPrefix("audio/") { self = .audio } - else if mt == "application/pdf" { self = .pdf } - else { self = .other } + case image, video, audio, pdf, other + + init(mimeType: String) { + let mt = mimeType.lowercased() + if mt.hasPrefix("image/") { self = .image } + else if mt.hasPrefix("video/") { self = .video } + else if mt.hasPrefix("audio/") { self = .audio } + else if mt == "application/pdf" { self = .pdf } + else { self = .other } + } + + var systemImageName: String { + switch self { + case .image: return "photo" + case .video: return "video" + case .audio: return "waveform" + case .pdf: return "doc.text" + case .other: return "questionmark" } - - var systemImageName: String { - switch self { - case .image: return "photo" - case .video: return "video" - case .audio: return "waveform" - case .pdf: return "doc.text" - case .other: return "questionmark" - } - } - - var typeTagColor: Color { - switch self { - case .image: return .green - case .video: return .purple - case .audio: return .orange - case .pdf: return .red - case .other: return .blue - } + } + + var typeTagColor: Color { + switch self { + case .image: return .green + case 
.video: return .purple + case .audio: return .orange + case .pdf: return .red + case .other: return .blue } - - var displayFileType: String { - switch self { - case .image: return "IMAGE" - case .video: return "VIDEO" - case .audio: return "AUDIO" - case .pdf: return "PDF" - case .other: return "UNKNOWN" - } + } + + var displayFileType: String { + switch self { + case .image: return "IMAGE" + case .video: return "VIDEO" + case .audio: return "AUDIO" + case .pdf: return "PDF" + case .other: return "UNKNOWN" } + } } struct AttachmentPreviewCard: View { - let attachment: MultimodalAttachment - - private var attachmentType: AttachmentType { - AttachmentType(mimeType: attachment.mimeType) - } - - var body: some View { - HStack(spacing: 12) { - Image(systemName: attachmentType.systemImageName) - .font(.system(size: 20)) - .foregroundColor(.blue) - .frame(width: 40, height: 40) - .background(Color.blue.opacity(0.1)) - .clipShape(RoundedRectangle(cornerRadius: 6)) - - VStack(alignment: .leading, spacing: 4) { - Text(displayName) - .font(.system(size: 14, weight: .medium)) - .lineLimit(1) - .truncationMode(.middle) - .foregroundColor(.primary) - - HStack(spacing: 8) { - Text(attachmentType.displayFileType) - .font(.system(size: 10, weight: .semibold)) - .padding(.horizontal, 6) - .padding(.vertical, 2) - .background(attachmentType.typeTagColor) - .foregroundColor(.white) - .clipShape(Capsule()) - - Spacer() - } - } + let attachment: MultimodalAttachment + + private var attachmentType: AttachmentType { + AttachmentType(mimeType: attachment.mimeType) + } + + var body: some View { + HStack(spacing: 12) { + Image(systemName: attachmentType.systemImageName) + .font(.system(size: 20)) + .foregroundColor(.blue) + .frame(width: 40, height: 40) + .background(Color.blue.opacity(0.1)) + .clipShape(RoundedRectangle(cornerRadius: 6)) + + VStack(alignment: .leading, spacing: 4) { + Text(displayName) + .font(.system(size: 14, weight: .medium)) + .lineLimit(1) + .truncationMode(.middle) + .foregroundColor(.primary) + + HStack(spacing: 8) { + Text(attachmentType.displayFileType) + .font(.system(size: 10, weight: .semibold)) + .padding(.horizontal, 6) + .padding(.vertical, 2) + .background(attachmentType.typeTagColor) + .foregroundColor(.white) + .clipShape(Capsule()) + + Spacer() } - .frame(width: 180) - .padding(12) - .background(Color(.systemGray6)) - .clipShape(RoundedRectangle(cornerRadius: 12)) - .overlay( - RoundedRectangle(cornerRadius: 12) - .stroke(Color(.separator), lineWidth: 0.5) - ) + } } - - private var displayName: String { - let fileName = attachment.url?.lastPathComponent ?? "Default" - let maxLength = 30 - if fileName.count <= maxLength { - return fileName - } - - let prefixName = fileName.prefix(15) - let suffixName = fileName.suffix(10) - return "\(prefixName)...\(suffixName)" + .frame(width: 180) + .padding(12) + .background(Color(.systemGray6)) + .clipShape(RoundedRectangle(cornerRadius: 12)) + .overlay( + RoundedRectangle(cornerRadius: 12) + .stroke(Color(.separator), lineWidth: 0.5) + ) + } + + private var displayName: String { + let fileName = attachment.url?.lastPathComponent ?? 
"Default" + let maxLength = 30 + if fileName.count <= maxLength { + return fileName } + + let prefixName = fileName.prefix(15) + let suffixName = fileName.suffix(10) + return "\(prefixName)...\(suffixName)" + } } struct AttachmentPreviewScrollView: View { - let attachments: [MultimodalAttachment] - - var body: some View { - if !attachments.isEmpty { - ScrollView(.horizontal, showsIndicators: false) { - HStack { - ForEach(attachments) { attachment in - AttachmentPreviewCard( - attachment: attachment, - ) - } - } - .padding(.horizontal, 8) - } - } else { - EmptyView() + let attachments: [MultimodalAttachment] + + var body: some View { + if !attachments.isEmpty { + ScrollView(.horizontal, showsIndicators: false) { + HStack { + ForEach(attachments) { attachment in + AttachmentPreviewCard( + attachment: attachment, + ) + } } + .padding(.horizontal, 8) + } + } else { + EmptyView() } + } } #Preview { - VStack(spacing: 20) { - AttachmentPreviewCard( - attachment: MultimodalAttachment( - mimeType: "image/jpeg", - data: Data() - ), - ) - - AttachmentPreviewCard( - attachment: MultimodalAttachment( - mimeType: "application/pdf", - data: Data() - ), - ) - - AttachmentPreviewCard( - attachment: MultimodalAttachment( - mimeType: "video/mp4", - data: Data() - ), - ) - - AttachmentPreviewCard( - attachment: MultimodalAttachment( - mimeType: "audio/mpeg", - data: Data() - ), - ) - } - .padding() + VStack(spacing: 20) { + AttachmentPreviewCard( + attachment: MultimodalAttachment( + mimeType: "image/jpeg", + data: Data() + ), + ) + + AttachmentPreviewCard( + attachment: MultimodalAttachment( + mimeType: "application/pdf", + data: Data() + ), + ) + + AttachmentPreviewCard( + attachment: MultimodalAttachment( + mimeType: "video/mp4", + data: Data() + ), + ) + + AttachmentPreviewCard( + attachment: MultimodalAttachment( + mimeType: "audio/mpeg", + data: Data() + ), + ) + } + .padding() } diff --git a/firebaseai/FirebaseAIExample/FirebaseAIExampleApp.swift b/firebaseai/FirebaseAIExample/FirebaseAIExampleApp.swift index c3011a778..99194a765 100644 --- a/firebaseai/FirebaseAIExample/FirebaseAIExampleApp.swift +++ b/firebaseai/FirebaseAIExample/FirebaseAIExampleApp.swift @@ -17,43 +17,42 @@ import SwiftUI import TipKit class AppDelegate: NSObject, UIApplicationDelegate { - func application(_: UIApplication, - didFinishLaunchingWithOptions _: [UIApplication - .LaunchOptionsKey: Any]? = nil) -> Bool - { - // Recommendation: Protect your Vertex AI API resources from abuse by preventing unauthorized - // clients using App Check; see https://firebase.google.com/docs/app-check#get_started. - - FirebaseApp.configure() - - if let firebaseApp = FirebaseApp.app(), firebaseApp.options.projectID == "mockproject-1234" { - guard let bundleID = Bundle.main.bundleIdentifier else { fatalError() } - fatalError(""" - You must create and/or download a valid `GoogleService-Info.plist` file for \(bundleID) from \ - https://console.firebase.google.com to run this example. Replace the existing \ - `GoogleService-Info.plist` file in the `firebaseai` directory with this new file. - """) - } - - return true + func application(_ application: UIApplication, + didFinishLaunchingWithOptions launchOptions: [UIApplication + .LaunchOptionsKey: Any]? = nil) -> Bool { + // Recommendation: Protect your Vertex AI API resources from abuse by preventing unauthorized + // clients using App Check; see https://firebase.google.com/docs/app-check#get_started. 
+ + FirebaseApp.configure() + + if let firebaseApp = FirebaseApp.app(), firebaseApp.options.projectID == "mockproject-1234" { + guard let bundleID = Bundle.main.bundleIdentifier else { fatalError() } + fatalError(""" + You must create and/or download a valid `GoogleService-Info.plist` file for \(bundleID) from \ + https://console.firebase.google.com to run this example. Replace the existing \ + `GoogleService-Info.plist` file in the `firebaseai` directory with this new file. + """) } + + return true + } } @main struct FirebaseAIExampleApp: App { - @UIApplicationDelegateAdaptor var appDelegate: AppDelegate - - init() { - do { - try Tips.configure() - } catch { - print("Error initializing tips: \(error)") - } + @UIApplicationDelegateAdaptor var appDelegate: AppDelegate + + init() { + do { + try Tips.configure() + } catch { + print("Error initializing tips: \(error)") } + } - var body: some Scene { - WindowGroup { - ContentView() - } + var body: some Scene { + WindowGroup { + ContentView() } + } } diff --git a/firebaseai/FirebaseAIExample/Shared/ApplicationError.swift b/firebaseai/FirebaseAIExample/Shared/ApplicationError.swift index 1ea8bd155..344a97472 100644 --- a/firebaseai/FirebaseAIExample/Shared/ApplicationError.swift +++ b/firebaseai/FirebaseAIExample/Shared/ApplicationError.swift @@ -16,15 +16,15 @@ import Foundation /// Generic error for issues that occur within the application. public struct ApplicationError: Error, Sendable, CustomNSError { - let localizedDescription: String + let localizedDescription: String - init(_ localizedDescription: String) { - self.localizedDescription = localizedDescription - } + init(_ localizedDescription: String) { + self.localizedDescription = localizedDescription + } - public var errorUserInfo: [String: Any] { - [ - NSLocalizedDescriptionKey: localizedDescription, - ] - } + public var errorUserInfo: [String: Any] { + [ + NSLocalizedDescriptionKey: localizedDescription, + ] + } } diff --git a/firebaseai/FirebaseAIExample/Shared/Audio/AudioBufferHelpers.swift b/firebaseai/FirebaseAIExample/Shared/Audio/AudioBufferHelpers.swift index 3e260d96c..504d09620 100644 --- a/firebaseai/FirebaseAIExample/Shared/Audio/AudioBufferHelpers.swift +++ b/firebaseai/FirebaseAIExample/Shared/Audio/AudioBufferHelpers.swift @@ -15,77 +15,77 @@ import AVFoundation extension AVAudioPCMBuffer { - /// Creates a new `AVAudioPCMBuffer` from a `Data` struct. - /// - /// Only works with interleaved data. - static func fromInterleavedData(data: Data, format: AVAudioFormat) throws -> AVAudioPCMBuffer? { - guard format.isInterleaved else { - throw ApplicationError("Only interleaved data is supported") - } - - let frameCapacity = AVAudioFrameCount(data - .count / Int(format.streamDescription.pointee.mBytesPerFrame)) - guard let buffer = AVAudioPCMBuffer(pcmFormat: format, frameCapacity: frameCapacity) else { - return nil - } + /// Creates a new `AVAudioPCMBuffer` from a `Data` struct. + /// + /// Only works with interleaved data. + static func fromInterleavedData(data: Data, format: AVAudioFormat) throws -> AVAudioPCMBuffer? 
{ + guard format.isInterleaved else { + throw ApplicationError("Only interleaved data is supported") + } - buffer.frameLength = frameCapacity - data.withUnsafeBytes { bytes in - guard let baseAddress = bytes.baseAddress else { return } - let dst = buffer.mutableAudioBufferList.pointee.mBuffers - dst.mData?.copyMemory(from: baseAddress, byteCount: Int(dst.mDataByteSize)) - } + let frameCapacity = AVAudioFrameCount(data + .count / Int(format.streamDescription.pointee.mBytesPerFrame)) + guard let buffer = AVAudioPCMBuffer(pcmFormat: format, frameCapacity: frameCapacity) else { + return nil + } - return buffer + buffer.frameLength = frameCapacity + data.withUnsafeBytes { bytes in + guard let baseAddress = bytes.baseAddress else { return } + let dst = buffer.mutableAudioBufferList.pointee.mBuffers + dst.mData?.copyMemory(from: baseAddress, byteCount: Int(dst.mDataByteSize)) } - /// Gets the underlying `Data` in this buffer. - /// - /// Will throw an error if this buffer doesn't hold int16 data. - func int16Data() throws -> Data { - guard let bufferPtr = audioBufferList.pointee.mBuffers.mData else { - throw ApplicationError("Missing audio buffer list") - } + return buffer + } - let audioBufferLenth = Int(audioBufferList.pointee.mBuffers.mDataByteSize) - return Data(bytes: bufferPtr, count: audioBufferLenth) + /// Gets the underlying `Data` in this buffer. + /// + /// Will throw an error if this buffer doesn't hold int16 data. + func int16Data() throws -> Data { + guard let bufferPtr = audioBufferList.pointee.mBuffers.mData else { + throw ApplicationError("Missing audio buffer list") } + + let audioBufferLenth = Int(audioBufferList.pointee.mBuffers.mDataByteSize) + return Data(bytes: bufferPtr, count: audioBufferLenth) + } } extension AVAudioConverter { - /// Uses the converter to convert the provided `buffer`. - /// - /// Will handle determining the proper frame capacity, ensuring formats align, and propagating any - /// errors that occur. - /// - /// - Returns: A new buffer, with the converted data. - func convertBuffer(_ buffer: AVAudioPCMBuffer) throws -> AVAudioPCMBuffer { - if buffer.format == outputFormat { return buffer } - guard buffer.format == inputFormat else { - throw ApplicationError("The buffer's format was different than the converter's input format") - } - - let frameCapacity = AVAudioFrameCount( - ceil(Double(buffer.frameLength) * outputFormat.sampleRate / inputFormat.sampleRate) - ) + /// Uses the converter to convert the provided `buffer`. + /// + /// Will handle determining the proper frame capacity, ensuring formats align, and propagating any + /// errors that occur. + /// + /// - Returns: A new buffer, with the converted data. + func convertBuffer(_ buffer: AVAudioPCMBuffer) throws -> AVAudioPCMBuffer { + if buffer.format == outputFormat { return buffer } + guard buffer.format == inputFormat else { + throw ApplicationError("The buffer's format was different than the converter's input format") + } - guard let output = AVAudioPCMBuffer( - pcmFormat: outputFormat, - frameCapacity: frameCapacity - ) else { - throw ApplicationError("Failed to create output buffer") - } + let frameCapacity = AVAudioFrameCount( + ceil(Double(buffer.frameLength) * outputFormat.sampleRate / inputFormat.sampleRate) + ) - var error: NSError? 
- convert(to: output, error: &error) { _, status in - status.pointee = .haveData - return buffer - } + guard let output = AVAudioPCMBuffer( + pcmFormat: outputFormat, + frameCapacity: frameCapacity + ) else { + throw ApplicationError("Failed to create output buffer") + } - if let error { - throw ApplicationError("Failed to convert buffer: \(error.localizedDescription)") - } + var error: NSError? + convert(to: output, error: &error) { _, status in + status.pointee = .haveData + return buffer + } - return output + if let error { + throw ApplicationError("Failed to convert buffer: \(error.localizedDescription)") } + + return output + } } diff --git a/firebaseai/FirebaseAIExample/Shared/Audio/AudioPlayer.swift b/firebaseai/FirebaseAIExample/Shared/Audio/AudioPlayer.swift index fe551025a..ed224cf48 100644 --- a/firebaseai/FirebaseAIExample/Shared/Audio/AudioPlayer.swift +++ b/firebaseai/FirebaseAIExample/Shared/Audio/AudioPlayer.swift @@ -18,71 +18,71 @@ import OSLog /// Plays back audio through the primary output device. class AudioPlayer { - private var logger = Logger(subsystem: Bundle.main.bundleIdentifier!, category: "generative-ai") + private var logger = Logger(subsystem: Bundle.main.bundleIdentifier!, category: "generative-ai") - private let engine: AVAudioEngine - private let inputFormat: AVAudioFormat - private let outputFormat: AVAudioFormat - private let playbackNode: AVAudioPlayerNode - private var formatConverter: AVAudioConverter + private let engine: AVAudioEngine + private let inputFormat: AVAudioFormat + private let outputFormat: AVAudioFormat + private let playbackNode: AVAudioPlayerNode + private var formatConverter: AVAudioConverter - init(engine: AVAudioEngine, inputFormat: AVAudioFormat, outputFormat: AVAudioFormat) throws { - self.engine = engine + init(engine: AVAudioEngine, inputFormat: AVAudioFormat, outputFormat: AVAudioFormat) throws { + self.engine = engine - guard let formatConverter = AVAudioConverter(from: inputFormat, to: outputFormat) else { - throw ApplicationError("Failed to create the audio converter") - } - - let playbackNode = AVAudioPlayerNode() - - engine.attach(playbackNode) - engine.connect(playbackNode, to: engine.mainMixerNode, format: outputFormat) - - self.inputFormat = inputFormat - self.outputFormat = outputFormat - self.formatConverter = formatConverter - self.playbackNode = playbackNode + guard let formatConverter = AVAudioConverter(from: inputFormat, to: outputFormat) else { + throw ApplicationError("Failed to create the audio converter") } - deinit { - stop() - } + let playbackNode = AVAudioPlayerNode() - /// Queue audio to be played through the output device. - /// - /// Note that in a real app, you'd ideally schedule the data before converting it, and then mark data as consumed after its been played - /// back. That way, if the audio route changes during playback, you can requeue the buffer on the new output device. - /// - /// For the sake of simplicity, that is not implemented here; a route change will prevent the currently queued conversation from - /// being played through the output device. 
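The doc comment above describes, but deliberately does not implement, re-queueing audio after an output route change. A minimal sketch of that idea (the class, its name, and the observer wiring are assumptions, not part of AudioPlayer) could look like this:

import AVFoundation

// Sketch: keep scheduled buffers around and replay whatever is still pending
// when the output route changes (e.g. headphones are unplugged).
final class RequeueingPlayback {
  private let node: AVAudioPlayerNode
  private var pending: [AVAudioPCMBuffer] = []
  private var routeObserver: NSObjectProtocol?

  init(node: AVAudioPlayerNode) {
    self.node = node
    routeObserver = NotificationCenter.default.addObserver(
      forName: AVAudioSession.routeChangeNotification,
      object: nil,
      queue: .main
    ) { [weak self] _ in
      // Assumption: the caller has already reconnected the node to the new
      // output; this only reschedules what had not finished playing.
      self?.requeuePending()
    }
  }

  deinit {
    if let routeObserver { NotificationCenter.default.removeObserver(routeObserver) }
  }

  func schedule(_ buffer: AVAudioPCMBuffer) {
    // A production version would also prune buffers once they have played.
    pending.append(buffer)
    node.scheduleBuffer(buffer, at: nil)
    node.play()
  }

  private func requeuePending() {
    node.stop()
    for buffer in pending { node.scheduleBuffer(buffer, at: nil) }
    node.play()
  }
}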
- public func play(_ audio: Data) throws { - guard engine.isRunning else { - logger.warning("Audio engine needs to be running to play audio.") - return - } + engine.attach(playbackNode) + engine.connect(playbackNode, to: engine.mainMixerNode, format: outputFormat) - guard let inputBuffer = try AVAudioPCMBuffer.fromInterleavedData( - data: audio, - format: inputFormat - ) else { - throw ApplicationError("Failed to create input buffer for playback") - } + self.inputFormat = inputFormat + self.outputFormat = outputFormat + self.formatConverter = formatConverter + self.playbackNode = playbackNode + } - let buffer = try formatConverter.convertBuffer(inputBuffer) + deinit { + stop() + } - playbackNode.scheduleBuffer(buffer, at: nil) - playbackNode.play() + /// Queue audio to be played through the output device. + /// + /// Note that in a real app, you'd ideally schedule the data before converting it, and then mark data as consumed after its been played + /// back. That way, if the audio route changes during playback, you can requeue the buffer on the new output device. + /// + /// For the sake of simplicity, that is not implemented here; a route change will prevent the currently queued conversation from + /// being played through the output device. + public func play(_ audio: Data) throws { + guard engine.isRunning else { + logger.warning("Audio engine needs to be running to play audio.") + return } - /// Stops the current audio playing. - public func interrupt() { - playbackNode.stop() + guard let inputBuffer = try AVAudioPCMBuffer.fromInterleavedData( + data: audio, + format: inputFormat + ) else { + throw ApplicationError("Failed to create input buffer for playback") } - /// Permanently stop all audio playback. - public func stop() { - interrupt() - engine.disconnectNodeInput(playbackNode) - engine.disconnectNodeOutput(playbackNode) - } + let buffer = try formatConverter.convertBuffer(inputBuffer) + + playbackNode.scheduleBuffer(buffer, at: nil) + playbackNode.play() + } + + /// Stops the current audio playing. + public func interrupt() { + playbackNode.stop() + } + + /// Permanently stop all audio playback. + public func stop() { + interrupt() + engine.disconnectNodeInput(playbackNode) + engine.disconnectNodeOutput(playbackNode) + } } diff --git a/firebaseai/FirebaseAIExample/Shared/Audio/Microphone.swift b/firebaseai/FirebaseAIExample/Shared/Audio/Microphone.swift index 62e9833d8..7d182bad6 100644 --- a/firebaseai/FirebaseAIExample/Shared/Audio/Microphone.swift +++ b/firebaseai/FirebaseAIExample/Shared/Audio/Microphone.swift @@ -12,51 +12,51 @@ // See the License for the specific language governing permissions and // limitations under the License. -import AVFoundation import Foundation +import AVFoundation /// Microphone bindings using Apple's AudioEngine API. class Microphone { - /// Data recorded from the microphone. 
- public let audio: AsyncStream - private let audioQueue: AsyncStream.Continuation - - private let inputNode: AVAudioInputNode - private let audioEngine: AVAudioEngine - - private var isRunning = false - - init(engine: AVAudioEngine) { - let (audio, audioQueue) = AsyncStream.makeStream() - - self.audio = audio - self.audioQueue = audioQueue - inputNode = engine.inputNode - audioEngine = engine - } - - deinit { - stop() - } - - public func start() { - guard !isRunning else { return } - isRunning = true - - // 50ms buffer size for balancing latency and cpu overhead - let targetBufferSize = UInt32(inputNode.outputFormat(forBus: 0).sampleRate / 20) - inputNode - .installTap(onBus: 0, bufferSize: targetBufferSize, format: nil) { [weak self] buffer, _ in - guard let self else { return } - audioQueue.yield(buffer) - } - } - - public func stop() { - audioQueue.finish() - if isRunning { - isRunning = false - inputNode.removeTap(onBus: 0) - } + /// Data recorded from the microphone. + public let audio: AsyncStream + private let audioQueue: AsyncStream.Continuation + + private let inputNode: AVAudioInputNode + private let audioEngine: AVAudioEngine + + private var isRunning = false + + init(engine: AVAudioEngine) { + let (audio, audioQueue) = AsyncStream.makeStream() + + self.audio = audio + self.audioQueue = audioQueue + inputNode = engine.inputNode + audioEngine = engine + } + + deinit { + stop() + } + + public func start() { + guard !isRunning else { return } + isRunning = true + + // 50ms buffer size for balancing latency and cpu overhead + let targetBufferSize = UInt32(inputNode.outputFormat(forBus: 0).sampleRate / 20) + inputNode + .installTap(onBus: 0, bufferSize: targetBufferSize, format: nil) { [weak self] buffer, _ in + guard let self else { return } + audioQueue.yield(buffer) + } + } + + public func stop() { + audioQueue.finish() + if isRunning { + isRunning = false + inputNode.removeTap(onBus: 0) } + } } diff --git a/firebaseai/FirebaseAIExample/Shared/Models/BackendOption.swift b/firebaseai/FirebaseAIExample/Shared/Models/BackendOption.swift index 8ac801d28..0731fba0c 100644 --- a/firebaseai/FirebaseAIExample/Shared/Models/BackendOption.swift +++ b/firebaseai/FirebaseAIExample/Shared/Models/BackendOption.swift @@ -15,8 +15,8 @@ import Foundation enum BackendOption: String, CaseIterable, Identifiable { - case googleAI = "Google AI" - case vertexAI = "Firebase Vertex AI" + case googleAI = "Google AI" + case vertexAI = "Firebase Vertex AI" - var id: String { rawValue } + var id: String { rawValue } } diff --git a/firebaseai/FirebaseAIExample/Shared/Models/Sample.swift b/firebaseai/FirebaseAIExample/Shared/Models/Sample.swift index 126a6866f..954a05872 100644 --- a/firebaseai/FirebaseAIExample/Shared/Models/Sample.swift +++ b/firebaseai/FirebaseAIExample/Shared/Models/Sample.swift @@ -14,303 +14,302 @@ import Foundation #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif public struct Sample: Identifiable { - public let id = UUID() - public let title: String - public let description: String - public let useCases: [UseCase] - public let navRoute: String - public let modelName: String - public let chatHistory: [ModelContent]? - public let initialPrompt: String? - public let systemInstruction: ModelContent? - public let tools: [Tool]? - public let generationConfig: GenerationConfig? - public let liveGenerationConfig: LiveGenerationConfig? - public let fileDataParts: [FileDataPart]? - public let tip: InlineTip? 
+ public let id = UUID() + public let title: String + public let description: String + public let useCases: [UseCase] + public let navRoute: String + public let modelName: String + public let chatHistory: [ModelContent]? + public let initialPrompt: String? + public let systemInstruction: ModelContent? + public let tools: [Tool]? + public let generationConfig: GenerationConfig? + public let liveGenerationConfig: LiveGenerationConfig? + public let fileDataParts: [FileDataPart]? + public let tip: InlineTip? - public init(title: String, - description: String, - useCases: [UseCase], - navRoute: String, - modelName: String = "gemini-2.5-flash", - chatHistory: [ModelContent]? = nil, - initialPrompt: String? = nil, - systemInstruction: ModelContent? = nil, - tools: [Tool]? = nil, - generationConfig: GenerationConfig? = nil, - liveGenerationConfig: LiveGenerationConfig? = nil, - fileDataParts: [FileDataPart]? = nil, - tip: InlineTip? = nil) - { - self.title = title - self.description = description - self.useCases = useCases - self.navRoute = navRoute - self.modelName = modelName - self.chatHistory = chatHistory - self.initialPrompt = initialPrompt - self.systemInstruction = systemInstruction - self.tools = tools - self.generationConfig = generationConfig - self.liveGenerationConfig = liveGenerationConfig - self.fileDataParts = fileDataParts - self.tip = tip - } + public init(title: String, + description: String, + useCases: [UseCase], + navRoute: String, + modelName: String = "gemini-2.5-flash", + chatHistory: [ModelContent]? = nil, + initialPrompt: String? = nil, + systemInstruction: ModelContent? = nil, + tools: [Tool]? = nil, + generationConfig: GenerationConfig? = nil, + liveGenerationConfig: LiveGenerationConfig? = nil, + fileDataParts: [FileDataPart]? = nil, + tip: InlineTip? = nil) { + self.title = title + self.description = description + self.useCases = useCases + self.navRoute = navRoute + self.modelName = modelName + self.chatHistory = chatHistory + self.initialPrompt = initialPrompt + self.systemInstruction = systemInstruction + self.tools = tools + self.generationConfig = generationConfig + self.liveGenerationConfig = liveGenerationConfig + self.fileDataParts = fileDataParts + self.tip = tip + } } -public extension Sample { - static let samples: [Sample] = [ - // Text - Sample( - title: "Travel tips", - description: "The user wants the model to help a new traveler" + - " with travel tips", - useCases: [.text], - navRoute: "ChatScreen", - chatHistory: [ - ModelContent( - role: "user", - parts: "I have never traveled before. When should I book a flight?" - ), - ModelContent( - role: "model", - parts: "You should book flights a couple of months ahead of time. It will be cheaper and more flexible for you." - ), - ModelContent(role: "user", parts: "Do I need a passport?"), - ModelContent( - role: "model", - parts: "If you are traveling outside your own country, make sure your passport is up-to-date and valid for more than 6 months during your travel." - ), - ], - initialPrompt: "What else is important when traveling?", - systemInstruction: ModelContent(parts: "You are a Travel assistant. You will answer" + - " questions the user asks based on the information listed" + - " in Relevant Information. Do not hallucinate. Do not use" + - " the internet."), - ), - Sample( - title: "Hello world (with template)", - description: "Uses a template to say hello. 
The template uses 'name' and 'language' (defaults to Spanish) as inputs.", - useCases: [.text], - navRoute: "GenerateContentFromTemplateScreen", - initialPrompt: "Peter", - systemInstruction: ModelContent( - parts: "The user's name is {{name}}. They prefer to communicate in {{language}}." - ) - ), - Sample( - title: "Chatbot recommendations for courses", - description: "A chatbot suggests courses for a performing arts program.", - useCases: [.text], - navRoute: "ChatScreen", - initialPrompt: "I am interested in Performing Arts. I have taken Theater 1A.", - systemInstruction: ModelContent(parts: "You are a chatbot for the county's performing and fine arts" + - " program. You help students decide what course they will" + - " take during the summer."), - ), - // Image - Sample( - title: "Blog post creator", - description: "Create a blog post from an image file stored in Cloud Storage.", - useCases: [.image], - navRoute: "MultimodalScreen", - initialPrompt: "Write a short, engaging blog post based on this picture." + - " It should include a description of the meal in the" + - " photo and talk about my journey meal prepping.", - fileDataParts: [ - FileDataPart( - uri: "https://storage.googleapis.com/cloud-samples-data/generative-ai/image/meal-prep.jpeg", - mimeType: "image/jpeg" - ), - ] +extension Sample { + public static let samples: [Sample] = [ + // Text + Sample( + title: "Travel tips", + description: "The user wants the model to help a new traveler" + + " with travel tips", + useCases: [.text], + navRoute: "ChatScreen", + chatHistory: [ + ModelContent( + role: "user", + parts: "I have never traveled before. When should I book a flight?" ), - Sample( - title: "Imagen - image generation", - description: "Generate images using Imagen 3", - useCases: [.image], - navRoute: "ImagenScreen", - initialPrompt: "A photo of a modern building with water in the background" + ModelContent( + role: "model", + parts: "You should book flights a couple of months ahead of time. It will be cheaper and more flexible for you." ), - Sample( - title: "[T] Imagen - image generation", - description: "[T] Generate images using Imagen 3", - useCases: [.image], - navRoute: "ImagenFromTemplateScreen", - initialPrompt: "A photo of a modern building with water in the background" + ModelContent(role: "user", parts: "Do I need a passport?"), + ModelContent( + role: "model", + parts: "If you are traveling outside your own country, make sure your passport is up-to-date and valid for more than 6 months during your travel." ), - Sample( - title: "Gemini Flash - image generation", - description: "Generate and/or edit images using Gemini 2.0 Flash", - useCases: [.image], - navRoute: "ChatScreen", - modelName: "gemini-2.0-flash-preview-image-generation", - initialPrompt: "Hi, can you create a 3d rendered image of a pig " + - "with wings and a top hat flying over a happy " + - "futuristic scifi city with lots of greenery?", - generationConfig: GenerationConfig(responseModalities: [.text, .image]), + ], + initialPrompt: "What else is important when traveling?", + systemInstruction: ModelContent(parts: "You are a Travel assistant. You will answer" + + " questions the user asks based on the information listed" + + " in Relevant Information. Do not hallucinate. Do not use" + + " the internet."), + ), + Sample( + title: "Hello world (with template)", + description: "Uses a template to say hello. 
The template uses 'name' and 'language' (defaults to Spanish) as inputs.", + useCases: [.text], + navRoute: "GenerateContentFromTemplateScreen", + initialPrompt: "Peter", + systemInstruction: ModelContent( + parts: "The user's name is {{name}}. They prefer to communicate in {{language}}." + ) + ), + Sample( + title: "Chatbot recommendations for courses", + description: "A chatbot suggests courses for a performing arts program.", + useCases: [.text], + navRoute: "ChatScreen", + initialPrompt: "I am interested in Performing Arts. I have taken Theater 1A.", + systemInstruction: ModelContent(parts: "You are a chatbot for the county's performing and fine arts" + + " program. You help students decide what course they will" + + " take during the summer."), + ), + // Image + Sample( + title: "Blog post creator", + description: "Create a blog post from an image file stored in Cloud Storage.", + useCases: [.image], + navRoute: "MultimodalScreen", + initialPrompt: "Write a short, engaging blog post based on this picture." + + " It should include a description of the meal in the" + + " photo and talk about my journey meal prepping.", + fileDataParts: [ + FileDataPart( + uri: "https://storage.googleapis.com/cloud-samples-data/generative-ai/image/meal-prep.jpeg", + mimeType: "image/jpeg" ), - // Video - Sample( - title: "Hashtags for a video", - description: "Generate hashtags for a video ad stored in Cloud Storage.", - useCases: [.video], - navRoute: "MultimodalScreen", - initialPrompt: "Generate 5-10 hashtags that relate to the video content." + - " Try to use more popular and engaging terms," + - " e.g. #Viral. Do not add content not related to" + - " the video.\n Start the output with 'Tags:'", - fileDataParts: [ - FileDataPart( - uri: "https://storage.googleapis.com/cloud-samples-data/generative-ai/video/google_home_celebrity_ad.mp4", - mimeType: "video/mp4" - ), - ] + ] + ), + Sample( + title: "Imagen - image generation", + description: "Generate images using Imagen 3", + useCases: [.image], + navRoute: "ImagenScreen", + initialPrompt: "A photo of a modern building with water in the background" + ), + Sample( + title: "[T] Imagen - image generation", + description: "[T] Generate images using Imagen 3", + useCases: [.image], + navRoute: "ImagenFromTemplateScreen", + initialPrompt: "A photo of a modern building with water in the background" + ), + Sample( + title: "Gemini Flash - image generation", + description: "Generate and/or edit images using Gemini 2.0 Flash", + useCases: [.image], + navRoute: "ChatScreen", + modelName: "gemini-2.0-flash-preview-image-generation", + initialPrompt: "Hi, can you create a 3d rendered image of a pig " + + "with wings and a top hat flying over a happy " + + "futuristic scifi city with lots of greenery?", + generationConfig: GenerationConfig(responseModalities: [.text, .image]), + ), + // Video + Sample( + title: "Hashtags for a video", + description: "Generate hashtags for a video ad stored in Cloud Storage.", + useCases: [.video], + navRoute: "MultimodalScreen", + initialPrompt: "Generate 5-10 hashtags that relate to the video content." + + " Try to use more popular and engaging terms," + + " e.g. #Viral. 
Do not add content not related to" + + " the video.\n Start the output with 'Tags:'", + fileDataParts: [ + FileDataPart( + uri: "https://storage.googleapis.com/cloud-samples-data/generative-ai/video/google_home_celebrity_ad.mp4", + mimeType: "video/mp4" ), - Sample( - title: "Summarize video", - description: "Summarize a video and extract important dialogue.", - useCases: [.video], - navRoute: "MultimodalScreen", - chatHistory: [ - ModelContent(role: "user", parts: "Can you help me with the description of a video file?"), - ModelContent( - role: "model", - parts: "Sure! Click on the attach button below and choose a video file for me to describe." - ), - ], - initialPrompt: "I have attached the video file. Provide a description of" + - " the video. The description should also contain" + - " anything important which people say in the video." - ), - // Audio - Sample( - title: "Audio Summarization", - description: "Summarize an audio file", - useCases: [.audio], - navRoute: "MultimodalScreen", - chatHistory: [ - ModelContent(role: "user", parts: "Can you help me summarize an audio file?"), - ModelContent( - role: "model", - parts: "Of course! Click on the attach button below and choose an audio file for me to summarize." - ), - ], - initialPrompt: "I have attached the audio file. Please analyze it and summarize the contents" + - " of the audio as bullet points." + ] + ), + Sample( + title: "Summarize video", + description: "Summarize a video and extract important dialogue.", + useCases: [.video], + navRoute: "MultimodalScreen", + chatHistory: [ + ModelContent(role: "user", parts: "Can you help me with the description of a video file?"), + ModelContent( + role: "model", + parts: "Sure! Click on the attach button below and choose a video file for me to describe." ), - Sample( - title: "Translation from audio", - description: "Translate an audio file stored in Cloud Storage", - useCases: [.audio], - navRoute: "MultimodalScreen", - initialPrompt: "Please translate the audio in Mandarin.", - fileDataParts: [ - FileDataPart( - uri: "https://storage.googleapis.com/cloud-samples-data/generative-ai/audio/How_to_create_a_My_Map_in_Google_Maps.mp3", - mimeType: "audio/mp3" - ), - ] + ], + initialPrompt: "I have attached the video file. Provide a description of" + + " the video. The description should also contain" + + " anything important which people say in the video." + ), + // Audio + Sample( + title: "Audio Summarization", + description: "Summarize an audio file", + useCases: [.audio], + navRoute: "MultimodalScreen", + chatHistory: [ + ModelContent(role: "user", parts: "Can you help me summarize an audio file?"), + ModelContent( + role: "model", + parts: "Of course! Click on the attach button below and choose an audio file for me to summarize." ), - // Document - Sample( - title: "Document comparison", - description: "Compare the contents of 2 documents." + - " Supported by the Vertex AI Gemini API because the documents are stored in Cloud Storage", - useCases: [.document], - navRoute: "MultimodalScreen", - initialPrompt: "The first document is from 2013, and the second document is" + - " from 2023. 
How did the standard deduction evolve?", - fileDataParts: [ - FileDataPart( - uri: "https://storage.googleapis.com/cloud-samples-data/generative-ai/pdf/form_1040_2013.pdf", - mimeType: "application/pdf" - ), - FileDataPart( - uri: "https://storage.googleapis.com/cloud-samples-data/generative-ai/pdf/form_1040_2023.pdf", - mimeType: "application/pdf" - ), - ] + ], + initialPrompt: "I have attached the audio file. Please analyze it and summarize the contents" + + " of the audio as bullet points." + ), + Sample( + title: "Translation from audio", + description: "Translate an audio file stored in Cloud Storage", + useCases: [.audio], + navRoute: "MultimodalScreen", + initialPrompt: "Please translate the audio in Mandarin.", + fileDataParts: [ + FileDataPart( + uri: "https://storage.googleapis.com/cloud-samples-data/generative-ai/audio/How_to_create_a_My_Map_in_Google_Maps.mp3", + mimeType: "audio/mp3" ), - // Function Calling - Sample( - title: "Weather Chat", - description: "Use function calling to get the weather conditions" + - " for a specific US city on a specific date.", - useCases: [.functionCalling, .text], - navRoute: "FunctionCallingScreen", - initialPrompt: "What was the weather in Boston, MA on October 17, 2024?", - tools: [.functionDeclarations([ - FunctionDeclaration( - name: "fetchWeather", - description: "Get the weather conditions for a specific US city on a specific date", - parameters: [ - "city": .string(description: "The US city of the location"), - "state": .string(description: "The US state of the location"), - "date": .string(description: "The date for which to get the weather." + - " Date must be in the format: YYYY-MM-DD"), - ] - ), - ])] + ] + ), + // Document + Sample( + title: "Document comparison", + description: "Compare the contents of 2 documents." + + " Supported by the Vertex AI Gemini API because the documents are stored in Cloud Storage", + useCases: [.document], + navRoute: "MultimodalScreen", + initialPrompt: "The first document is from 2013, and the second document is" + + " from 2023. 
How did the standard deduction evolve?", + fileDataParts: [ + FileDataPart( + uri: "https://storage.googleapis.com/cloud-samples-data/generative-ai/pdf/form_1040_2013.pdf", + mimeType: "application/pdf" ), - // Grounding - Sample( - title: "Grounding with Google Search", - description: "Use Grounding with Google Search to get responses based on up-to-date information from the web.", - useCases: [.text], - navRoute: "GroundingScreen", - initialPrompt: "What's the weather in Chicago this weekend?", - tools: [.googleSearch()] + FileDataPart( + uri: "https://storage.googleapis.com/cloud-samples-data/generative-ai/pdf/form_1040_2023.pdf", + mimeType: "application/pdf" ), - // Live API - Sample( - title: "Live native audio", - description: "Use the Live API to talk with the model via native audio.", - useCases: [.audio], - navRoute: "LiveScreen", - liveGenerationConfig: LiveGenerationConfig( - responseModalities: [.audio], - speech: SpeechConfig(voiceName: "Zephyr", languageCode: "en-US"), - outputAudioTranscription: AudioTranscriptionConfig() - ) + ] + ), + // Function Calling + Sample( + title: "Weather Chat", + description: "Use function calling to get the weather conditions" + + " for a specific US city on a specific date.", + useCases: [.functionCalling, .text], + navRoute: "FunctionCallingScreen", + initialPrompt: "What was the weather in Boston, MA on October 17, 2024?", + tools: [.functionDeclarations([ + FunctionDeclaration( + name: "fetchWeather", + description: "Get the weather conditions for a specific US city on a specific date", + parameters: [ + "city": .string(description: "The US city of the location"), + "state": .string(description: "The US state of the location"), + "date": .string(description: "The date for which to get the weather." + + " Date must be in the format: YYYY-MM-DD"), + ] ), - Sample( - title: "Live function calling", - description: "Use function calling with the Live API to ask the model to change the background color.", - useCases: [.functionCalling, .audio], - navRoute: "LiveScreen", - tools: [ - .functionDeclarations([ - FunctionDeclaration( - name: "changeBackgroundColor", - description: "Changes the background color to the specified hex color.", - parameters: [ - "color": .string( - description: "Hex code of the color to change to. 
(eg, #F54927)" - ), - ], - ), - FunctionDeclaration( - name: "clearBackgroundColor", - description: "Removes the background color.", - parameters: [:] - ), - ]), + ])] + ), + // Grounding + Sample( + title: "Grounding with Google Search", + description: "Use Grounding with Google Search to get responses based on up-to-date information from the web.", + useCases: [.text], + navRoute: "GroundingScreen", + initialPrompt: "What's the weather in Chicago this weekend?", + tools: [.googleSearch()] + ), + // Live API + Sample( + title: "Live native audio", + description: "Use the Live API to talk with the model via native audio.", + useCases: [.audio], + navRoute: "LiveScreen", + liveGenerationConfig: LiveGenerationConfig( + responseModalities: [.audio], + speech: SpeechConfig(voiceName: "Zephyr", languageCode: "en-US"), + outputAudioTranscription: AudioTranscriptionConfig() + ) + ), + Sample( + title: "Live function calling", + description: "Use function calling with the Live API to ask the model to change the background color.", + useCases: [.functionCalling, .audio], + navRoute: "LiveScreen", + tools: [ + .functionDeclarations([ + FunctionDeclaration( + name: "changeBackgroundColor", + description: "Changes the background color to the specified hex color.", + parameters: [ + "color": .string( + description: "Hex code of the color to change to. (eg, #F54927)" + ), ], - liveGenerationConfig: LiveGenerationConfig( - responseModalities: [.audio], - speech: SpeechConfig(voiceName: "Zephyr", languageCode: "en-US"), - outputAudioTranscription: AudioTranscriptionConfig() - ), - tip: InlineTip(text: "Try asking the model to change the background color"), - ), - ] + ), + FunctionDeclaration( + name: "clearBackgroundColor", + description: "Removes the background color.", + parameters: [:] + ), + ]), + ], + liveGenerationConfig: LiveGenerationConfig( + responseModalities: [.audio], + speech: SpeechConfig(voiceName: "Zephyr", languageCode: "en-US"), + outputAudioTranscription: AudioTranscriptionConfig() + ), + tip: InlineTip(text: "Try asking the model to change the background color"), + ), + ] - static var sample = samples[0] + public static var sample = samples[0] } diff --git a/firebaseai/FirebaseAIExample/Shared/Models/UseCase.swift b/firebaseai/FirebaseAIExample/Shared/Models/UseCase.swift index d57573070..ee4e80f8a 100644 --- a/firebaseai/FirebaseAIExample/Shared/Models/UseCase.swift +++ b/firebaseai/FirebaseAIExample/Shared/Models/UseCase.swift @@ -15,13 +15,13 @@ import Foundation public enum UseCase: String, CaseIterable, Identifiable { - case all = "All" - case text = "Text" - case image = "Image" - case video = "Video" - case audio = "Audio" - case document = "Document" - case functionCalling = "Function Calling" + case all = "All" + case text = "Text" + case image = "Image" + case video = "Video" + case audio = "Audio" + case document = "Document" + case functionCalling = "Function Calling" - public var id: String { rawValue } + public var id: String { rawValue } } diff --git a/firebaseai/FirebaseAIExample/Shared/Util/Color+Hex.swift b/firebaseai/FirebaseAIExample/Shared/Util/Color+Hex.swift index 1e7f71aa1..74e0d1513 100644 --- a/firebaseai/FirebaseAIExample/Shared/Util/Color+Hex.swift +++ b/firebaseai/FirebaseAIExample/Shared/Util/Color+Hex.swift @@ -15,30 +15,30 @@ import SwiftUI extension Color { - /// Creates a new `Color` instance from a hex string. - /// - /// Supports both RGB and RGBA hex strings. 
- init?(hex: String) { - let hex = hex.replacingOccurrences(of: "#", with: "").uppercased() + /// Creates a new `Color` instance from a hex string. + /// + /// Supports both RGB and RGBA hex strings. + init?(hex: String) { + let hex = hex.replacingOccurrences(of: "#", with: "").uppercased() - var rgb: UInt64 = 0 - guard Scanner(string: hex).scanHexInt64(&rgb) else { return nil } + var rgb: UInt64 = 0 + guard Scanner(string: hex).scanHexInt64(&rgb) else { return nil } - var r: CGFloat = 0, g: CGFloat = 0, b: CGFloat = 0, a: CGFloat = 1 + var r: CGFloat = 0, g: CGFloat = 0, b: CGFloat = 0, a: CGFloat = 1 - if hex.count == 6 { - r = CGFloat((rgb & 0xFF0000) >> 16) / 255.0 - g = CGFloat((rgb & 0x00FF00) >> 8) / 255.0 - b = CGFloat(rgb & 0x0000FF) / 255.0 - } else if hex.count == 8 { - r = CGFloat((rgb & 0xFF00_0000) >> 24) / 255.0 - g = CGFloat((rgb & 0x00FF_0000) >> 16) / 255.0 - b = CGFloat((rgb & 0x0000_FF00) >> 8) / 255.0 - a = CGFloat(rgb & 0x0000_00FF) / 255.0 - } else { - return nil - } - - self.init(red: r, green: g, blue: b, opacity: a) + if hex.count == 6 { + r = CGFloat((rgb & 0xFF0000) >> 16) / 255.0 + g = CGFloat((rgb & 0x00FF00) >> 8) / 255.0 + b = CGFloat(rgb & 0x0000FF) / 255.0 + } else if hex.count == 8 { + r = CGFloat((rgb & 0xFF00_0000) >> 24) / 255.0 + g = CGFloat((rgb & 0x00FF_0000) >> 16) / 255.0 + b = CGFloat((rgb & 0x0000_FF00) >> 8) / 255.0 + a = CGFloat(rgb & 0x0000_00FF) / 255.0 + } else { + return nil } + + self.init(red: r, green: g, blue: b, opacity: a) + } } diff --git a/firebaseai/FirebaseAIExample/Shared/Views/ErrorDetailsView.swift b/firebaseai/FirebaseAIExample/Shared/Views/ErrorDetailsView.swift index 3bd6ae81c..cd1e3f60e 100644 --- a/firebaseai/FirebaseAIExample/Shared/Views/ErrorDetailsView.swift +++ b/firebaseai/FirebaseAIExample/Shared/Views/ErrorDetailsView.swift @@ -13,252 +13,252 @@ // limitations under the License. #if canImport(FirebaseAILogic) - import FirebaseAILogic + import FirebaseAILogic #else - import FirebaseAI + import FirebaseAI #endif import MarkdownUI import SwiftUI private extension HarmCategory { - /// Returns a description of the `HarmCategory` suitable for displaying in the UI. - var displayValue: String { - switch self { - case .dangerousContent: "Dangerous content" - case .harassment: "Harassment" - case .hateSpeech: "Hate speech" - case .sexuallyExplicit: "Sexually explicit" - case .civicIntegrity: "Civic integrity" - default: "Unknown HarmCategory: \(rawValue)" - } + /// Returns a description of the `HarmCategory` suitable for displaying in the UI. + var displayValue: String { + switch self { + case .dangerousContent: "Dangerous content" + case .harassment: "Harassment" + case .hateSpeech: "Hate speech" + case .sexuallyExplicit: "Sexually explicit" + case .civicIntegrity: "Civic integrity" + default: "Unknown HarmCategory: \(rawValue)" } + } } private extension SafetyRating.HarmProbability { - /// Returns a description of the `HarmProbability` suitable for displaying in the UI. - var displayValue: String { - switch self { - case .high: "High" - case .low: "Low" - case .medium: "Medium" - case .negligible: "Negligible" - default: "Unknown HarmProbability: \(rawValue)" - } + /// Returns a description of the `HarmProbability` suitable for displaying in the UI. 
+ var displayValue: String { + switch self { + case .high: "High" + case .low: "Low" + case .medium: "Medium" + case .negligible: "Negligible" + default: "Unknown HarmProbability: \(rawValue)" } + } } private struct SubtitleFormRow: View { - var title: String - var value: String + var title: String + var value: String - var body: some View { - VStack(alignment: .leading) { - Text(title) - .font(.subheadline) - Text(value) - } + var body: some View { + VStack(alignment: .leading) { + Text(title) + .font(.subheadline) + Text(value) } + } } private struct SubtitleMarkdownFormRow: View { - var title: String - var value: String + var title: String + var value: String - var body: some View { - VStack(alignment: .leading) { - Text(title) - .font(.subheadline) - Markdown(value) - } + var body: some View { + VStack(alignment: .leading) { + Text(title) + .font(.subheadline) + Markdown(value) } + } } private struct SafetyRatingsSection: View { - var ratings: [SafetyRating] + var ratings: [SafetyRating] - var body: some View { - Section("Safety ratings") { - List(ratings, id: \.self) { rating in - HStack { - Text(rating.category.displayValue).font(.subheadline) - Spacer() - Text(rating.probability.displayValue) - } - } + var body: some View { + Section("Safety ratings") { + List(ratings, id: \.self) { rating in + HStack { + Text(rating.category.displayValue).font(.subheadline) + Spacer() + Text(rating.probability.displayValue) } + } } + } } struct ErrorDetailsView: View { - var error: Error + var error: Error - var body: some View { - NavigationView { - Form { - switch error { - case let GenerateContentError.internalError(underlying: underlyingError): - Section("Error Type") { - Text("Internal error") - } + var body: some View { + NavigationView { + Form { + switch error { + case let GenerateContentError.internalError(underlying: underlyingError): + Section("Error Type") { + Text("Internal error") + } - Section("Details") { - SubtitleFormRow(title: "Error description", - value: underlyingError.localizedDescription) - } + Section("Details") { + SubtitleFormRow(title: "Error description", + value: underlyingError.localizedDescription) + } - case let GenerateContentError.promptBlocked(response: generateContentResponse): - Section("Error Type") { - Text("Your prompt was blocked") - } + case let GenerateContentError.promptBlocked(response: generateContentResponse): + Section("Error Type") { + Text("Your prompt was blocked") + } - Section("Details") { - if let reason = generateContentResponse.promptFeedback?.blockReason { - SubtitleFormRow(title: "Reason for blocking", value: reason.rawValue) - } + Section("Details") { + if let reason = generateContentResponse.promptFeedback?.blockReason { + SubtitleFormRow(title: "Reason for blocking", value: reason.rawValue) + } - if let text = generateContentResponse.text { - SubtitleMarkdownFormRow(title: "Last chunk for the response", value: text) - } - } + if let text = generateContentResponse.text { + SubtitleMarkdownFormRow(title: "Last chunk for the response", value: text) + } + } - if let ratings = generateContentResponse.candidates.first?.safetyRatings { - SafetyRatingsSection(ratings: ratings) - } + if let ratings = generateContentResponse.candidates.first?.safetyRatings { + SafetyRatingsSection(ratings: ratings) + } - case let GenerateContentError.responseStoppedEarly( - reason: finishReason, - response: generateContentResponse - ): + case let GenerateContentError.responseStoppedEarly( + reason: finishReason, + response: generateContentResponse + ): - 
Section("Error Type") { - Text("Response stopped early") - } + Section("Error Type") { + Text("Response stopped early") + } - Section("Details") { - SubtitleFormRow(title: "Reason for finishing early", value: finishReason.rawValue) + Section("Details") { + SubtitleFormRow(title: "Reason for finishing early", value: finishReason.rawValue) - if let text = generateContentResponse.text { - SubtitleMarkdownFormRow(title: "Last chunk for the response", value: text) - } - } + if let text = generateContentResponse.text { + SubtitleMarkdownFormRow(title: "Last chunk for the response", value: text) + } + } - if let ratings = generateContentResponse.candidates.first?.safetyRatings { - SafetyRatingsSection(ratings: ratings) - } + if let ratings = generateContentResponse.candidates.first?.safetyRatings { + SafetyRatingsSection(ratings: ratings) + } - default: - Section("Error Type") { - Text("Some other error") - } + default: + Section("Error Type") { + Text("Some other error") + } - Section("Details") { - SubtitleFormRow(title: "Error description", value: error.localizedDescription) - } - } - } - .navigationTitle("Error details") - .navigationBarTitleDisplayMode(.inline) + Section("Details") { + SubtitleFormRow(title: "Error description", value: error.localizedDescription) + } } + } + .navigationTitle("Error details") + .navigationBarTitleDisplayMode(.inline) } + } } #Preview("Response Stopped Early") { - let error = GenerateContentError.responseStoppedEarly( - reason: .maxTokens, - response: GenerateContentResponse(candidates: [ - Candidate(content: ModelContent(role: "model", parts: - """ - A _hypothetical_ model response. - Cillum ex aliqua amet aliquip labore amet eiusmod consectetur reprehenderit sit commodo. - """), - safetyRatings: [ - SafetyRating( - category: .dangerousContent, - probability: .medium, - probabilityScore: 0.8, - severity: .medium, - severityScore: 0.9, - blocked: false - ), - SafetyRating( - category: .harassment, - probability: .low, - probabilityScore: 0.5, - severity: .low, - severityScore: 0.6, - blocked: false - ), - SafetyRating( - category: .hateSpeech, - probability: .low, - probabilityScore: 0.3, - severity: .medium, - severityScore: 0.2, - blocked: false - ), - SafetyRating( - category: .sexuallyExplicit, - probability: .low, - probabilityScore: 0.2, - severity: .negligible, - severityScore: 0.5, - blocked: false - ), - ], - finishReason: FinishReason.maxTokens, - citationMetadata: nil), - ]) - ) + let error = GenerateContentError.responseStoppedEarly( + reason: .maxTokens, + response: GenerateContentResponse(candidates: [ + Candidate(content: ModelContent(role: "model", parts: + """ + A _hypothetical_ model response. + Cillum ex aliqua amet aliquip labore amet eiusmod consectetur reprehenderit sit commodo. 
+ """), + safetyRatings: [ + SafetyRating( + category: .dangerousContent, + probability: .medium, + probabilityScore: 0.8, + severity: .medium, + severityScore: 0.9, + blocked: false + ), + SafetyRating( + category: .harassment, + probability: .low, + probabilityScore: 0.5, + severity: .low, + severityScore: 0.6, + blocked: false + ), + SafetyRating( + category: .hateSpeech, + probability: .low, + probabilityScore: 0.3, + severity: .medium, + severityScore: 0.2, + blocked: false + ), + SafetyRating( + category: .sexuallyExplicit, + probability: .low, + probabilityScore: 0.2, + severity: .negligible, + severityScore: 0.5, + blocked: false + ), + ], + finishReason: FinishReason.maxTokens, + citationMetadata: nil), + ]) + ) - return ErrorDetailsView(error: error) + return ErrorDetailsView(error: error) } #Preview("Prompt Blocked") { - let error = GenerateContentError.promptBlocked( - response: GenerateContentResponse(candidates: [ - Candidate(content: ModelContent(role: "model", parts: - """ - A _hypothetical_ model response. - Cillum ex aliqua amet aliquip labore amet eiusmod consectetur reprehenderit sit commodo. - """), - safetyRatings: [ - SafetyRating( - category: .dangerousContent, - probability: .low, - probabilityScore: 0.8, - severity: .medium, - severityScore: 0.9, - blocked: false - ), - SafetyRating( - category: .harassment, - probability: .low, - probabilityScore: 0.5, - severity: .low, - severityScore: 0.6, - blocked: false - ), - SafetyRating( - category: .hateSpeech, - probability: .low, - probabilityScore: 0.3, - severity: .medium, - severityScore: 0.2, - blocked: false - ), - SafetyRating( - category: .sexuallyExplicit, - probability: .low, - probabilityScore: 0.2, - severity: .negligible, - severityScore: 0.5, - blocked: false - ), - ], - finishReason: FinishReason.other, - citationMetadata: nil), - ]) - ) + let error = GenerateContentError.promptBlocked( + response: GenerateContentResponse(candidates: [ + Candidate(content: ModelContent(role: "model", parts: + """ + A _hypothetical_ model response. + Cillum ex aliqua amet aliquip labore amet eiusmod consectetur reprehenderit sit commodo. 
+ """), + safetyRatings: [ + SafetyRating( + category: .dangerousContent, + probability: .low, + probabilityScore: 0.8, + severity: .medium, + severityScore: 0.9, + blocked: false + ), + SafetyRating( + category: .harassment, + probability: .low, + probabilityScore: 0.5, + severity: .low, + severityScore: 0.6, + blocked: false + ), + SafetyRating( + category: .hateSpeech, + probability: .low, + probabilityScore: 0.3, + severity: .medium, + severityScore: 0.2, + blocked: false + ), + SafetyRating( + category: .sexuallyExplicit, + probability: .low, + probabilityScore: 0.2, + severity: .negligible, + severityScore: 0.5, + blocked: false + ), + ], + finishReason: FinishReason.other, + citationMetadata: nil), + ]) + ) - return ErrorDetailsView(error: error) + return ErrorDetailsView(error: error) } diff --git a/firebaseai/FirebaseAIExample/Shared/Views/FilterChipView.swift b/firebaseai/FirebaseAIExample/Shared/Views/FilterChipView.swift index a1f831365..1e6e436d0 100644 --- a/firebaseai/FirebaseAIExample/Shared/Views/FilterChipView.swift +++ b/firebaseai/FirebaseAIExample/Shared/Views/FilterChipView.swift @@ -15,40 +15,40 @@ import SwiftUI struct FilterChipView: View { - let useCase: UseCase - let isSelected: Bool - let action: () -> Void + let useCase: UseCase + let isSelected: Bool + let action: () -> Void - var body: some View { - Button(action: action) { - Text(useCase.rawValue) - .padding(.horizontal) - } - .filterChipStyle(isSelected: isSelected) + var body: some View { + Button(action: action) { + Text(useCase.rawValue) + .padding(.horizontal) } + .filterChipStyle(isSelected: isSelected) + } } private struct FilterChipStyle: ViewModifier { - let isSelected: Bool + let isSelected: Bool - func body(content: Content) -> some View { - if isSelected { - content.buttonStyle(.borderedProminent) - } else { - content.buttonStyle(.bordered) - } + func body(content: Content) -> some View { + if isSelected { + content.buttonStyle(.borderedProminent) + } else { + content.buttonStyle(.bordered) } + } } extension View { - func filterChipStyle(isSelected: Bool) -> some View { - modifier(FilterChipStyle(isSelected: isSelected)) - } + func filterChipStyle(isSelected: Bool) -> some View { + modifier(FilterChipStyle(isSelected: isSelected)) + } } #Preview { - VStack(spacing: 16) { - FilterChipView(useCase: .text, isSelected: true) {} - FilterChipView(useCase: .text, isSelected: false) {} - } + VStack(spacing: 16) { + FilterChipView(useCase: .text, isSelected: true) {} + FilterChipView(useCase: .text, isSelected: false) {} + } } diff --git a/firebaseai/FirebaseAIExample/Shared/Views/InlineTip.swift b/firebaseai/FirebaseAIExample/Shared/Views/InlineTip.swift index 8002dde3b..ff72d9b5e 100644 --- a/firebaseai/FirebaseAIExample/Shared/Views/InlineTip.swift +++ b/firebaseai/FirebaseAIExample/Shared/Views/InlineTip.swift @@ -15,43 +15,43 @@ import TipKit public struct InlineTip: Tip { - private let _text: String - private let _title: String - private let _icon: Image + private let _text: String + private let _title: String + private let _icon: Image - public init(text: String, title: String = "Tip", icon: Image = Image(systemName: "info.circle")) { - _text = text - _title = title - _icon = icon - } + public init(text: String, title: String = "Tip", icon: Image = Image(systemName: "info.circle")) { + _text = text + _title = title + _icon = icon + } - public var title: Text { - Text(_title) - } + public var title: Text { + Text(_title) + } - public var message: Text? { - Text(_text) - } + public var message: Text? 
{ + Text(_text) + } - public var image: Image? { - _icon - } + public var image: Image? { + _icon + } } #Preview { - TipView(InlineTip(text: "Try asking the model to change the background color")) - TipView( - InlineTip( - text: "You shouldn't do that.", - title: "Warning", - icon: Image(systemName: "exclamationmark.circle") - ) + TipView(InlineTip(text: "Try asking the model to change the background color")) + TipView( + InlineTip( + text: "You shouldn't do that.", + title: "Warning", + icon: Image(systemName: "exclamationmark.circle") ) - TipView( - InlineTip( - text: "Oops, try again!", - title: "Error", - icon: Image(systemName: "x.circle") - ) + ) + TipView( + InlineTip( + text: "Oops, try again!", + title: "Error", + icon: Image(systemName: "x.circle") ) + ) } diff --git a/firebaseai/FirebaseAIExample/Shared/Views/ProgressOverlay.swift b/firebaseai/FirebaseAIExample/Shared/Views/ProgressOverlay.swift index bbc96980a..b2391f77b 100644 --- a/firebaseai/FirebaseAIExample/Shared/Views/ProgressOverlay.swift +++ b/firebaseai/FirebaseAIExample/Shared/Views/ProgressOverlay.swift @@ -15,26 +15,26 @@ import SwiftUI struct ProgressOverlay: View { - var body: some View { - ZStack { - Color.black.opacity(0.3) - .ignoresSafeArea() + var body: some View { + ZStack { + Color.black.opacity(0.3) + .ignoresSafeArea() - ZStack { - RoundedRectangle(cornerRadius: 16) - .fill(Material.ultraThinMaterial) - .frame(width: 120, height: 100) - .shadow(radius: 8) + ZStack { + RoundedRectangle(cornerRadius: 16) + .fill(Material.ultraThinMaterial) + .frame(width: 120, height: 100) + .shadow(radius: 8) - VStack(spacing: 12) { - ProgressView() - .scaleEffect(1.5) + VStack(spacing: 12) { + ProgressView() + .scaleEffect(1.5) - Text("Loading...") - .font(.subheadline) - .foregroundColor(.secondary) - } - } + Text("Loading...") + .font(.subheadline) + .foregroundColor(.secondary) } + } } + } } diff --git a/firebaseai/FirebaseAIExample/Shared/Views/SampleCardView.swift b/firebaseai/FirebaseAIExample/Shared/Views/SampleCardView.swift index 85da2ae52..66bb862d5 100644 --- a/firebaseai/FirebaseAIExample/Shared/Views/SampleCardView.swift +++ b/firebaseai/FirebaseAIExample/Shared/Views/SampleCardView.swift @@ -15,111 +15,111 @@ import SwiftUI struct SampleCardView: View { - let sample: Sample + let sample: Sample - var body: some View { - GroupBox { - Text(sample.description) - .font(.system(size: 14)) - .foregroundColor(.secondary) - .frame(maxWidth: .infinity, maxHeight: .infinity, alignment: .topLeading) - } label: { - if let useCase = sample.useCases.first { - Label(sample.title, systemImage: systemName(for: useCase)) - .font(.system(size: 17, weight: .medium)) - .foregroundColor(color(for: useCase)) - } else { - Text(sample.title) - .font(.system(size: 17, weight: .medium)) - } - } - .groupBoxStyle(CardGroupBoxStyle()) - .frame(maxWidth: .infinity, minHeight: 150, maxHeight: .infinity, alignment: .top) + var body: some View { + GroupBox { + Text(sample.description) + .font(.system(size: 14)) + .foregroundColor(.secondary) + .frame(maxWidth: .infinity, maxHeight: .infinity, alignment: .topLeading) + } label: { + if let useCase = sample.useCases.first { + Label(sample.title, systemImage: systemName(for: useCase)) + .font(.system(size: 17, weight: .medium)) + .foregroundColor(color(for: useCase)) + } else { + Text(sample.title) + .font(.system(size: 17, weight: .medium)) + } } + .groupBoxStyle(CardGroupBoxStyle()) + .frame(maxWidth: .infinity, minHeight: 150, maxHeight: .infinity, alignment: .top) + } - private func 
systemName(for useCase: UseCase) -> String { - switch useCase { - case .all: "square.grid.2x2.fill" - case .text: "text.bubble.fill" - case .image: "photo.fill" - case .video: "video.fill" - case .audio: "waveform" - case .document: "doc.fill" - case .functionCalling: "gearshape.2.fill" - } + private func systemName(for useCase: UseCase) -> String { + switch useCase { + case .all: "square.grid.2x2.fill" + case .text: "text.bubble.fill" + case .image: "photo.fill" + case .video: "video.fill" + case .audio: "waveform" + case .document: "doc.fill" + case .functionCalling: "gearshape.2.fill" } + } - private func color(for useCase: UseCase) -> Color { - switch useCase { - case .all:.primary - case .text:.blue - case .image:.purple - case .video:.red - case .audio:.orange - case .document:.gray - case .functionCalling:.green - } + private func color(for useCase: UseCase) -> Color { + switch useCase { + case .all:.primary + case .text:.blue + case .image:.purple + case .video:.red + case .audio:.orange + case .document:.gray + case .functionCalling:.green } + } } public struct CardGroupBoxStyle: GroupBoxStyle { - private var cornerRadius: CGFloat { - if #available(iOS 26.0, *) { - return 28 - } else { - return 12 - } + private var cornerRadius: CGFloat { + if #available(iOS 26.0, *) { + return 28 + } else { + return 12 } + } - public func makeBody(configuration: Configuration) -> some View { - VStack(alignment: .leading, spacing: 12) { - configuration.label - configuration.content - } - .padding() - .background(Color(.secondarySystemGroupedBackground)) - .clipShape(RoundedRectangle(cornerRadius: cornerRadius, style: .continuous)) + public func makeBody(configuration: Configuration) -> some View { + VStack(alignment: .leading, spacing: 12) { + configuration.label + configuration.content } + .padding() + .background(Color(.secondarySystemGroupedBackground)) + .clipShape(RoundedRectangle(cornerRadius: cornerRadius, style: .continuous)) + } } #Preview { - let samples = [ - Sample( - title: "Sample 1", - description: "This is the first sample card.", - useCases: [.text], - navRoute: "ConversationScreen" - ), - Sample( - title: "Sample 2", - description: "This is the second sample card.", - useCases: [.image], - navRoute: "PhotoReasoningScreen" - ), - Sample( - title: "Sample 3", - description: "This is the third sample card.", - useCases: [.video], - navRoute: "ConversationScreen" - ), - Sample( - title: "Sample 4", - description: "This is the fourth sample card, which is a bit longer to see how the text wraps and if everything still aligns correctly.", - useCases: [.audio], - navRoute: "ConversationScreen" - ), - ] + let samples = [ + Sample( + title: "Sample 1", + description: "This is the first sample card.", + useCases: [.text], + navRoute: "ConversationScreen" + ), + Sample( + title: "Sample 2", + description: "This is the second sample card.", + useCases: [.image], + navRoute: "PhotoReasoningScreen" + ), + Sample( + title: "Sample 3", + description: "This is the third sample card.", + useCases: [.video], + navRoute: "ConversationScreen" + ), + Sample( + title: "Sample 4", + description: "This is the fourth sample card, which is a bit longer to see how the text wraps and if everything still aligns correctly.", + useCases: [.audio], + navRoute: "ConversationScreen" + ), + ] - ScrollView { - LazyVGrid(columns: [ - GridItem(.flexible()), - GridItem(.flexible()), - ], spacing: 16) { - ForEach(samples) { sample in - SampleCardView(sample: sample) - } - } - .padding() + ScrollView { + 
LazyVGrid(columns: [ + GridItem(.flexible()), + GridItem(.flexible()), + ], spacing: 16) { + ForEach(samples) { sample in + SampleCardView(sample: sample) + } } - .background(Color(.systemGroupedBackground)) + .padding() + } + .background(Color(.systemGroupedBackground)) } From e5aaa3adf8379274b4c9f112dc02be60e6f0206c Mon Sep 17 00:00:00 2001 From: Nick Cooke Date: Thu, 11 Dec 2025 17:12:53 -0500 Subject: [PATCH 10/10] try again --- .../Shared/Audio/AudioController.swift | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/firebaseai/FirebaseAIExample/Shared/Audio/AudioController.swift b/firebaseai/FirebaseAIExample/Shared/Audio/AudioController.swift index 0152b1d5f..a8c7589ff 100644 --- a/firebaseai/FirebaseAIExample/Shared/Audio/AudioController.swift +++ b/firebaseai/FirebaseAIExample/Shared/Audio/AudioController.swift @@ -50,7 +50,7 @@ actor AudioController { private var stopped = false - public init() async throws { + init() async throws { let session = AVAudioSession.sharedInstance() try session.setCategory( .playAndRecord, @@ -90,7 +90,7 @@ actor AudioController { } /// Kicks off audio processing, and returns a stream of recorded microphone audio data. - public func listenToMic() async throws -> AsyncStream { + func listenToMic() async throws -> AsyncStream { try await spawnAudioProcessingThread() return microphoneData } @@ -98,7 +98,7 @@ actor AudioController { /// Permanently stop all audio processing. /// /// To start again, create a new instance of ``AudioController``. - public func stop() async { + func stop() async { stopped = true await stopListeningAndPlayback() microphoneDataQueue.finish() @@ -106,12 +106,12 @@ actor AudioController { } /// Queues audio for playback. - public func playAudio(audio: Data) async throws { + func playAudio(audio: Data) async throws { try audioPlayer?.play(audio) } /// Interrupts and clears the currently pending audio playback queue. - public func interrupt() async { + func interrupt() async { audioPlayer?.interrupt() }
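
A minimal usage sketch of the API above (the helper name runLiveAudio and the modelAudio parameter are illustrative, not part of the sample): even though these methods drop `public`, call sites still use `await`, because AudioController is an actor.

import Foundation

// Minimal sketch, assuming AudioController from Shared/Audio/AudioController.swift
// is visible in the same target; runLiveAudio and modelAudio are illustrative names.
func runLiveAudio(modelAudio: Data) async throws {
  let controller = try await AudioController()      // async, throwing actor initializer
  let mic = try await controller.listenToMic()      // stream of recorded microphone audio
  let pump = Task {
    for await chunk in mic {
      _ = chunk // forward microphone audio to the Live API session here (assumed)
    }
  }
  try await controller.playAudio(audio: modelAudio) // queue model audio for playback
  await controller.interrupt()                      // clear any pending playback
  await controller.stop()                           // permanently stop audio processing
  pump.cancel()
}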