 * - Streaming responses with Effect streams
 */

import * as OpenRouterClient from '@effect/ai-openrouter/OpenRouterClient';
import * as OpenRouterLanguageModel from '@effect/ai-openrouter/OpenRouterLanguageModel';
import * as Chat from '@effect/ai/Chat';
import * as LanguageModel from '@effect/ai/LanguageModel';
import { FetchHttpClient } from '@effect/platform';
import * as BunContext from '@effect/platform-bun/BunContext';
import { Console, Effect, Layer, Redacted, Stream } from 'effect';

/**
 * Main program using Effect.gen for composable effects
 */
const program = Effect.gen(function* () {
  // Example 1: a one-shot text completion.
  yield* Console.log('\n=== Example 1: Simple Chat Completion ===\n');

  // Generate text using the language model.
  // The LanguageModel service is injected via the Effect context.
  const response = yield* LanguageModel.generateText({
    prompt: 'Explain what Effect is in functional programming in 2 sentences.',
  });

  // Access the generated text and metadata from the response.
  yield* Console.log('Response:', response.text);
  yield* Console.log('Finish reason:', response.finishReason);
  yield* Console.log('Usage:', response.usage);

  // Example 2: Stateful conversation with Chat
  yield* Console.log('\n=== Example 2: Stateful Chat Conversation ===\n');

  // Chat.empty creates a new chat session with empty history.
  // Chat maintains conversation context across multiple turns.
  const chat = yield* Chat.empty;

  // First turn - the model responds to our greeting.
  const greeting = yield* chat.generateText({
    prompt: "Hi! I'm learning about Effect.",
  });
  yield* Console.log('Assistant:', greeting.text);

  // Second turn - the model has context from the previous message.
  // This demonstrates how Chat maintains conversation state.
  const followUp = yield* chat.generateText({
    prompt: 'What are the main benefits?',
  });
  yield* Console.log('Assistant:', followUp.text);

  // Example 3: Streaming responses
  yield* Console.log('\n=== Example 3: Streaming Text Generation ===\n');

  yield* Console.log('Streaming response:');

  // streamText returns a Stream of response parts.
  // Streams in Effect are lazy and composable;
  // Stream.runForEach processes each part as it arrives.
  yield* LanguageModel.streamText({
    prompt: 'Count from 1 to 5, explaining each number briefly.',
  }).pipe(
    Stream.runForEach((part) => {
      // Only print text deltas to show the streaming effect
      if (part.type === 'text-delta') {
        // TODO: print without newlines
        return Console.log(part.delta);
      }
      // Log other part types for demonstration
      return Console.log(`[${part.type}]`);
    }),
  );

  yield* Console.log('\n=== All examples completed ===');
});

/**
 * Layer composition for dependency injection
@@ -98,22 +98,22 @@ const OpenRouterClientLayer = OpenRouterClient.layer({
9898} ) . pipe (
9999 // Provide the Fetch HTTP client implementation
100100 // Layer.provide composes layers, satisfying dependencies
101- Layer . provide ( FetchHttpClient . layer )
102- )
101+ Layer . provide ( FetchHttpClient . layer ) ,
102+ ) ;
// Create the language model layer using OpenRouter.
// This uses the "openai/gpt-4o-mini" model via OpenRouter.
const OpenRouterModelLayer = OpenRouterLanguageModel.layer({
  model: 'openai/gpt-4o-mini',
  config: {
    // Optional: configure model parameters
    temperature: 0.7,
    max_tokens: 500,
  },
}).pipe(
  // The model layer depends on the OpenRouter client
  Layer.provide(OpenRouterClientLayer),
);
/**
 * Run the program with dependency injection
@@ -131,7 +131,7 @@ await program.pipe(
131131 // Provide the Bun runtime context for platform services
132132 Effect . provide ( BunContext . layer ) ,
133133 // Run the effect - returns a Promise<void>
134- Effect . runPromise
135- )
134+ Effect . runPromise ,
135+ ) ;
// Final success marker, printed after the Effect runtime has completed.
console.log('\n✓ Program completed successfully');
0 commit comments