/**
 * Example usage of the @openrouter/sdk package
 *
 * This demonstrates the OpenRouter TypeScript SDK's idiomatic usage patterns:
 * - Type-safe client initialization
 * - Non-streaming chat completions
 * - Streaming chat completions with async iteration
 * - Automatic usage tracking
 *
 * Run with: bun examples/basic/example-basic-openrouter-sdk.ts
 */

import { OpenRouter } from '@openrouter/sdk';

// Initialize the OpenRouter SDK client
// The SDK automatically reads OPENROUTER_API_KEY from the environment
const openRouter = new OpenRouter({
  apiKey: process.env.OPENROUTER_API_KEY ?? '',
});

async function nonStreamingExample() {
  console.log('=== Non-Streaming Example ===\n');

  // Basic chat completion - returns the full response at once
  const result = await openRouter.chat.send({
    model: 'openai/gpt-4o-mini',
    messages: [
      {
        role: 'user',
        content: 'What is the capital of France?',
      },
    ],
    stream: false, // Explicitly set stream to false for non-streaming
  });

  // The SDK provides strong typing - checking for 'choices' narrows the
  // send() result to the non-streaming response shape
  if ('choices' in result && result.choices[0]) {
    console.log('Model:', result.model);
    console.log('Response:', result.choices[0].message.content);
    console.log('Usage:', result.usage);
    console.log();
  }
}
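
// The same non-streaming pattern, condensed into a reusable helper. This is
// a sketch built only on the chat.send() usage shown above; the 'ask' name
// and return shape are illustrative, not part of the SDK.
async function ask(question: string) {
  const result = await openRouter.chat.send({
    model: 'openai/gpt-4o-mini',
    messages: [{ role: 'user', content: question }],
    stream: false,
  });
  return 'choices' in result ? result.choices[0]?.message.content : undefined;
}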

async function streamingExample() {
  console.log('=== Streaming Example ===\n');

  // Streaming chat completion - returns an async iterable
  const stream = await openRouter.chat.send({
    model: 'openai/gpt-4o-mini',
    messages: [
      {
        role: 'user',
        content: 'Write a haiku about TypeScript',
      },
    ],
    stream: true, // Enable streaming mode
    streamOptions: {
      includeUsage: true, // Request usage stats in the final chunk
    },
  });

  console.log('Streaming response:');
  let fullContent = '';

  // The SDK returns an async iterable that you can iterate with for-await-of
  for await (const chunk of stream) {
    // Each chunk carries an incremental content delta; print and accumulate it
    const delta = chunk.choices[0]?.delta?.content;
    if (delta) {
      process.stdout.write(delta);
      fullContent += delta;
    }

    // Usage stats are included in the final chunk when streamOptions.includeUsage is true
    if (chunk.usage) {
      console.log('\n\nStream usage:', chunk.usage);
    }
  }

  console.log('\n\nFull response:', fullContent);
  console.log();
}
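
// Sketch: the streaming pattern above, condensed into a helper that drains
// the stream and returns the accumulated text. It assumes the same chunk
// shape (choices[0].delta.content) used in streamingExample(); the
// 'collectStream' name is illustrative, not an SDK API.
async function collectStream(prompt: string): Promise<string> {
  const stream = await openRouter.chat.send({
    model: 'openai/gpt-4o-mini',
    messages: [{ role: 'user', content: prompt }],
    stream: true,
  });
  let text = '';
  for await (const chunk of stream) {
    text += chunk.choices[0]?.delta?.content ?? '';
  }
  return text;
}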

// Run both examples in sequence
async function main() {
  try {
    await nonStreamingExample();
    await streamingExample();
  } catch (error) {
    console.error('Error:', error);
    process.exit(1);
  }
}

main();