11import * as gptscript from "../src/gptscript"
22import {
33 ArgumentSchemaType ,
4- Credential , CredentialType ,
4+ CredentialType ,
55 getEnv ,
66 PropertyType ,
77 RunEventType ,
@@ -12,7 +12,7 @@ import {
1212import path from "path"
1313import { fileURLToPath } from "url"
1414import * as fs from "node:fs"
15- import { randomBytes } from "node:crypto" ;
15+ import { randomBytes } from "node:crypto"
1616
1717let gFirst : gptscript . GPTScript
1818let g : gptscript . GPTScript
@@ -172,6 +172,17 @@ describe("gptscript module", () => {
172172 const result = await ( await g . run ( testGptPath ) ) . text ( )
173173 expect ( result ) . toBeDefined ( )
174174 expect ( result ) . toContain ( "Calvin Coolidge" )
175+
176+ // Run it a second time and expect a cached result
177+ const run = await g . run ( testGptPath )
178+ const secondResult = await run . text ( )
179+ expect ( secondResult ) . toBeDefined ( )
180+ expect ( secondResult ) . toStrictEqual ( result )
181+
182+ // There should be one call frame, and it should be cached
183+ for ( let c in run . calls ) {
184+ expect ( run . calls [ c ] . chatResponseCached ) . toBeTruthy ( )
185+ }
175186 } )
176187
177188 test ( "should override credentials correctly" , async ( ) => {
@@ -192,6 +203,7 @@ describe("gptscript module", () => {
192203 test ( "run executes and stream a file correctly" , async ( ) => {
193204 let out = ""
194205 let err = undefined
206+ let [ promptTokens , completionTokens , totalTokens ] = [ 0 , 0 , 0 ]
195207 const testGptPath = path . join ( __dirname , "fixtures" , "test.gpt" )
196208 const opts = {
197209 disableCache : true ,
@@ -204,8 +216,17 @@ describe("gptscript module", () => {
204216 await run . text ( )
205217 err = run . err
206218
219+ for ( let c in run . calls ) {
220+ promptTokens += run . calls [ c ] . usage . promptTokens || 0
221+ completionTokens += run . calls [ c ] . usage . completionTokens || 0
222+ totalTokens += run . calls [ c ] . usage . totalTokens || 0
223+ }
224+
207225 expect ( out ) . toContain ( "Calvin Coolidge" )
208226 expect ( err ) . toEqual ( "" )
227+ expect ( promptTokens ) . toBeGreaterThan ( 0 )
228+ expect ( completionTokens ) . toBeGreaterThan ( 0 )
229+ expect ( totalTokens ) . toBeGreaterThan ( 0 )
209230 } )
210231
211232 test ( "run executes and streams a file with global tools correctly" , async ( ) => {
@@ -273,9 +294,17 @@ describe("gptscript module", () => {
273294 instructions : "${question}"
274295 }
275296
276- const response = await ( await g . evaluate ( [ t0 , t1 ] ) ) . text ( )
297+ const run = await g . evaluate ( [ t0 , t1 ] )
298+ const response = await run . text ( )
277299 expect ( response ) . toBeDefined ( )
278300 expect ( response ) . toContain ( "Calvin Coolidge" )
301+
302+ // In this case, we expect the total number of tool results to be 1
303+ let toolResults = 0
304+ for ( let c in run . calls ) {
305+ toolResults += run . calls [ c ] . toolResults
306+ }
307+ expect ( toolResults ) . toStrictEqual ( 1 )
279308 } , 30000 )
280309
281310 test ( "with sub tool" , async ( ) => {
0 commit comments