85 changes: 83 additions & 2 deletions src/testFunctions.gs
@@ -12,11 +12,15 @@ function testAll() {
  testKnowledgeLink();
  testVision();
  testMaximumAPICalls();
  testAddFile();
  testMetadataAndLogs();
  testResponseIdTracking();
  testVectorStoreLifecycle();
}


// Helper to set API keys and run tests across models
function runTestAcrossModels(testName, setupFunction, runOptions = {}) {
function runTestAcrossModels(testName, setupFunction, runOptions = {}, afterRun) {
  // Set API keys once per batch
  GenAIApp.setGeminiAPIKey(GEMINI_API_KEY);
  GenAIApp.setOpenAIAPIKey(OPEN_AI_API_KEY);
@@ -33,6 +37,9 @@ function runTestAcrossModels(testName, setupFunction, runOptions = {}) {
    const options = { model: model.name, ...runOptions };
    const response = chat.run(options);
    console.log(`${testName} ${model.label}:\n${response}`);
    if (afterRun) {
      afterRun(chat, response, model);
    }
  });
}

@@ -106,7 +113,6 @@ function testKnowledgeLink() {
function testVision() {
  runTestAcrossModels("Vision", chat => {
    chat
      .enableVision(true)
      .addMessage("Describe the following image.")
      .addImage(
        "https://good-nature-blog-uploads.s3.amazonaws.com/uploads/2014/02/slide_336579_3401508_free-1200x640.jpg",
@@ -128,3 +134,78 @@
  return `The weather in ${cityName} is 19°C today.`;
}

function testAddFile() {
  const blob = Utilities.newBlob("Hello from a file", "text/plain", "hello.txt");
  runTestAcrossModels("Add file", chat => {
    chat
      .addMessage("Read the file and summarize its content.")
      .addFile(blob);
  });
}

function testMetadataAndLogs() {
  GenAIApp.setGlobalMetadata("suite", "tests");
  const dummyFunction = GenAIApp.newFunction()
    .setName("dummy")
    .setDescription("A dummy function");

  runTestAcrossModels(
    "Metadata and logs",
    chat => {
      chat
        .disableLogs(true)
        .addMetadata("requestId", "12345")
        .addMessage("Say hello")
        .addFunction(dummyFunction);
    },
    {},
    chat => {
      console.log(`Messages: ${chat.getMessages()}`);
      console.log(`Functions: ${chat.getFunctions()}`);
    }
  );
}

function testResponseIdTracking() {
  GenAIApp.setGeminiAPIKey(GEMINI_API_KEY);
  GenAIApp.setOpenAIAPIKey(OPEN_AI_API_KEY);

  const models = [
    { name: GPT_MODEL, label: "GPT" },
    { name: REASONING_MODEL, label: "reasoning" },
    { name: GEMINI_MODEL, label: "gemini" }
  ];

  models.forEach(model => {
    const chat = GenAIApp.newChat();
    chat.addMessage("Hello");
    chat.run({ model: model.name });
    const lastId = chat.retrieveLastResponseId();
    chat.addMessage("Continue this conversation.");
    if (lastId) {
      chat.setPreviousResponseId(lastId);
    }
    const response = chat.run({ model: model.name });
    console.log(`Response ID test ${model.label}:\n${response}`);
  });
}
Comment on lines +169 to +191

💡 Verification agent

🧩 Analysis chain

Harden the response ID test: log the retrieved ID and isolate per-model failures.

This reduces flakiness, surfaces whether an ID was retrieved, and prevents one failure from aborting subsequent iterations. Also, the API keys are already set elsewhere, so setting them here is redundant; ideally, centralize that setup.

Apply this diff:

-  models.forEach(model => {
-    const chat = GenAIApp.newChat();
-    chat.addMessage("Hello");
-    chat.run({ model: model.name });
-    const lastId = chat.retrieveLastResponseId();
-    chat.addMessage("Continue this conversation.");
-    if (lastId) {
-      chat.setPreviousResponseId(lastId);
-    }
-    const response = chat.run({ model: model.name });
-    console.log(`Response ID test ${model.label}:\n${response}`);
-  });
+  models.forEach(model => {
+    try {
+      const chat = GenAIApp.newChat();
+      chat.addMessage("Hello");
+      chat.run({ model: model.name });
+      const lastId = chat.retrieveLastResponseId();
+      console.log(`Last response id for ${model.label}: ${lastId || 'n/a'}`);
+      chat.addMessage("Continue this conversation.");
+      if (lastId) {
+        chat.setPreviousResponseId(lastId);
+      } else {
+        console.warn(`No response id retrieved for ${model.label}; running without previous id`);
+      }
+      const response = chat.run({ model: model.name });
+      console.log(`Response ID test ${model.label}:\n${response}`);
+    } catch (err) {
+      console.error(`Response ID test failed for ${model.label}: ${err && err.stack || err}`);
+    }
+  });

Please confirm that retrieveLastResponseId/setPreviousResponseId are implemented for all three models; if not, we should feature-detect and skip unsupported models.
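
If those methods were missing for some models, a minimal feature-detection guard inside the forEach callback could look like this (a sketch only; it assumes an unsupported chat simply lacks the methods rather than throwing):

const supportsResponseIds =
  typeof chat.retrieveLastResponseId === 'function' &&
  typeof chat.setPreviousResponseId === 'function';
if (!supportsResponseIds) {
  console.warn(`Skipping response ID test for ${model.label}: response ID methods not available`);
  return; // skips only this model; the forEach loop continues
}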


🏁 Script executed:

#!/bin/bash
# Search for retrieveLastResponseId and setPreviousResponseId definitions across .gs files
rg -n --glob '*.gs' 'retrieveLastResponseId'
rg -n --glob '*.gs' 'setPreviousResponseId'

Length of output: 379


🏁 Script executed:

#!/bin/bash
# Display implementation of retrieveLastResponseId and setPreviousResponseId in code.gs
sed -n '300,360p' src/code.gs

Length of output: 1871


Harden the response ID test and confirm ID methods exist

I’ve verified that retrieveLastResponseId and setPreviousResponseId are implemented on the Chat object, so no per-model feature detection is needed. Applying the below diff will:

  • Log the retrieved ID (or warn if missing)
  • Isolate failures per model to avoid aborting the loop
  • Centralize error handling with try/catch

Also, consider centralizing API-key setup in your test harness instead of setting the keys in each test.
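
For the key centralization, a minimal sketch could be a shared helper in the test harness (the helper name is illustrative, not an existing function in this repo):

// Hypothetical shared setup helper; call it once per test run instead of
// repeating the key setters in individual tests.
function setupTestApiKeys() {
  GenAIApp.setGeminiAPIKey(GEMINI_API_KEY);
  GenAIApp.setOpenAIAPIKey(OPEN_AI_API_KEY);
}

Since runTestAcrossModels already sets both keys, the standalone tests (testResponseIdTracking, testVectorStoreLifecycle) could call setupTestApiKeys() and drop their inline setGeminiAPIKey/setOpenAIAPIKey calls.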

Apply this diff in src/testFunctions.gs (lines 169–191):

-  models.forEach(model => {
-    const chat = GenAIApp.newChat();
-    chat.addMessage("Hello");
-    chat.run({ model: model.name });
-    const lastId = chat.retrieveLastResponseId();
-    chat.addMessage("Continue this conversation.");
-    if (lastId) {
-      chat.setPreviousResponseId(lastId);
-    }
-    const response = chat.run({ model: model.name });
-    console.log(`Response ID test ${model.label}:\n${response}`);
-  });
+  models.forEach(model => {
+    try {
+      const chat = GenAIApp.newChat();
+      chat.addMessage("Hello");
+      chat.run({ model: model.name });
+      const lastId = chat.retrieveLastResponseId();
+      console.log(`Last response id for ${model.label}: ${lastId || 'n/a'}`);
+      chat.addMessage("Continue this conversation.");
+      if (lastId) {
+        chat.setPreviousResponseId(lastId);
+      } else {
+        console.warn(`No response id retrieved for ${model.label}; running without previous id`);
+      }
+      const response = chat.run({ model: model.name });
+      console.log(`Response ID test ${model.label}:\n${response}`);
+    } catch (err) {
+      console.error(`Response ID test failed for ${model.label}: ${err.stack || err}`);
+    }
+  });
🤖 Prompt for AI Agents
In src/testFunctions.gs around lines 169 to 191, the response ID test should be hardened: wrap each model iteration in a try/catch so one model failure doesn't abort the loop; after the first run, capture the ID and log it (or console.warn if it is missing) before calling setPreviousResponseId; ensure setPreviousResponseId is only called when an ID exists; and log per-model errors inside the catch so failures are isolated and informative. Also consider moving the API-key setup out of this test into a shared test harness rather than setting the keys here.


function testVectorStoreLifecycle() {
  GenAIApp.setOpenAIAPIKey(OPEN_AI_API_KEY);
  const blob = Utilities.newBlob("Vector store content", "text/plain", "vs.txt");

  const store = GenAIApp.newVectorStore()
    .setName("test-store-" + Date.now())
    .setDescription("Temporary store for tests")
    .createVectorStore();

  const storeId = store.getId();
  const fileId = store.uploadAndAttachFile(blob, { source: "test" });
  const files = store.listFiles();
  console.log(`Vector store files: ${JSON.stringify(files)}`);

  store.deleteFile(fileId);
  store.deleteVectorStore();
  console.log(`Vector store ${storeId} cleaned up.`);
}
Comment on lines +193 to +210

🛠️ Refactor suggestion

Prevent resource leaks in the vector store test; add a try/finally and a brief wait for eventual consistency.

If any step throws, the vector store won’t be deleted, leaving orphaned resources. Also, a short delay before listing files can reduce flakiness while the backend indexes the upload.

Apply this diff:

-  const store = GenAIApp.newVectorStore()
-    .setName("test-store-" + Date.now())
-    .setDescription("Temporary store for tests")
-    .createVectorStore();
-
-  const storeId = store.getId();
-  const fileId = store.uploadAndAttachFile(blob, { source: "test" });
-  const files = store.listFiles();
-  console.log(`Vector store files: ${JSON.stringify(files)}`);
-
-  store.deleteFile(fileId);
-  store.deleteVectorStore();
-  console.log(`Vector store ${storeId} cleaned up.`);
+  const store = GenAIApp.newVectorStore()
+    .setName("test-store-" + Date.now())
+    .setDescription("Temporary store for tests")
+    .createVectorStore();
+
+  const storeId = store.getId();
+  let fileId;
+  try {
+    fileId = store.uploadAndAttachFile(blob, { source: "test" });
+    // Brief wait for indexing/consistency if backend is eventually consistent
+    Utilities.sleep(1500);
+    const files = store.listFiles();
+    console.log(`Vector store files: ${JSON.stringify(files)}`);
+    if (fileId) {
+      store.deleteFile(fileId);
+    }
+  } finally {
+    store.deleteVectorStore();
+    console.log(`Vector store ${storeId} cleaned up.`);
+  }
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
-function testVectorStoreLifecycle() {
-  GenAIApp.setOpenAIAPIKey(OPEN_AI_API_KEY);
-  const blob = Utilities.newBlob("Vector store content", "text/plain", "vs.txt");
-  const store = GenAIApp.newVectorStore()
-    .setName("test-store-" + Date.now())
-    .setDescription("Temporary store for tests")
-    .createVectorStore();
-  const storeId = store.getId();
-  const fileId = store.uploadAndAttachFile(blob, { source: "test" });
-  const files = store.listFiles();
-  console.log(`Vector store files: ${JSON.stringify(files)}`);
-  store.deleteFile(fileId);
-  store.deleteVectorStore();
-  console.log(`Vector store ${storeId} cleaned up.`);
-}
+function testVectorStoreLifecycle() {
+  GenAIApp.setOpenAIAPIKey(OPEN_AI_API_KEY);
+  const blob = Utilities.newBlob("Vector store content", "text/plain", "vs.txt");
+  const store = GenAIApp.newVectorStore()
+    .setName("test-store-" + Date.now())
+    .setDescription("Temporary store for tests")
+    .createVectorStore();
+  const storeId = store.getId();
+  let fileId;
+  try {
+    fileId = store.uploadAndAttachFile(blob, { source: "test" });
+    // Brief wait for indexing/consistency if backend is eventually consistent
+    Utilities.sleep(1500);
+    const files = store.listFiles();
+    console.log(`Vector store files: ${JSON.stringify(files)}`);
+    if (fileId) {
+      store.deleteFile(fileId);
+    }
+  } finally {
+    store.deleteVectorStore();
+    console.log(`Vector store ${storeId} cleaned up.`);
+  }
+}
🤖 Prompt for AI Agents
In src/testFunctions.gs around lines 193 to 210, the test currently uploads a
file and deletes the vector store without protection against exceptions and
without a short wait for eventual indexing; wrap the upload/list/delete
lifecycle in a try/finally so the file and the vector store are always removed
even if an intermediate step throws, and insert a brief sleep (e.g., a few
hundred milliseconds) after upload before listing files to reduce flakiness from
eventual consistency; ensure you capture the returned fileId so the finally
block can safely delete the file (guarding deletion calls if ids are undefined)
and still delete the store in all cases.
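
If a fixed Utilities.sleep still proves flaky, a bounded polling loop is another option (a sketch only; it assumes store.listFiles() returns an array, which this diff does not spell out):

// Hypothetical helper: poll until the uploaded file is listed, or give up after maxWaitMs.
function waitForVectorStoreFile(store, maxWaitMs) {
  const start = Date.now();
  while (Date.now() - start < maxWaitMs) {
    const files = store.listFiles();   // assumed to return an array of attached files
    if (files && files.length > 0) {
      return files;
    }
    Utilities.sleep(500);              // brief back-off between polls
  }
  return store.listFiles();            // final attempt; caller handles an empty result
}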