From a08b5b2efa140bd0fce384b6ae4df0fa11f0200e Mon Sep 17 00:00:00 2001 From: Forhad Hosain Date: Wed, 26 Nov 2025 19:17:40 +0600 Subject: [PATCH 1/2] make error message clear for empty response --- .../core/src/Components/GenAILLM.class.ts | 32 ++++++++++++------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/packages/core/src/Components/GenAILLM.class.ts b/packages/core/src/Components/GenAILLM.class.ts index 3b3954c9..77db18b1 100644 --- a/packages/core/src/Components/GenAILLM.class.ts +++ b/packages/core/src/Components/GenAILLM.class.ts @@ -528,26 +528,36 @@ export class GenAILLM extends Component { response = await contentPromise.catch((error) => { return { error: error.message || error }; }); - // If the model stopped before completing the response, this is usually due to output token limit reached. + + // #region Handle Response Errors + if (response?.error) { + const error = response?.error + ' ' + (response?.details || ''); + logger.error(` LLM Error=`, error); + + return { Output: response?.data, _error: error, _debug: logger.output }; + } + + const emptyResponseErrorMsg = + 'Empty response. This is usually due to output token limit reached. Please try again with a higher max tokens limit.'; + + // If the finish reason is not "stop", it means the model stopped before completing the response. if (finishReason !== 'stop') { + let errMsg = `The model stopped before completing the response. + \nReason: ${finishReason}. + \n${!response ? emptyResponseErrorMsg : ''}`; + return { Reply: response, - _error: 'The model stopped before completing the response, this is usually due to output token limit reached.', + _error: errMsg, _debug: logger.output, }; } - // in case we have the response but it's empty string, undefined or null + // If the finish reason is "stop" but there is still no response, it is usually caused by reaching the output token limit.
if (!response) { - return { _error: ' LLM Error = Empty Response!', _debug: logger.output }; - } - - if (response?.error) { - const error = response?.error + ' ' + (response?.details || ''); - logger.error(` LLM Error=`, error); - - return { Output: response?.data, _error: error, _debug: logger.output }; + return { _error: emptyResponseErrorMsg, _debug: logger.output }; } + // #endregion const Reply = llmInference.connector.postProcess(response); if (Reply.error) { From 2ee5f5f306e527a3372413b1b1ec3e05662f4891 Mon Sep 17 00:00:00 2001 From: Forhad Hosain Date: Wed, 26 Nov 2025 21:31:24 +0600 Subject: [PATCH 2/2] include the field name in error message for empty response --- packages/core/src/Components/GenAILLM.class.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/core/src/Components/GenAILLM.class.ts b/packages/core/src/Components/GenAILLM.class.ts index 77db18b1..30541927 100644 --- a/packages/core/src/Components/GenAILLM.class.ts +++ b/packages/core/src/Components/GenAILLM.class.ts @@ -538,7 +538,7 @@ export class GenAILLM extends Component { } const emptyResponseErrorMsg = - 'Empty response. This is usually due to output token limit reached. Please try again with a higher max tokens limit.'; + "Empty response. This is usually due to output token limit reached. Please try again with a higher 'Maximum Output Tokens'."; // If the finish reason is not "stop", it means the model stopped before completing the response. if (finishReason !== 'stop') {