From 21df1228bfa0ebdf4d85aa5824670f5581eef276 Mon Sep 17 00:00:00 2001 From: chanzhi82020 Date: Mon, 25 Aug 2025 16:57:37 +0800 Subject: [PATCH 001/255] This PR introduces evaluation support designed specifically to track and benchmark applications built on the FastGPT platform. (#5476) - Adds a lightweight evaluation framework for app-level tracking and benchmarking. - Changes: 29 files, +1460 additions, -65 deletions. - Branch: add-evaluations -> main. - PR: https://github.com/chanzhi82020/FastGPT/pull/1 Applications built on FastGPT need repeatable, comparable benchmarks to measure regressions, track improvements, and validate releases. This initial implementation provides the primitives to define evaluation scenarios, run them against app endpoints or model components, and persist results for later analysis. I updated the PR description to emphasize that the evaluation system is targeted at FastGPT-built apps and expanded the explanation of the core pieces so reviewers understand the scope and intended use. The new description outlines the feature intent, core components, and how results are captured and aggregated for benchmarking. - Evaluation definitions - Define evaluation tasks that reference an app (app id, version, endpoint), test datasets or input cases, expected outputs (when applicable), and run configuration (parallelism, timeouts). - Support for custom metric plugins so teams can add domain-specific measures. - Runner / Executor - Executes evaluation cases against app endpoints or internal model interfaces. - Captures raw responses, response times, status codes, and any runtime errors. - Computes per-case metrics (e.g., correctness, latency) immediately after each case run. - Metrics & Aggregation - Built-in metrics: accuracy/success rate, latency (p50/p90/p99), throughput, error rate. - Aggregation produces per-run summaries and per-app historical summaries for trend analysis. 
- Allows combining metrics into composite scores for high-level benchmarking. - Persistence & Logging - Stores run results, input/output pairs (when needed), timestamps, environment info, and app/version metadata so runs are reproducible and auditable. - Logs are retained to facilitate debugging and root-cause analysis of regressions. - Reporting & Comparison - Produces aggregated reports suitable for CI gating, release notes, or dashboards. - Supports comparing multiple app versions or deployments side-by-side. - Extensibility & Integration - Designed to plug into CI (automated runs on PRs or releases), dashboards, and downstream analysis tools. - Easy to add new metrics, evaluators, or dataset connectors. By centering the evaluation system on FastGPT apps, teams can benchmark full application behavior (not only raw model outputs), correlate metrics with deployment configurations, and make informed release decisions. - Expand built-in metric suite (e.g., F1, BLEU/ROUGE where applicable), add dataset connectors, and provide example evaluation scenarios for sample apps. - Integrate with CI pipelines and add basic dashboarding for trend visualization. 
Related Issue: N/A Co-authored-by: Archer <545436317@qq.com> --- .../global/core/{app => }/evaluation/api.d.ts | 0 .../core/{app => }/evaluation/constants.ts | 2 +- .../core/{app => }/evaluation/type.d.ts | 0 .../global/core/{app => }/evaluation/utils.ts | 2 +- packages/service/core/app/controller.ts | 4 +- .../{app => }/evaluation/evalItemSchema.ts | 6 +- .../core/{app => }/evaluation/evalSchema.ts | 8 +- packages/service/core/evaluation/index.ts | 370 ++++++++++++++++++ .../service/core/{app => }/evaluation/mq.ts | 4 +- packages/service/core/evaluation/scoring.ts | 129 ++++++ packages/service/core/evaluation/utils.ts | 185 +++++++++ .../support/permission/evaluation/auth.ts | 4 +- packages/service/type/env.d.ts | 4 + projects/app/.env.template | 4 + projects/app/src/instrumentation.ts | 3 + .../account/model/AddModelBox.tsx | 18 +- .../pageComponents/dashboard/Container.tsx | 26 +- .../{app => }/evaluation/DetailModal.tsx | 10 +- .../src/pages/api/core/evaluation/create.ts | 159 ++++++++ .../src/pages/api/core/evaluation/delete.ts | 53 +++ .../pages/api/core/evaluation/deleteItem.ts | 23 ++ .../pages/api/core/evaluation/exportItems.ts | 112 ++++++ .../app/src/pages/api/core/evaluation/list.ts | 194 +++++++++ .../pages/api/core/evaluation/listItems.ts | 83 ++++ .../pages/api/core/evaluation/retryItem.ts | 45 +++ .../pages/api/core/evaluation/updateItem.ts | 46 +++ .../src/pages/dashboard/evaluation/create.tsx | 6 +- .../src/pages/dashboard/evaluation/index.tsx | 6 +- .../{app/api => evaluation}/evaluation.ts | 19 +- 29 files changed, 1460 insertions(+), 65 deletions(-) rename packages/global/core/{app => }/evaluation/api.d.ts (100%) rename packages/global/core/{app => }/evaluation/constants.ts (91%) rename packages/global/core/{app => }/evaluation/type.d.ts (100%) rename packages/global/core/{app => }/evaluation/utils.ts (85%) rename packages/service/core/{app => }/evaluation/evalItemSchema.ts (82%) rename packages/service/core/{app => }/evaluation/evalSchema.ts 
(83%) create mode 100644 packages/service/core/evaluation/index.ts rename packages/service/core/{app => }/evaluation/mq.ts (94%) create mode 100644 packages/service/core/evaluation/scoring.ts create mode 100644 packages/service/core/evaluation/utils.ts rename projects/app/src/pageComponents/{app => }/evaluation/DetailModal.tsx (98%) create mode 100644 projects/app/src/pages/api/core/evaluation/create.ts create mode 100644 projects/app/src/pages/api/core/evaluation/delete.ts create mode 100644 projects/app/src/pages/api/core/evaluation/deleteItem.ts create mode 100644 projects/app/src/pages/api/core/evaluation/exportItems.ts create mode 100644 projects/app/src/pages/api/core/evaluation/list.ts create mode 100644 projects/app/src/pages/api/core/evaluation/listItems.ts create mode 100644 projects/app/src/pages/api/core/evaluation/retryItem.ts create mode 100644 projects/app/src/pages/api/core/evaluation/updateItem.ts rename projects/app/src/web/core/{app/api => evaluation}/evaluation.ts (67%) diff --git a/packages/global/core/app/evaluation/api.d.ts b/packages/global/core/evaluation/api.d.ts similarity index 100% rename from packages/global/core/app/evaluation/api.d.ts rename to packages/global/core/evaluation/api.d.ts diff --git a/packages/global/core/app/evaluation/constants.ts b/packages/global/core/evaluation/constants.ts similarity index 91% rename from packages/global/core/app/evaluation/constants.ts rename to packages/global/core/evaluation/constants.ts index d6b02985820c..5624267718e7 100644 --- a/packages/global/core/app/evaluation/constants.ts +++ b/packages/global/core/evaluation/constants.ts @@ -1,4 +1,4 @@ -import { i18nT } from '../../../../web/i18n/utils'; +import { i18nT } from '../../../web/i18n/utils'; export const evaluationFileErrors = i18nT('dashboard_evaluation:eval_file_check_error'); diff --git a/packages/global/core/app/evaluation/type.d.ts b/packages/global/core/evaluation/type.d.ts similarity index 100% rename from 
packages/global/core/app/evaluation/type.d.ts rename to packages/global/core/evaluation/type.d.ts diff --git a/packages/global/core/app/evaluation/utils.ts b/packages/global/core/evaluation/utils.ts similarity index 85% rename from packages/global/core/app/evaluation/utils.ts rename to packages/global/core/evaluation/utils.ts index adad61c67817..4e29fcfb919b 100644 --- a/packages/global/core/app/evaluation/utils.ts +++ b/packages/global/core/evaluation/utils.ts @@ -1,4 +1,4 @@ -import type { VariableItemType } from '../type'; +import type { VariableItemType } from '../app/type'; export const getEvaluationFileHeader = (appVariables?: VariableItemType[]) => { if (!appVariables || appVariables.length === 0) return '*q,*a,history'; diff --git a/packages/service/core/app/controller.ts b/packages/service/core/app/controller.ts index 55aab10f7f5d..fde3d90d41a6 100644 --- a/packages/service/core/app/controller.ts +++ b/packages/service/core/app/controller.ts @@ -7,8 +7,8 @@ import type { StoreNodeItemType } from '@fastgpt/global/core/workflow/type/node' import { encryptSecretValue, storeSecretValue } from '../../common/secret/utils'; import { SystemToolInputTypeEnum } from '@fastgpt/global/core/app/systemTool/constants'; import { type ClientSession } from '../../common/mongo'; -import { MongoEvaluation } from './evaluation/evalSchema'; -import { removeEvaluationJob } from './evaluation/mq'; +import { MongoEvaluation } from '../evaluation/evalSchema'; +import { removeEvaluationJob } from '../evaluation/mq'; import { deleteChatFiles } from '../chat/controller'; import { MongoChatItem } from '../chat/chatItemSchema'; import { MongoChat } from '../chat/chatSchema'; diff --git a/packages/service/core/app/evaluation/evalItemSchema.ts b/packages/service/core/evaluation/evalItemSchema.ts similarity index 82% rename from packages/service/core/app/evaluation/evalItemSchema.ts rename to packages/service/core/evaluation/evalItemSchema.ts index 45e8633da062..732b59f68ca4 100644 --- 
a/packages/service/core/app/evaluation/evalItemSchema.ts +++ b/packages/service/core/evaluation/evalItemSchema.ts @@ -1,10 +1,10 @@ -import { connectionMongo, getMongoModel } from '../../../common/mongo'; +import { connectionMongo, getMongoModel } from '../../common/mongo'; import { EvaluationCollectionName } from './evalSchema'; import { EvaluationStatusEnum, EvaluationStatusValues -} from '@fastgpt/global/core/app/evaluation/constants'; -import type { EvalItemSchemaType } from '@fastgpt/global/core/app/evaluation/type'; +} from '@fastgpt/global/core/evaluation/constants'; +import type { EvalItemSchemaType } from '@fastgpt/global/core/evaluation/type'; const { Schema } = connectionMongo; diff --git a/packages/service/core/app/evaluation/evalSchema.ts b/packages/service/core/evaluation/evalSchema.ts similarity index 83% rename from packages/service/core/app/evaluation/evalSchema.ts rename to packages/service/core/evaluation/evalSchema.ts index a8678ebda04b..458d89f4d997 100644 --- a/packages/service/core/app/evaluation/evalSchema.ts +++ b/packages/service/core/evaluation/evalSchema.ts @@ -2,10 +2,10 @@ import { TeamCollectionName, TeamMemberCollectionName } from '@fastgpt/global/support/user/team/constant'; -import { connectionMongo, getMongoModel } from '../../../common/mongo'; -import { AppCollectionName } from '../schema'; -import type { EvaluationSchemaType } from '@fastgpt/global/core/app/evaluation/type'; -import { UsageCollectionName } from '../../../support/wallet/usage/schema'; +import { connectionMongo, getMongoModel } from '../../common/mongo'; +import { AppCollectionName } from '../app/schema'; +import type { EvaluationSchemaType } from '@fastgpt/global/core/evaluation/type'; +import { UsageCollectionName } from '../../support/wallet/usage/schema'; const { Schema } = connectionMongo; export const EvaluationCollectionName = 'eval'; diff --git a/packages/service/core/evaluation/index.ts b/packages/service/core/evaluation/index.ts new file mode 100644 
index 000000000000..7f2156c94148 --- /dev/null +++ b/packages/service/core/evaluation/index.ts @@ -0,0 +1,370 @@ +import { addLog } from '../../common/system/log'; +import type { Job } from '../../common/bullmq'; +import { getEvaluationWorker, type EvaluationJobData, removeEvaluationJob } from './mq'; +import { MongoEvalItem } from './evalItemSchema'; +import { Types } from 'mongoose'; +import { dispatchWorkFlow } from '../workflow/dispatch'; +import { MongoEvaluation } from './evalSchema'; +import { getNanoid } from '@fastgpt/global/common/string/tools'; +import { getAppLatestVersion } from '../../core/app/version/controller'; +import { + getWorkflowEntryNodeIds, + storeEdges2RuntimeEdges, + storeNodes2RuntimeNodes +} from '@fastgpt/global/core/workflow/runtime/utils'; +import type { UserChatItemValueItemType } from '@fastgpt/global/core/chat/type'; +import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants'; +import { WORKFLOW_MAX_RUN_TIMES } from '../../core/workflow/constants'; +import { getAppEvaluationScore } from './scoring'; +import { checkTeamAIPoints } from '../../support/permission/teamLimit'; +import { EvaluationStatusEnum } from '@fastgpt/global/core/evaluation/constants'; +import type { + EvalItemSchemaType, + EvaluationSchemaType +} from '@fastgpt/global/core/evaluation/type'; +import type { Document } from 'mongoose'; +import { TeamErrEnum } from '@fastgpt/global/common/error/code/team'; +import { + InformLevelEnum, + SendInformTemplateCodeEnum +} from '@fastgpt/global/support/user/inform/constants'; +import type { AppChatConfigType, AppSchema } from '@fastgpt/global/core/app/type'; +import type { StoreNodeItemType } from '@fastgpt/global/core/workflow/type/node'; +import type { StoreEdgeItemType } from '@fastgpt/global/core/workflow/type/edge'; +import { getErrText } from '@fastgpt/global/common/error/utils'; +import { formatModelChars2Points } from '../../support/wallet/usage/utils'; +import { ModelTypeEnum } from 
'@fastgpt/global/core/ai/model'; +import { concatUsage } from '../../support/wallet/usage/controller'; +import { MongoApp } from '../../core/app/schema'; +import { delay } from '@fastgpt/global/common/system/utils'; +import { removeDatasetCiteText } from '../../core/ai/utils'; +import { getUserChatInfoAndAuthTeamPoints } from '../../support/permission/auth/team'; +import { getRunningUserInfoByTmbId } from '../../support/user/team/utils'; + +type AppContextType = { + appData: AppSchema; + timezone: string; + externalProvider: Record; + nodes: StoreNodeItemType[]; + edges: StoreEdgeItemType[]; + chatConfig: AppChatConfigType; +}; + +export const initEvaluationWorker = () => { + addLog.info('Init Evaluation Worker...'); + return getEvaluationWorker(processor); +}; + +const dealAiPointCheckError = async (evalId: string, error: any) => { + if (error === TeamErrEnum.aiPointsNotEnough) { + await MongoEvaluation.updateOne( + { _id: new Types.ObjectId(evalId) }, + { $set: { errorMessage: error } } + ); + + const evaluation = await MongoEvaluation.findById(evalId).lean(); + if (evaluation) { + sendInform2OneUser({ + level: InformLevelEnum.important, + templateCode: 'LACK_OF_POINTS', + templateParam: {}, + teamId: evaluation.teamId + }); + } + return; + } + + return Promise.reject(error); +}; + +const finishEvaluation = async (evalId: string) => { + // Computed all eval score and add to evaluation collection + const scoreResult = await MongoEvalItem.aggregate([ + { + $match: { + evalId: new Types.ObjectId(evalId), + status: EvaluationStatusEnum.completed, + errorMessage: { $exists: false }, + score: { $exists: true } + } + }, + { + $group: { + _id: null, + avgScore: { $avg: '$score' } + } + } + ]); + + const avgScore = scoreResult.length > 0 ? 
scoreResult[0].avgScore : 0; + + await MongoEvaluation.updateOne( + { _id: new Types.ObjectId(evalId) }, + { + $set: { + finishTime: new Date(), + score: avgScore + } + } + ); + + addLog.info('[Evaluation] Task finished', { evalId, avgScore }); +}; + +const handleEvalItemError = async ( + evalItem: Document & EvalItemSchemaType, + error: any +) => { + const errorMessage = getErrText(error); + + await MongoEvalItem.updateOne( + { _id: evalItem._id }, + { + $inc: { retry: -1 }, + $set: { + errorMessage + } + } + ); +}; + +const createMergedEvaluationUsage = async ( + params: { + evaluation: EvaluationSchemaType; + totalPoints: number; + } & ( + | { + type: 'run'; + } + | { + type: 'eval'; + inputTokens: number; + outputTokens: number; + } + ) +) => { + const { evaluation, totalPoints } = params; + + if (params.type === 'run') { + await concatUsage({ + billId: evaluation.usageId, + teamId: evaluation.teamId, + tmbId: evaluation.tmbId, + totalPoints, + count: 1, + listIndex: 0 + }); + } else if (params.type === 'eval') { + await concatUsage({ + billId: evaluation.usageId, + teamId: evaluation.teamId, + tmbId: evaluation.tmbId, + totalPoints, + inputTokens: params.inputTokens, + outputTokens: params.outputTokens, + listIndex: 1 + }); + } +}; + +const processEvalItem = async ({ + evalItem, + evaluation, + appContext +}: { + evalItem: Document & EvalItemSchemaType; + evaluation: EvaluationSchemaType; + appContext: AppContextType; +}) => { + const getAppAnswer = async (): Promise => { + if (evalItem?.response) { + return evalItem.response; + } + + const { appData, timezone, externalProvider, nodes, edges, chatConfig } = appContext; + const chatId = getNanoid(); + + const query: UserChatItemValueItemType[] = [ + { + type: ChatItemValueTypeEnum.text, + text: { + content: evalItem?.question || '' + } + } + ]; + + const histories = (() => { + try { + return evalItem?.history ? 
JSON.parse(evalItem.history) : []; + } catch (error) { + return []; + } + })(); + + const { assistantResponses, flowUsages } = await dispatchWorkFlow({ + chatId, + timezone, + externalProvider, + mode: 'chat', + runningAppInfo: { + id: String(appData._id), + teamId: String(appData.teamId), + tmbId: String(appData.tmbId) + }, + runningUserInfo: await getRunningUserInfoByTmbId(evaluation.tmbId), + uid: String(evaluation.tmbId), + runtimeNodes: storeNodes2RuntimeNodes(nodes, getWorkflowEntryNodeIds(nodes)), + runtimeEdges: storeEdges2RuntimeEdges(edges), + variables: evalItem?.globalVariables || {}, + query, + chatConfig, + histories, + stream: false, + maxRunTimes: WORKFLOW_MAX_RUN_TIMES + }); + const totalPoints = flowUsages.reduce((sum, item) => sum + (item.totalPoints || 0), 0); + const appAnswer = removeDatasetCiteText(assistantResponses[0]?.text?.content || '', false); + + evalItem.response = appAnswer; + evalItem.responseTime = new Date(); + await evalItem.save(); + + // Push usage + createMergedEvaluationUsage({ + evaluation, + totalPoints, + type: 'run' + }); + + return appAnswer; + }; + + const appAnswer = await getAppAnswer(); + + // Eval score + const { accuracyScore, usage } = await getAppEvaluationScore({ + question: evalItem?.question || '', + appAnswer, + standardAnswer: evalItem?.expectedResponse || '', + model: evaluation.evalModel + }); + + evalItem.status = EvaluationStatusEnum.completed; + evalItem.accuracy = accuracyScore; + evalItem.score = accuracyScore; + evalItem.finishTime = new Date(); + await evalItem.save(); + + // Push usage + const { totalPoints: evalModelPoints } = formatModelChars2Points({ + model: evaluation.evalModel, + modelType: ModelTypeEnum.llm, + inputTokens: usage.inputTokens, + outputTokens: usage.outputTokens + }); + createMergedEvaluationUsage({ + evaluation, + totalPoints: evalModelPoints, + type: 'eval', + inputTokens: usage.inputTokens, + outputTokens: usage.outputTokens + }); +}; + +const processor = async (job: Job) => 
{ + const { evalId } = job.data; + + // 初始化检查 + const evaluation = await MongoEvaluation.findById(evalId); + if (!evaluation) { + addLog.warn('[Evaluation] Eval not found', { evalId }); + await removeEvaluationJob(evalId); + return; + } + + const appData = await MongoApp.findById(evaluation.appId); + if (!appData) { + addLog.warn('[Evaluation] App not found', { evalId }); + await removeEvaluationJob(evalId); + return; + } + + const [{ timezone, externalProvider }, { nodes, edges, chatConfig }] = await Promise.all([ + getUserChatInfoAndAuthTeamPoints(appData.tmbId), + getAppLatestVersion(appData._id, appData), + // Reset error message + MongoEvaluation.updateOne({ _id: new Types.ObjectId(evalId) }, { $set: { errorMessage: null } }) + ]); + + const appContext: AppContextType = { + appData, + timezone, + externalProvider, + nodes, + edges, + chatConfig + }; + + // 主循环 + while (true) { + try { + await checkTeamAIPoints(evaluation.teamId); + } catch (error) { + return await dealAiPointCheckError(evalId, error); + } + + const evalItem = await MongoEvalItem.findOneAndUpdate( + { + evalId, + status: { $in: [EvaluationStatusEnum.queuing, EvaluationStatusEnum.evaluating] }, + retry: { $gt: 0 } + }, + { + $set: { status: EvaluationStatusEnum.evaluating } + } + ); + if (!evalItem) { + await finishEvaluation(evalId); + break; + } + + // Process eval item + try { + await processEvalItem({ + evalItem, + evaluation, + appContext + }); + } catch (error) { + if (error === 'Evaluation model not found') { + addLog.warn('[Evaluation] Model not found', { evalId, model: evaluation.evalModel }); + + await MongoEvaluation.updateOne( + { _id: new Types.ObjectId(evalId) }, + { $set: { errorMessage: `Model ${evaluation.evalModel} not found` } } + ).catch(); + + break; + } + + await handleEvalItemError(evalItem, error); + await delay(100); + } + } +}; +function getMessageTemplate(templateCode: any): { + getInformTemplate: any; + lockMinutes: any; + isSendQueue: any; +} { + throw new 
Error('Function not implemented.'); +} + +function sendInform2OneUser(arg0: { + level: InformLevelEnum; + templateCode: string; + templateParam: {}; + teamId: string; +}) { + addLog.warn('sendInform2OneUser: Starting notification process:', arg0); +} diff --git a/packages/service/core/app/evaluation/mq.ts b/packages/service/core/evaluation/mq.ts similarity index 94% rename from packages/service/core/app/evaluation/mq.ts rename to packages/service/core/evaluation/mq.ts index c0192d4763d8..8a5625b8aace 100644 --- a/packages/service/core/app/evaluation/mq.ts +++ b/packages/service/core/evaluation/mq.ts @@ -1,6 +1,6 @@ -import { getQueue, getWorker, QueueNames } from '../../../common/bullmq'; +import { getQueue, getWorker, QueueNames } from '../../common/bullmq'; import { type Processor } from 'bullmq'; -import { addLog } from '../../../common/system/log'; +import { addLog } from '../../common/system/log'; export type EvaluationJobData = { evalId: string; diff --git a/packages/service/core/evaluation/scoring.ts b/packages/service/core/evaluation/scoring.ts new file mode 100644 index 000000000000..00f4fd9ba9c8 --- /dev/null +++ b/packages/service/core/evaluation/scoring.ts @@ -0,0 +1,129 @@ +import type { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type'; +import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants'; +import { getLLMModel } from '../../core/ai/model'; +import { createChatCompletion } from '../../core/ai/config'; +import { formatLLMResponse, llmCompletionsBodyFormat } from '../../core/ai/utils'; +import { loadRequestMessages } from '../../core/chat/utils'; +import { countGptMessagesTokens, countPromptTokens } from '../../common/string/tiktoken'; + +const template_accuracy1 = ` +Instruction: You are a world class state of the art assistant for rating a User Answer given a Question. The Question is completely answered by the Reference Answer. 
+Say 4, if User Answer is full contained and equivalent to Reference Answer in all terms, topics, numbers, metrics, dates and units. +Say 2, if User Answer is partially contained and almost equivalent to Reference Answer in all terms, topics, numbers, metrics, dates and units. +Say 0, if User Answer is not contained in Reference Answer or not accurate in all terms, topics, numbers, metrics, dates and units or the User Answer do not answer the question. +Do not explain or justify your rating. Your rating must be only 4, 2 or 0 according to the instructions above. + +## Question +{query} + +## Answer0 +{sentence_inference} + +## Answer1 +{sentence_true} + +## Rating`; + +const template_accuracy2 = ` +I will rate the User Answer in comparison to the Reference Answer for a given Question. +A rating of 4 indicates that the User Answer is entirely consistent with the Reference Answer, covering all aspects, topics, numbers, metrics, dates, and units. +A rating of 2 signifies that the User Answer is mostly aligned with the Reference Answer, with minor discrepancies in some areas. +A rating of 0 means that the User Answer is either inaccurate, incomplete, or unrelated to the Reference Answer, or it fails to address the Question. +I will provide the rating without any explanation or justification, adhering to the following scale: 0 (no match), 2 (partial match), 4 (exact match). +Do not explain or justify my rating. My rating must be only 4, 2 or 0 only. 
+ +## Question +{query} + +## Answer0 +{sentence_inference} + +## Answer1 +{sentence_true} + +## Rating`; + +export const getAppEvaluationScore = async ({ + question, + appAnswer, + standardAnswer, + model +}: { + question: string; + appAnswer: string; + standardAnswer: string; + model: string; +}) => { + const modelData = getLLMModel(model); + if (!modelData) { + return Promise.reject('Evaluation model not found'); + } + + const getEvalResult = async (template: string) => { + const messages: ChatCompletionMessageParam[] = [ + { + role: ChatCompletionRequestMessageRoleEnum.System, + content: template + }, + { + role: ChatCompletionRequestMessageRoleEnum.User, + content: [ + { + type: 'text', + text: `## Question +${question} + +## Answer0 +${appAnswer} + +## Answer1 +${standardAnswer} + +## Rating` + } + ] + } + ]; + const { response } = await createChatCompletion({ + body: llmCompletionsBodyFormat( + { + model: modelData.model, + temperature: 0.3, + messages: await loadRequestMessages({ messages, useVision: true }), + stream: true, + max_tokens: 5 + }, + modelData + ) + }); + + const { text, usage } = await formatLLMResponse(response); + + const numberText = Number(text); + const rate = isNaN(numberText) ? 
0 : numberText / 4; + + return { + rate, + inputTokens: usage?.prompt_tokens || (await countGptMessagesTokens(messages)), + outputTokens: usage?.completion_tokens || (await countPromptTokens(text)) + }; + }; + + const results = await Promise.all([ + getEvalResult(template_accuracy1), + getEvalResult(template_accuracy2) + ]); + + const accuracyScore = + Math.round((results.reduce((acc, item) => acc + item.rate, 0) / results.length) * 100) / 100; + const inputTokens = results.reduce((acc, item) => acc + item.inputTokens, 0); + const outputTokens = results.reduce((acc, item) => acc + item.outputTokens, 0); + + return { + accuracyScore, + usage: { + inputTokens, + outputTokens + } + }; +}; diff --git a/packages/service/core/evaluation/utils.ts b/packages/service/core/evaluation/utils.ts new file mode 100644 index 000000000000..a02cc4b25b7a --- /dev/null +++ b/packages/service/core/evaluation/utils.ts @@ -0,0 +1,185 @@ +import { evaluationFileErrors } from '@fastgpt/global/core/evaluation/constants'; +import { getEvaluationFileHeader } from '@fastgpt/global/core/evaluation/utils'; +import type { VariableItemType } from '@fastgpt/global/core/app/type'; +// import { addLog } from '@fastgpt/service/common/system/log'; +import { VariableInputEnum } from '@fastgpt/global/core/workflow/constants'; +import { TeamErrEnum } from '@fastgpt/global/common/error/code/team'; +import { Types } from 'mongoose'; +import { retryFn } from '@fastgpt/global/common/system/utils'; +import { i18nT } from '../../../web/i18n/utils'; +import { addLog } from '../../common/system/log'; +import { MongoEvaluation } from './evalSchema'; +import { addEvaluationJob } from './mq'; + +export const validateEvaluationFile = async ( + rawText: string, + appVariables?: VariableItemType[] +) => { + // const lines = rawText.trim().split('\r\n'); + // const dataLength = lines.length; + + // 使用正则表达式分割所有类型的换行符(\r\n、\n、\r) + const lines = rawText.trim().split(/\r?\n|\r/); + const dataLength = lines.length; + + // 
过滤可能的空行(处理文件末尾可能的空行) + const nonEmptyLines = lines.filter((line) => line.trim() !== ''); + if (nonEmptyLines.length === 0) { + addLog.error('File is empty'); + return Promise.reject(evaluationFileErrors); + } + + // Validate file header + const expectedHeader = getEvaluationFileHeader(appVariables); + // 去除头部可能的空白字符(如BOM头或空格) + const actualHeader = nonEmptyLines[0].trim(); + + if (actualHeader !== expectedHeader) { + addLog.error(`Header mismatch. Expected: "${expectedHeader}", Got: "${actualHeader}"`); + return Promise.reject(evaluationFileErrors); + } + + // Validate data rows count + if (dataLength <= 1) { + addLog.error('No data rows found'); + return Promise.reject(evaluationFileErrors); + } + + const maxRows = 1000; + if (dataLength - 1 > maxRows) { + addLog.error(`Too many rows. Max: ${maxRows}, Got: ${dataLength - 1}`); + return Promise.reject(evaluationFileErrors); + } + + const headers = lines[0].split(','); + + // Get required field indices + const requiredFields = headers + .map((header, index) => ({ header: header.trim(), index })) + .filter(({ header }) => header.startsWith('*')); + + const errors: string[] = []; + + // Validate each data row + for (let i = 1; i < lines.length; i++) { + const values = lines[i].trim().split(','); + + // Check required fields + requiredFields.forEach(({ header, index }) => { + if (!values[index]?.trim()) { + errors.push(`Row ${i + 1}: required field "${header}" is empty`); + } + }); + + // Validate app variables + if (appVariables) { + validateRowVariables({ + values, + variables: appVariables, + rowNum: i + 1, + errors + }); + } + } + + if (errors.length > 0) { + addLog.error(`Validation failed: ${errors.join('; ')}`); + return Promise.reject(evaluationFileErrors); + } + + return { lines, dataLength }; +}; + +const validateRowVariables = ({ + values, + variables, + rowNum, + errors +}: { + values: string[]; + variables: VariableItemType[]; + rowNum: number; + errors: string[]; +}) => { + variables.forEach((variable, 
index) => { + const value = values[index]?.trim(); + + // Skip validation if value is empty and not required + if (!value && !variable.required) return; + + switch (variable.type) { + case VariableInputEnum.input: + // Validate string length + if (variable.maxLength && value && value.length > variable.maxLength) { + errors.push( + `Row ${rowNum}: "${variable.label}" exceeds max length (${variable.maxLength})` + ); + } + break; + + case VariableInputEnum.numberInput: + // Validate number type and range + if (value) { + const numValue = Number(value); + if (isNaN(numValue)) { + errors.push(`Row ${rowNum}: "${variable.label}" must be a number`); + } else { + if (variable.min !== undefined && numValue < variable.min) { + errors.push(`Row ${rowNum}: "${variable.label}" below minimum (${variable.min})`); + } + if (variable.max !== undefined && numValue > variable.max) { + errors.push(`Row ${rowNum}: "${variable.label}" exceeds maximum (${variable.max})`); + } + } + } + break; + + case VariableInputEnum.select: + // Validate select options + if (value && variable.enums?.length) { + const validOptions = variable.enums.map((item) => item.value); + if (!validOptions.includes(value)) { + errors.push( + `Row ${rowNum}: "${variable.label}" invalid option. 
Valid: [${validOptions.join(', ')}]` + ); + } + } + break; + } + }); +}; + +export const checkTeamHasRunningEvaluation = async (teamId: string) => { + const runningEvaluation = await MongoEvaluation.findOne( + { + teamId: new Types.ObjectId(teamId), + finishTime: { $exists: false } + }, + '_id' + ).lean(); + + if (runningEvaluation) { + return Promise.reject(i18nT('dashboard_evaluation:team_has_running_evaluation')); + } +}; + +export const resumePausedEvaluations = async (teamId: string): Promise => { + return retryFn(async () => { + const pausedEvaluations = await MongoEvaluation.find({ + teamId: new Types.ObjectId(teamId), + errorMessage: TeamErrEnum.aiPointsNotEnough, + finishTime: { $exists: false } + }).lean(); + + if (pausedEvaluations.length === 0) { + return; + } + + for (const evaluation of pausedEvaluations) { + await MongoEvaluation.updateOne({ _id: evaluation._id }, { $unset: { errorMessage: 1 } }); + await addEvaluationJob({ evalId: String(evaluation._id) }); + } + + addLog.info('Resumed paused evaluations', { teamId, count: pausedEvaluations.length }); + }, 3); +}; diff --git a/packages/service/support/permission/evaluation/auth.ts b/packages/service/support/permission/evaluation/auth.ts index 1622ef31e7e8..4629d9ec31ca 100644 --- a/packages/service/support/permission/evaluation/auth.ts +++ b/packages/service/support/permission/evaluation/auth.ts @@ -4,9 +4,9 @@ import { ManagePermissionVal, ReadPermissionVal } from '@fastgpt/global/support/permission/constant'; -import type { EvaluationSchemaType } from '@fastgpt/global/core/app/evaluation/type'; +import type { EvaluationSchemaType } from '@fastgpt/global/core/evaluation/type'; import type { AuthModeType } from '../type'; -import { MongoEvaluation } from '../../../core/app/evaluation/evalSchema'; +import { MongoEvaluation } from '../../../core/evaluation/evalSchema'; export const authEval = async ({ evalId, diff --git a/packages/service/type/env.d.ts b/packages/service/type/env.d.ts index 
aff70d4eea88..2471b807c4ae 100644 --- a/packages/service/type/env.d.ts +++ b/packages/service/type/env.d.ts @@ -46,6 +46,10 @@ declare global { CHAT_LOG_SOURCE_ID_PREFIX?: string; NEXT_PUBLIC_BASE_URL: string; + + // evaluations settings + EVAL_CONCURRENCY?: string; + EVAL_LINE_LIMIT?: string; } } } diff --git a/projects/app/.env.template b/projects/app/.env.template index 79aef450f9eb..6fde1d5e9d1f 100644 --- a/projects/app/.env.template +++ b/projects/app/.env.template @@ -100,3 +100,7 @@ SIGNOZ_BASE_URL= SIGNOZ_SERVICE_NAME= SIGNOZ_STORE_LEVEL=warn +# evaluations settings +EVAL_CONCURRENCY=3 # the number of concurrent evaluations tasks +EVAL_LINE_LIMIT=1000 # the max line number of the uploaded eval data file + diff --git a/projects/app/src/instrumentation.ts b/projects/app/src/instrumentation.ts index ebaf45cbade3..d4b6f4b251ab 100644 --- a/projects/app/src/instrumentation.ts +++ b/projects/app/src/instrumentation.ts @@ -15,6 +15,7 @@ export async function register() { { initVectorStore }, { initRootUser }, { startMongoWatch }, + { initEvaluationWorker }, { startCron }, { startTrainingQueue }, { preLoadWorker }, @@ -28,6 +29,7 @@ export async function register() { import('@fastgpt/service/common/vectorDB/controller'), import('@/service/mongo'), import('@/service/common/system/volumnMongoWatch'), + import('@fastgpt/service/core/evaluation'), import('@/service/common/system/cron'), import('@/service/core/dataset/training/utils'), import('@fastgpt/service/worker/preload'), @@ -60,6 +62,7 @@ export async function register() { initAppTemplateTypes(); // getSystemPlugins(true); startMongoWatch(); + initEvaluationWorker(); startCron(); startTrainingQueue(true); diff --git a/projects/app/src/pageComponents/account/model/AddModelBox.tsx b/projects/app/src/pageComponents/account/model/AddModelBox.tsx index 8edc66f10b8d..851a78dc801d 100644 --- a/projects/app/src/pageComponents/account/model/AddModelBox.tsx +++ 
b/projects/app/src/pageComponents/account/model/AddModelBox.tsx @@ -683,16 +683,14 @@ export const ModelEditModal = ({ - {feConfigs?.isPlus && ( - - {t('account_model:use_in_eval')} - - - - - - - )} + + {t('account_model:use_in_eval')} + + + + + + diff --git a/projects/app/src/pageComponents/dashboard/Container.tsx b/projects/app/src/pageComponents/dashboard/Container.tsx index 2781959ae2c5..910632339a01 100644 --- a/projects/app/src/pageComponents/dashboard/Container.tsx +++ b/projects/app/src/pageComponents/dashboard/Container.tsx @@ -193,26 +193,14 @@ const DashboardContainer = ({ groupName: t('common:mcp_server'), children: [] }, - ...(feConfigs?.isPlus - ? [ - { - groupId: TabEnum.evaluation, - groupAvatar: 'kbTest', - groupName: t('common:app_evaluation'), - children: [] - } - ] - : []) + { + groupId: TabEnum.evaluation, + groupAvatar: 'kbTest', + groupName: t('common:app_evaluation'), + children: [] + } ]; - }, [ - currentType, - feConfigs.appTemplateCourse, - feConfigs?.isPlus, - pluginGroups, - t, - templateList, - templateTags - ]); + }, [currentType, feConfigs.appTemplateCourse, pluginGroups, t, templateList, templateTags]); const MenuIcon = useMemo( () => ( diff --git a/projects/app/src/pageComponents/app/evaluation/DetailModal.tsx b/projects/app/src/pageComponents/evaluation/DetailModal.tsx similarity index 98% rename from projects/app/src/pageComponents/app/evaluation/DetailModal.tsx rename to projects/app/src/pageComponents/evaluation/DetailModal.tsx index ccb91da7ef1d..161de826f4b2 100644 --- a/projects/app/src/pageComponents/app/evaluation/DetailModal.tsx +++ b/projects/app/src/pageComponents/evaluation/DetailModal.tsx @@ -25,7 +25,7 @@ import { getEvalItemsList, retryEvalItem, updateEvalItem -} from '@/web/core/app/api/evaluation'; +} from '@/web/core/evaluation/evaluation'; import { usePagination } from '@fastgpt/web/hooks/usePagination'; import { downloadFetch } from '@/web/common/system/utils'; import PopoverConfirm from 
'@fastgpt/web/components/common/MyPopover/PopoverConfirm'; @@ -35,9 +35,9 @@ import { useForm } from 'react-hook-form'; import { EvaluationStatusMap, EvaluationStatusEnum -} from '@fastgpt/global/core/app/evaluation/constants'; -import type { evaluationType, listEvalItemsItem } from '@fastgpt/global/core/app/evaluation/type'; -import type { updateEvalItemBody } from '@fastgpt/global/core/app/evaluation/api'; +} from '@fastgpt/global/core/evaluation/constants'; +import type { evaluationType, listEvalItemsItem } from '@fastgpt/global/core/evaluation/type'; +import type { updateEvalItemBody } from '@fastgpt/global/core/evaluation/api'; import MyTooltip from '@fastgpt/web/components/common/MyTooltip'; const formatEvaluationStatus = (item: { status: number; errorMessage?: string }, t: TFunction) => { @@ -134,7 +134,7 @@ const EvaluationDetailModal = ({ const { runAsync: exportEval, loading: isDownloading } = useRequest2(async () => { await downloadFetch({ - url: `/api/proApi/core/app/evaluation/exportItems?evalId=${evalDetail._id}`, + url: `/api/core/evaluation/exportItems?evalId=${evalDetail._id}`, filename: `${evalDetail.name}.csv`, body: { title: t('dashboard_evaluation:evaluation_export_title'), diff --git a/projects/app/src/pages/api/core/evaluation/create.ts b/projects/app/src/pages/api/core/evaluation/create.ts new file mode 100644 index 000000000000..97388d6ea3c3 --- /dev/null +++ b/projects/app/src/pages/api/core/evaluation/create.ts @@ -0,0 +1,159 @@ +import type { ApiRequestProps, ApiResponseType } from '@fastgpt/service/type/next'; +import { NextAPI } from '@/service/middleware/entry'; +import { addLog } from '@fastgpt/service/common/system/log'; +import { removeFilesByPaths } from '@fastgpt/service/common/file/utils'; +import { getUploadModel } from '@fastgpt/service/common/file/multer'; +import { ReadPermissionVal } from '@fastgpt/global/support/permission/constant'; +import { authApp } from '@fastgpt/service/support/permission/app/auth'; +import { 
readRawTextByLocalFile } from '@fastgpt/service/common/file/read/utils'; +import { createEvaluationUsage } from '@fastgpt/service/support/wallet/usage/controller'; +import { MongoEvaluation } from '@fastgpt/service/core/evaluation/evalSchema'; +import { MongoEvalItem } from '@fastgpt/service/core/evaluation/evalItemSchema'; +import { addEvaluationJob } from '@fastgpt/service/core/evaluation/mq'; +import { addAuditLog, getI18nAppType } from '@fastgpt/service/support/user/audit/util'; +import { AuditEventEnum } from '@fastgpt/global/support/user/audit/constants'; +import { validateEvaluationFile } from '@fastgpt/service/core/evaluation/utils'; +import { EvaluationStatusEnum } from '@fastgpt/global/core/evaluation/constants'; +import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun'; +import { checkTeamAIPoints } from '@fastgpt/service/support/permission/teamLimit'; +import { checkTeamHasRunningEvaluation } from '@fastgpt/service/core/evaluation/utils'; + +export type createEvaluationBody = { + name: string; + appId: string; + evalModel: string; +}; + +const MAX_EVAL_ITEMS = process.env.EVAL_LINE_LIMIT ? 
Number(process.env.EVAL_LINE_LIMIT) : 1000; + +async function handler(req: ApiRequestProps, res: ApiResponseType) { + const filePaths: string[] = []; + + try { + const upload = getUploadModel({ + maxSize: global.feConfigs?.uploadFileMaxSize + }); + + const { file, data } = await upload.getUploadFile(req, res); + filePaths.push(file.path); + + if (file.mimetype !== 'text/csv') { + return Promise.reject('File must be a CSV file'); + } + + const { teamId, tmbId, app } = await authApp({ + req, + authToken: true, + authApiKey: true, + per: ReadPermissionVal, + appId: data.appId + }); + await checkTeamAIPoints(teamId); + await checkTeamHasRunningEvaluation(teamId); + + const { rawText } = await readRawTextByLocalFile({ + teamId, + tmbId, + path: file.path, + encoding: file.encoding, + getFormatText: false + }); + removeFilesByPaths(filePaths); + + const appVariables = app.chatConfig.variables; + + const { lines } = await validateEvaluationFile(rawText, appVariables); + + if (lines.length - 1 > MAX_EVAL_ITEMS) { + return Promise.reject(`File must be less than ${MAX_EVAL_ITEMS} lines`); + } + + const headers = lines[0].split(','); + const qIndex = headers.findIndex((h) => h.trim() === '*q'); + const aIndex = headers.findIndex((h) => h.trim() === '*a'); + const historyIndex = headers.findIndex((h) => h.trim() === 'history'); + + const { usageId } = await createEvaluationUsage({ + teamId, + tmbId, + appName: app.name, + model: data.evalModel + }); + + const evalItems = lines.slice(1).map((line) => { + const values = line.split(','); + const question = values[qIndex]; + const expectedResponse = values[aIndex]; + const history = historyIndex !== -1 ? 
values[historyIndex] : ''; + + const globalVariables = headers.slice(0, qIndex).reduce( + (acc, header, j) => { + const headerName = header.trim().replace(/^\*/, ''); + acc[headerName] = values[j] || ''; + return acc; + }, + {} as Record + ); + + return { + question, + expectedResponse, + history, + globalVariables + }; + }); + + await mongoSessionRun(async (session) => { + const [evaluation] = await MongoEvaluation.create( + [ + { + teamId, + tmbId, + appId: data.appId, + usageId, + evalModel: data.evalModel, + name: data.name + } + ], + { session, ordered: true } + ); + + const evalItemsWithId = evalItems.map((item) => ({ + question: item.question, + expectedResponse: item.expectedResponse, + history: item.history, + globalVariables: item.globalVariables, + evalId: evaluation._id, + status: EvaluationStatusEnum.queuing + })); + await MongoEvalItem.insertMany(evalItemsWithId, { + session, + ordered: false + }); + + await addEvaluationJob({ evalId: evaluation._id }); + }); + + addAuditLog({ + tmbId, + teamId, + event: AuditEventEnum.CREATE_EVALUATION, + params: { + name: data.name, + appName: app.name + } + }); + } catch (error) { + addLog.error(`create evaluation error: ${error}`); + removeFilesByPaths(filePaths); + return Promise.reject(error); + } +} + +export default NextAPI(handler); + +export const config = { + api: { + bodyParser: false + } +}; diff --git a/projects/app/src/pages/api/core/evaluation/delete.ts b/projects/app/src/pages/api/core/evaluation/delete.ts new file mode 100644 index 000000000000..2a322b12dd34 --- /dev/null +++ b/projects/app/src/pages/api/core/evaluation/delete.ts @@ -0,0 +1,53 @@ +import type { ApiRequestProps, ApiResponseType } from '@fastgpt/service/type/next'; +import { NextAPI } from '@/service/middleware/entry'; +import { MongoEvaluation } from '@fastgpt/service/core/evaluation/evalSchema'; +import { MongoEvalItem } from '@fastgpt/service/core/evaluation/evalItemSchema'; +import { authEval } from 
'@fastgpt/service/support/permission/evaluation/auth'; +import { addAuditLog } from '@fastgpt/service/support/user/audit/util'; +import { AuditEventEnum } from '@fastgpt/global/support/user/audit/constants'; +import { removeEvaluationJob } from '@fastgpt/service/core/evaluation/mq'; +import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun'; +import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; + +async function handler(req: ApiRequestProps<{}, { evalId: string }>, res: ApiResponseType) { + const { evalId } = req.query; + + const { tmbId, teamId, evaluation } = await authEval({ + req, + per: WritePermissionVal, + evalId, + authToken: true, + authApiKey: true + }); + + await mongoSessionRun(async (session) => { + await MongoEvaluation.deleteOne( + { + _id: evalId + }, + { session } + ); + + await MongoEvalItem.deleteMany( + { + evalId + }, + { session } + ); + + await removeEvaluationJob(evalId); + }); + + addAuditLog({ + tmbId, + teamId, + event: AuditEventEnum.DELETE_EVALUATION, + params: { + name: evaluation.name + } + }); + + return {}; +} + +export default NextAPI(handler); diff --git a/projects/app/src/pages/api/core/evaluation/deleteItem.ts b/projects/app/src/pages/api/core/evaluation/deleteItem.ts new file mode 100644 index 000000000000..77182c83f364 --- /dev/null +++ b/projects/app/src/pages/api/core/evaluation/deleteItem.ts @@ -0,0 +1,23 @@ +import type { ApiRequestProps, ApiResponseType } from '@fastgpt/service/type/next'; +import { NextAPI } from '@/service/middleware/entry'; +import { MongoEvalItem } from '@fastgpt/service/core/evaluation/evalItemSchema'; +import { authEval } from '@fastgpt/service/support/permission/evaluation/auth'; +import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; + +async function handler( + req: ApiRequestProps<{}, { evalId: string; itemId: string }>, + res: ApiResponseType +) { + const { evalId, itemId } = req.query; + await authEval({ + req, + per: 
WritePermissionVal, + evalId, + authToken: true, + authApiKey: true + }); + + await MongoEvalItem.deleteOne({ _id: itemId, evalId }); +} + +export default NextAPI(handler); diff --git a/projects/app/src/pages/api/core/evaluation/exportItems.ts b/projects/app/src/pages/api/core/evaluation/exportItems.ts new file mode 100644 index 000000000000..05811137d6be --- /dev/null +++ b/projects/app/src/pages/api/core/evaluation/exportItems.ts @@ -0,0 +1,112 @@ +import type { ApiRequestProps, ApiResponseType } from '@fastgpt/service/type/next'; +import { NextAPI } from '@/service/middleware/entry'; +import { ReadPermissionVal } from '@fastgpt/global/support/permission/constant'; +import { MongoEvaluation } from '@fastgpt/service/core/evaluation/evalSchema'; +import { MongoEvalItem } from '@fastgpt/service/core/evaluation/evalItemSchema'; +import { Types } from 'mongoose'; +import { readFromSecondary } from '@fastgpt/service/common/mongo/utils'; +import { authEval } from '@fastgpt/service/support/permission/evaluation/auth'; +import { addAuditLog, getI18nAppType } from '@fastgpt/service/support/user/audit/util'; +import { AuditEventEnum } from '@fastgpt/global/support/user/audit/constants'; +import { generateCsv } from '@fastgpt/service/common/file/csv'; + +export type exportItemsQuery = { + evalId: string; +}; + +export type exportItemsBody = { + title: string; + statusMap: Record; +}; + +async function handler( + req: ApiRequestProps, + res: ApiResponseType +) { + const { evalId } = req.query; + const { title, statusMap } = req.body || {}; + + const { teamId, tmbId } = await authEval({ + req, + per: ReadPermissionVal, + evalId, + authToken: true, + authApiKey: true + }); + + const evaluation = await MongoEvaluation.findById(evalId); + if (!evaluation) { + return Promise.reject('Evaluation task does not exist'); + } + + res.setHeader('Content-Type', 'text/csv; charset=utf-8;'); + res.setHeader( + 'Content-Disposition', + `attachment; 
filename=${encodeURIComponent(evaluation?.name || 'evaluation')}.csv;` + ); + + const evalItems = await MongoEvalItem.find( + { + evalId: new Types.ObjectId(evalId) + }, + 'globalVariables question expectedResponse response status accuracy relevance semanticAccuracy score errorMessage', + { + ...readFromSecondary + } + ); + + const allVariableKeys = new Set(); + evalItems.forEach((doc) => { + if (doc.globalVariables) { + Object.keys(doc.globalVariables).forEach((key) => allVariableKeys.add(key)); + } + }); + const variableKeysArray = Array.from(allVariableKeys).sort(); + + const baseHeaders = title.split(','); + const headers = [...variableKeysArray, ...baseHeaders]; + + const data = evalItems.map((doc) => { + const question = doc.question || ''; + const expectedResponse = doc.expectedResponse || ''; + const response = doc.response || ''; + + const status = (() => { + if (doc.errorMessage) { + return 'Error'; // Show error when errorMessage exists + } + return statusMap[doc.status]?.label || 'Unknown'; + })(); + + const score = !!doc.score ? 
doc.score.toFixed(2) : '0'; + + const variableValues = variableKeysArray.map((key) => { + return doc.globalVariables?.[key] || ''; + }); + + return [...variableValues, question, expectedResponse, response, status, score]; + }); + + const csvContent = generateCsv(headers, data); + + res.write('\uFEFF' + csvContent); + + addAuditLog({ + tmbId, + teamId, + event: AuditEventEnum.EXPORT_EVALUATION, + params: { + name: evaluation.name + } + }); + + res.end(); +} + +export default NextAPI(handler); + +export const config = { + api: { + responseLimit: '100mb' + } +}; diff --git a/projects/app/src/pages/api/core/evaluation/list.ts b/projects/app/src/pages/api/core/evaluation/list.ts new file mode 100644 index 000000000000..7be49950185f --- /dev/null +++ b/projects/app/src/pages/api/core/evaluation/list.ts @@ -0,0 +1,194 @@ +import type { ApiRequestProps, ApiResponseType } from '@fastgpt/service/type/next'; +import { NextAPI } from '@/service/middleware/entry'; +import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; +import { + ReadPermissionVal, + PerResourceTypeEnum +} from '@fastgpt/global/support/permission/constant'; +import { MongoEvaluation } from '@fastgpt/service/core/evaluation/evalSchema'; +import { parsePaginationRequest } from '@fastgpt/service/common/api/pagination'; +import { Types } from '@fastgpt/service/common/mongo'; +import type { PaginationResponse } from '@fastgpt/web/common/fetch/type'; +import type { listEvaluationsBody } from '@fastgpt/global/core/evaluation/api'; +import type { EvaluationSchemaType, evaluationType } from '@fastgpt/global/core/evaluation/type'; +import { replaceRegChars } from '@fastgpt/global/common/string/tools'; +import { MongoResourcePermission } from '@fastgpt/service/support/permission/schema'; +import { getGroupsByTmbId } from '@fastgpt/service/support/permission/memberGroup/controllers'; +import { getOrgIdSetWithParentByTmbId } from '@fastgpt/service/support/permission/org/controllers'; +import type { 
TeamMemberSchema } from '@fastgpt/global/support/user/team/type'; +import type { AppSchema } from '@fastgpt/global/core/app/type'; +import { i18nT } from '@fastgpt/web/i18n/utils'; +import { MongoApp } from '@fastgpt/service/core/app/schema'; + +async function handler( + req: ApiRequestProps, + res: ApiResponseType +): Promise> { + const { + teamId, + tmbId, + permission: teamPer + } = await authUserPer({ + req, + authToken: true, + authApiKey: true, + per: ReadPermissionVal + }); + + const { offset, pageSize } = parsePaginationRequest(req); + const { searchKey } = req.body; + + const [perList, myGroupMap, myOrgSet] = await Promise.all([ + MongoResourcePermission.find({ + resourceType: PerResourceTypeEnum.app, + teamId, + resourceId: { + $exists: true + } + }).lean(), + getGroupsByTmbId({ + tmbId, + teamId + }).then((item) => { + const map = new Map(); + item.forEach((item) => { + map.set(String(item._id), 1); + }); + return map; + }), + getOrgIdSetWithParentByTmbId({ + teamId, + tmbId + }) + ]); + const myPerAppIdList = perList + .filter( + (item) => + String(item.tmbId) === String(tmbId) || + myGroupMap.has(String(item.groupId)) || + myOrgSet.has(String(item.orgId)) + ) + .map((item) => new Types.ObjectId(item.resourceId)); + + const myAppIds = await MongoApp.find({ + teamId: new Types.ObjectId(teamId), + $or: [{ tmbId }, { parentId: { $in: myPerAppIdList } }] + }) + .select('_id') + .lean(); + + const match = { + teamId: new Types.ObjectId(teamId), + ...(searchKey && { name: { $regex: new RegExp(`${replaceRegChars(searchKey)}`, 'i') } }), + ...(!teamPer.isOwner && { + appId: { + $in: [...myPerAppIdList, ...myAppIds.map((item) => item._id)] + } + }) + }; + + const [evaluations, total] = await Promise.all([ + MongoEvaluation.aggregate( + buildPipeline(match, offset, pageSize) + ) as unknown as (EvaluationSchemaType & { + teamMember: TeamMemberSchema; + app: AppSchema; + stats: { + totalCount: number; + completedCount: number; + errorCount: number; + avgScore: 
number; + }; + })[], + MongoEvaluation.countDocuments(match) + ]); + + return { + total, + list: evaluations.map((item) => { + const { stats } = item; + const { totalCount = 0, completedCount = 0, errorCount = 0, avgScore } = stats || {}; + + const calculatedScore = totalCount === completedCount ? avgScore || 0 : undefined; + + return { + name: item.name, + appId: String(item.appId), + createTime: item.createTime, + finishTime: item.finishTime, + evalModel: item.evalModel, + errorMessage: item.errorMessage, + score: calculatedScore, + _id: String(item._id), + executorAvatar: item.teamMember?.avatar, + executorName: item.teamMember?.name, + appAvatar: item.app?.avatar, + appName: item.app?.name || i18nT('app:deleted'), + completedCount, + errorCount, + totalCount + }; + }) + }; +} + +const buildPipeline = (match: Record, offset: number, pageSize: number) => [ + { $match: match }, + { $sort: { createTime: -1 as const } }, + { $skip: offset }, + { $limit: pageSize }, + { + $lookup: { + from: 'team_members', + localField: 'tmbId', + foreignField: '_id', + as: 'teamMember' + } + }, + { + $lookup: { + from: 'apps', + localField: 'appId', + foreignField: '_id', + as: 'app' + } + }, + { + $lookup: { + from: 'eval_items', + let: { evalId: '$_id' }, + pipeline: [ + { $match: { $expr: { $eq: ['$evalId', '$$evalId'] } } }, + { + $group: { + _id: null, + totalCount: { $sum: 1 }, + completedCount: { + $sum: { $cond: [{ $eq: ['$status', 2] }, 1, 0] } + }, + errorCount: { + $sum: { + $cond: [{ $ifNull: ['$errorMessage', false] }, 1, 0] + } + }, + avgScore: { + $avg: { + $cond: [{ $ne: ['$score', null] }, '$score', '$$REMOVE'] + } + } + } + } + ], + as: 'evalStats' + } + }, + { + $addFields: { + teamMember: { $arrayElemAt: ['$teamMember', 0] }, + app: { $arrayElemAt: ['$app', 0] }, + stats: { $arrayElemAt: ['$evalStats', 0] } + } + } +]; + +export default NextAPI(handler); diff --git a/projects/app/src/pages/api/core/evaluation/listItems.ts 
b/projects/app/src/pages/api/core/evaluation/listItems.ts new file mode 100644 index 000000000000..a1f1078fb142 --- /dev/null +++ b/projects/app/src/pages/api/core/evaluation/listItems.ts @@ -0,0 +1,83 @@ +import type { ApiRequestProps, ApiResponseType } from '@fastgpt/service/type/next'; +import { NextAPI } from '@/service/middleware/entry'; +import { parsePaginationRequest } from '@fastgpt/service/common/api/pagination'; +import { MongoEvalItem } from '@fastgpt/service/core/evaluation/evalItemSchema'; +import { Types } from 'mongoose'; +import { authEval } from '@fastgpt/service/support/permission/evaluation/auth'; +import type { listEvalItemsBody } from '@fastgpt/global/core/evaluation/api'; +import type { listEvalItemsItem } from '@fastgpt/global/core/evaluation/type'; +import type { PaginationResponse } from '@fastgpt/web/common/fetch/type'; +import { ReadPermissionVal } from '@fastgpt/global/support/permission/constant'; + +async function handler( + req: ApiRequestProps, + res: ApiResponseType +): Promise> { + const { evalId } = req.body; + await authEval({ + req, + per: ReadPermissionVal, + evalId, + authToken: true, + authApiKey: true + }); + const { offset, pageSize } = parsePaginationRequest(req); + + const aggregationPipeline = [ + { + $match: { + evalId: new Types.ObjectId(evalId) + } + }, + { + $addFields: { + sortStatus: { + $switch: { + branches: [ + { case: { $ifNull: ['$errorMessage', false] }, then: 0 }, + { case: { $eq: ['$status', 1] }, then: 1 }, + { case: { $eq: ['$status', 0] }, then: 2 }, + { case: { $eq: ['$status', 2] }, then: 3 } + ], + default: 4 + } + } + } + }, + { + $sort: { sortStatus: 1 as const, _id: 1 as const } + }, + { + $skip: offset + }, + { + $limit: pageSize + } + ]; + + const [result, total] = await Promise.all([ + MongoEvalItem.aggregate(aggregationPipeline), + MongoEvalItem.countDocuments({ evalId }) + ]); + + return { + total, + list: result.map((item) => ({ + evalItemId: String(item._id), + evalId: String(item.evalId), 
+ retry: item.retry, + question: item.question, + expectedResponse: item.expectedResponse, + response: item.response, + globalVariables: item.globalVariables, + status: item.status, + errorMessage: item.errorMessage, + accuracy: item.accuracy, + relevance: item.relevance, + semanticAccuracy: item.semanticAccuracy, + score: item.score + })) + }; +} + +export default NextAPI(handler); diff --git a/projects/app/src/pages/api/core/evaluation/retryItem.ts b/projects/app/src/pages/api/core/evaluation/retryItem.ts new file mode 100644 index 000000000000..897df6bfdb80 --- /dev/null +++ b/projects/app/src/pages/api/core/evaluation/retryItem.ts @@ -0,0 +1,45 @@ +import type { ApiRequestProps, ApiResponseType } from '@fastgpt/service/type/next'; +import { NextAPI } from '@/service/middleware/entry'; +import { authEval } from '@fastgpt/service/support/permission/evaluation/auth'; +import { MongoEvalItem } from '@fastgpt/service/core/evaluation/evalItemSchema'; +import { checkEvaluationJobActive, addEvaluationJob } from '@fastgpt/service/core/evaluation/mq'; +import { EvaluationStatusEnum } from '@fastgpt/global/core/evaluation/constants'; +import type { retryEvalItemBody } from '@fastgpt/global/core/evaluation/api'; +import { checkTeamAIPoints } from '@fastgpt/service/support/permission/teamLimit'; +import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; + +async function handler(req: ApiRequestProps, res: ApiResponseType) { + const { evalItemId } = req.body; + + const evaluationItem = await MongoEvalItem.findById(evalItemId); + if (!evaluationItem) return Promise.reject('evaluationItem not found'); + + const { teamId, evaluation } = await authEval({ + req, + per: WritePermissionVal, + evalId: evaluationItem.evalId, + authToken: true, + authApiKey: true + }); + + await checkTeamAIPoints(teamId); + + await MongoEvalItem.updateOne( + { _id: evalItemId }, + { + $set: { + status: EvaluationStatusEnum.queuing, + errorMessage: null, + response: null, + 
accuracy: null, + relevance: null, + semanticAccuracy: null, + score: null, + retry: 3 + } + } + ); + await addEvaluationJob({ evalId: evaluation._id }); +} + +export default NextAPI(handler); diff --git a/projects/app/src/pages/api/core/evaluation/updateItem.ts b/projects/app/src/pages/api/core/evaluation/updateItem.ts new file mode 100644 index 000000000000..cb1a2a32c5f0 --- /dev/null +++ b/projects/app/src/pages/api/core/evaluation/updateItem.ts @@ -0,0 +1,46 @@ +import type { ApiRequestProps, ApiResponseType } from '@fastgpt/service/type/next'; +import { NextAPI } from '@/service/middleware/entry'; +import { authEval } from '@fastgpt/service/support/permission/evaluation/auth'; +import { MongoEvalItem } from '@fastgpt/service/core/evaluation/evalItemSchema'; +import { addEvaluationJob } from '@fastgpt/service/core/evaluation/mq'; +import { EvaluationStatusEnum } from '@fastgpt/global/core/evaluation/constants'; +import type { updateEvalItemBody } from '@fastgpt/global/core/evaluation/api'; +import { checkTeamAIPoints } from '@fastgpt/service/support/permission/teamLimit'; + +async function handler(req: ApiRequestProps, res: ApiResponseType) { + const { evalItemId, question, expectedResponse, variables } = req.body; + + const evaluationItem = await MongoEvalItem.findById(evalItemId); + if (!evaluationItem) return Promise.reject('evaluationItem not found'); + + const { teamId, evaluation } = await authEval({ + req, + evalId: evaluationItem.evalId, + authToken: true, + authApiKey: true + }); + await checkTeamAIPoints(teamId); + + await MongoEvalItem.updateOne( + { _id: evalItemId }, + { + $set: { + question, + expectedResponse, + status: EvaluationStatusEnum.queuing, + errorMessage: null, + response: null, + accuracy: null, + relevance: null, + semanticAccuracy: null, + score: null, + retry: 3, + globalVariables: variables + } + } + ); + + await addEvaluationJob({ evalId: evaluation._id }); +} + +export default NextAPI(handler); diff --git 
a/projects/app/src/pages/dashboard/evaluation/create.tsx b/projects/app/src/pages/dashboard/evaluation/create.tsx index a956719eab8c..30f0a37dc9b5 100644 --- a/projects/app/src/pages/dashboard/evaluation/create.tsx +++ b/projects/app/src/pages/dashboard/evaluation/create.tsx @@ -20,11 +20,11 @@ import { getAppDetailById } from '@/web/core/app/api'; import { useToast } from '@fastgpt/web/hooks/useToast'; import QuestionTip from '@fastgpt/web/components/common/MyTooltip/QuestionTip'; import { fileDownload } from '@/web/common/file/utils'; -import { postCreateEvaluation } from '@/web/core/app/api/evaluation'; +import { postCreateEvaluation } from '@/web/core/evaluation/evaluation'; import { useMemo, useState } from 'react'; import Markdown from '@/components/Markdown'; -import { getEvaluationFileHeader } from '@fastgpt/global/core/app/evaluation/utils'; -import { evaluationFileErrors } from '@fastgpt/global/core/app/evaluation/constants'; +import { getEvaluationFileHeader } from '@fastgpt/global/core/evaluation/utils'; +import { evaluationFileErrors } from '@fastgpt/global/core/evaluation/constants'; import { TeamErrEnum } from '@fastgpt/global/common/error/code/team'; import { getErrText } from '@fastgpt/global/common/error/utils'; diff --git a/projects/app/src/pages/dashboard/evaluation/index.tsx b/projects/app/src/pages/dashboard/evaluation/index.tsx index 5cbc16b1e601..cb9b9c00e68f 100644 --- a/projects/app/src/pages/dashboard/evaluation/index.tsx +++ b/projects/app/src/pages/dashboard/evaluation/index.tsx @@ -20,15 +20,15 @@ import SearchInput from '@fastgpt/web/components/common/Input/SearchInput'; import MyIcon from '@fastgpt/web/components/common/Icon'; import { useRouter } from 'next/router'; import { useRequest2 } from '@fastgpt/web/hooks/useRequest'; -import { deleteEvaluation, getEvaluationList } from '@/web/core/app/api/evaluation'; +import { deleteEvaluation, getEvaluationList } from '@/web/core/evaluation/evaluation'; import { formatTime2YMDHM } from 
'@fastgpt/global/common/string/time'; import Avatar from '@fastgpt/web/components/common/Avatar'; import { usePagination } from '@fastgpt/web/hooks/usePagination'; import { useState, useEffect, useMemo } from 'react'; -import EvaluationDetailModal from '../../../pageComponents/app/evaluation/DetailModal'; +import EvaluationDetailModal from '@/pageComponents/evaluation/DetailModal'; import { useSystem } from '@fastgpt/web/hooks/useSystem'; import EmptyTip from '@fastgpt/web/components/common/EmptyTip'; -import type { evaluationType } from '@fastgpt/global/core/app/evaluation/type'; +import type { evaluationType } from '@fastgpt/global/core/evaluation/type'; import MyTooltip from '@fastgpt/web/components/common/MyTooltip'; import PopoverConfirm from '@fastgpt/web/components/common/MyPopover/PopoverConfirm'; diff --git a/projects/app/src/web/core/app/api/evaluation.ts b/projects/app/src/web/core/evaluation/evaluation.ts similarity index 67% rename from projects/app/src/web/core/app/api/evaluation.ts rename to projects/app/src/web/core/evaluation/evaluation.ts index 5caf12f10640..814c053e17ee 100644 --- a/projects/app/src/web/core/app/api/evaluation.ts +++ b/projects/app/src/web/core/evaluation/evaluation.ts @@ -4,8 +4,8 @@ import type { listEvaluationsBody, retryEvalItemBody, updateEvalItemBody -} from '@fastgpt/global/core/app/evaluation/api'; -import type { evaluationType, listEvalItemsItem } from '@fastgpt/global/core/app/evaluation/type'; +} from '@fastgpt/global/core/evaluation/api'; +import type { evaluationType, listEvalItemsItem } from '@fastgpt/global/core/evaluation/type'; import type { PaginationResponse } from '@fastgpt/web/common/fetch/type'; export const postCreateEvaluation = ({ @@ -25,7 +25,7 @@ export const postCreateEvaluation = ({ formData.append('file', file, encodeURIComponent(file.name)); formData.append('data', JSON.stringify({ name, evalModel, appId })); - return POST(`/proApi/core/app/evaluation/create`, formData, { + return 
POST(`/core/evaluation/create`, formData, { timeout: 600000, onUploadProgress: (e) => { if (!e.total) return; @@ -40,19 +40,18 @@ export const postCreateEvaluation = ({ }; export const getEvaluationList = (data: listEvaluationsBody) => - POST>('/proApi/core/app/evaluation/list', data); + POST>('/core/evaluation/list', data); export const deleteEvaluation = (data: { evalId: string }) => - DELETE('/proApi/core/app/evaluation/delete', data); + DELETE('/core/evaluation/delete', data); export const getEvalItemsList = (data: listEvalItemsBody) => - POST>('/proApi/core/app/evaluation/listItems', data); + POST>('/core/evaluation/listItems', data); export const deleteEvalItem = (data: { evalItemId: string }) => - DELETE('/proApi/core/app/evaluation/deleteItem', data); + DELETE('/core/evaluation/deleteItem', data); -export const retryEvalItem = (data: retryEvalItemBody) => - POST('/proApi/core/app/evaluation/retryItem', data); +export const retryEvalItem = (data: retryEvalItemBody) => POST('/core/evaluation/retryItem', data); export const updateEvalItem = (data: updateEvalItemBody) => - POST('/proApi/core/app/evaluation/updateItem', data); + POST('/core/evaluation/updateItem', data); From 69069b8d87efbfe2d504c777c98012263db781db Mon Sep 17 00:00:00 2001 From: Jon Date: Fri, 22 Aug 2025 11:59:29 +0800 Subject: [PATCH 002/255] feat: Add evaluation dataset schemas for collections and data --- packages/global/core/evaluation/type.d.ts | 25 ++++++ .../evaluation/evalDatasetCollectionSchema.ts | 71 +++++++++++++++ .../core/evaluation/evalDatasetDataSchema.ts | 89 +++++++++++++++++++ 3 files changed, 185 insertions(+) create mode 100644 packages/service/core/evaluation/evalDatasetCollectionSchema.ts create mode 100644 packages/service/core/evaluation/evalDatasetDataSchema.ts diff --git a/packages/global/core/evaluation/type.d.ts b/packages/global/core/evaluation/type.d.ts index 2a497a50979a..aeadae7e2e19 100644 --- a/packages/global/core/evaluation/type.d.ts +++ 
b/packages/global/core/evaluation/type.d.ts @@ -49,3 +49,28 @@ export type evaluationType = Pick< export type listEvalItemsItem = EvalItemSchemaType & { evalItemId: string; }; + +export type EvalDatasetCollectionSchemaType = { + _id: string; + teamId: string; + tmbId: string; + name: string; + description: string; + createTime: Date; + updateTime: Date; + dataCountByGen: number; + metadata: Record; +}; + +export type EvalDatasetDataSchemaType = { + _id: string; + datasetId: string; + user_input: string; + actual_output: string; + expected_output: string; + context: string[]; + retrieval_context: string[]; + metadata: Record; + createTime: Date; + updateTime: Date; +}; diff --git a/packages/service/core/evaluation/evalDatasetCollectionSchema.ts b/packages/service/core/evaluation/evalDatasetCollectionSchema.ts new file mode 100644 index 000000000000..3e179d4408cd --- /dev/null +++ b/packages/service/core/evaluation/evalDatasetCollectionSchema.ts @@ -0,0 +1,71 @@ +import { + TeamCollectionName, + TeamMemberCollectionName +} from '@fastgpt/global/support/user/team/constant'; +import { connectionMongo, getMongoModel } from '../../common/mongo'; +import type { EvalDatasetCollectionSchemaType } from '@fastgpt/global/core/evaluation/type'; + +const { Schema } = connectionMongo; + +export const EvalDatasetCollectionName = 'eval_dataset_collections'; + +const EvalDatasetCollectionSchema = new Schema({ + teamId: { + type: Schema.Types.ObjectId, + ref: TeamCollectionName, + required: true, + index: true + }, + tmbId: { + type: Schema.Types.ObjectId, + ref: TeamMemberCollectionName, + required: true + }, + name: { + type: String, + required: true, + trim: true, + maxlength: 100 + }, + description: { + type: String, + default: '', + trim: true, + maxlength: 500 + }, + createTime: { + type: Date, + default: Date.now, + immutable: true + }, + updateTime: { + type: Date, + default: Date.now + }, + dataCountByGen: { + type: Number, + default: 0, + min: 0 + }, + metadata: { + type: 
Schema.Types.Mixed, + default: {} + } +}); + +// Indexes for efficient queries +EvalDatasetCollectionSchema.index({ teamId: 1, createTime: -1 }); +EvalDatasetCollectionSchema.index({ teamId: 1, name: 1 }, { unique: true }); +EvalDatasetCollectionSchema.index({ teamId: 1, updateTime: -1 }); + +// Update the updateTime on save +EvalDatasetCollectionSchema.pre('save', function () { + if (this.isModified() && !this.isNew) { + this.updateTime = new Date(); + } +}); + +export const MongoEvalDatasetCollection = getMongoModel( + EvalDatasetCollectionName, + EvalDatasetCollectionSchema +); diff --git a/packages/service/core/evaluation/evalDatasetDataSchema.ts b/packages/service/core/evaluation/evalDatasetDataSchema.ts new file mode 100644 index 000000000000..5e6424e75635 --- /dev/null +++ b/packages/service/core/evaluation/evalDatasetDataSchema.ts @@ -0,0 +1,89 @@ +import type { EvalDatasetDataSchemaType } from '@fastgpt/global/core/evaluation/type'; +import { connectionMongo, getMongoModel } from '../../common/mongo'; +import { EvalDatasetCollectionName } from './evalDatasetCollectionSchema'; + +const { Schema } = connectionMongo; + +export const EvalDatasetDataCollectionName = 'eval_dataset_data'; + +const EvalDatasetDataSchema = new Schema({ + datasetId: { + type: Schema.Types.ObjectId, + ref: EvalDatasetCollectionName, + required: true, + index: true + }, + user_input: { + type: String, + default: '', + trim: true + }, + actual_output: { + type: String, + default: '', + trim: true + }, + expected_output: { + type: String, + default: '', + trim: true + }, + context: { + type: [ + { + type: String, + trim: true + } + ], + default: [], + validate: { + validator: (arr: string[]) => arr.length <= 100, + message: 'Context array cannot exceed 100 items' + } + }, + retrieval_context: { + type: [ + { + type: String, + trim: true + } + ], + default: [] + }, + metadata: { + type: Schema.Types.Mixed, + default: {} + }, + createTime: { + type: Date, + default: Date.now, + immutable: 
true + }, + updateTime: { + type: Date, + default: Date.now + } +}); + +// Indexes for efficient queries +EvalDatasetDataSchema.index({ datasetId: 1, createTime: -1 }); +EvalDatasetDataSchema.index({ datasetId: 1, updateTime: -1 }); + +// Text search index for searching within inputs and outputs +EvalDatasetDataSchema.index({ + user_input: 'text', + expected_output: 'text', + actual_output: 'text' +}); + +// Update the updateTime on save +EvalDatasetDataSchema.pre('save', function () { + if (this.isModified() && !this.isNew) { + this.updateTime = new Date(); + } +}); + +export const MongoEvalDatasetData = getMongoModel( + EvalDatasetDataCollectionName, + EvalDatasetDataSchema +); From 3aa78b13193ec023abde05138886f76925b53e1d Mon Sep 17 00:00:00 2001 From: Jon Date: Fri, 22 Aug 2025 17:14:11 +0800 Subject: [PATCH 003/255] feat: Add API for creating evaluation dataset collections --- packages/global/core/evaluation/api.d.ts | 5 + .../evaluation/dataset/collection/create.ts | 93 +++++ .../dataset/collection/create.test.ts | 343 ++++++++++++++++++ 3 files changed, 441 insertions(+) create mode 100644 projects/app/src/pages/api/core/evaluation/dataset/collection/create.ts create mode 100644 test/cases/pages/api/core/evaluation/dataset/collection/create.test.ts diff --git a/packages/global/core/evaluation/api.d.ts b/packages/global/core/evaluation/api.d.ts index 8c1d8e87335c..092fd7ee16b0 100644 --- a/packages/global/core/evaluation/api.d.ts +++ b/packages/global/core/evaluation/api.d.ts @@ -18,3 +18,8 @@ export type updateEvalItemBody = { expectedResponse: string; variables: Record; }; + +export type createEvalDatasetCollectionBody = { + name: string; + description?: string; +}; diff --git a/projects/app/src/pages/api/core/evaluation/dataset/collection/create.ts b/projects/app/src/pages/api/core/evaluation/dataset/collection/create.ts new file mode 100644 index 000000000000..c5e52b7ba7d3 --- /dev/null +++ 
b/projects/app/src/pages/api/core/evaluation/dataset/collection/create.ts @@ -0,0 +1,93 @@ +import type { ApiRequestProps } from '@fastgpt/service/type/next'; +import { NextAPI } from '@/service/middleware/entry'; +import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; +import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; +import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; +import type { createEvalDatasetCollectionBody } from '@fastgpt/global/core/evaluation/api'; + +export type EvalDatasetCollectionCreateQuery = {}; +export type EvalDatasetCollectionCreateBody = createEvalDatasetCollectionBody; +export type EvalDatasetCollectionCreateResponse = string; + +async function handler( + req: ApiRequestProps +): Promise { + const { name, description = '' } = req.body; + + // Parameter validation + if (!name || typeof name !== 'string' || name.trim().length === 0) { + return Promise.reject({ + statusCode: 400, + message: 'Name is required and must be a non-empty string' + }); + } + + if (name.trim().length > 100) { + return Promise.reject({ + statusCode: 400, + message: 'Name must be less than 100 characters' + }); + } + + if (description && typeof description !== 'string') { + return Promise.reject({ + statusCode: 400, + message: 'Description must be a string' + }); + } + + if (description && description.length > 500) { + return Promise.reject({ + statusCode: 400, + message: 'Description must be less than 500 characters' + }); + } + + // Authentication and authorization + const { teamId, tmbId } = await authUserPer({ + req, + authToken: true, + authApiKey: true, + per: WritePermissionVal + }); + + // Check for name conflicts within team + const existingDataset = await MongoEvalDatasetCollection.findOne({ + teamId, + name: name.trim() + }); + + if (existingDataset) { + return 
Promise.reject({ + statusCode: 409, + message: 'A dataset with this name already exists' + }); + } + + // Create dataset collection + const datasetId = await mongoSessionRun(async (session) => { + const [{ _id }] = await MongoEvalDatasetCollection.create( + [ + { + teamId, + tmbId, + name: name.trim(), + description: description.trim() + } + ], + { session, ordered: true } + ); + + return _id; + }); + + // TODO: Add audit log + + return datasetId.toString(); +} + +export default NextAPI(handler); + +// Export handler for testing +export const handler_test = process.env.NODE_ENV === 'test' ? handler : undefined; diff --git a/test/cases/pages/api/core/evaluation/dataset/collection/create.test.ts b/test/cases/pages/api/core/evaluation/dataset/collection/create.test.ts new file mode 100644 index 000000000000..c6927eee5c29 --- /dev/null +++ b/test/cases/pages/api/core/evaluation/dataset/collection/create.test.ts @@ -0,0 +1,343 @@ +import { describe, expect, it, vi, beforeEach } from 'vitest'; +import { handler_test } from '@/pages/api/core/evaluation/dataset/collection/create'; +import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; +import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; +import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; + +vi.mock('@fastgpt/service/support/permission/user/auth'); +vi.mock('@fastgpt/service/common/mongo/sessionRun'); +vi.mock('@fastgpt/service/core/evaluation/evalDatasetCollectionSchema', () => ({ + MongoEvalDatasetCollection: { + findOne: vi.fn(), + create: vi.fn() + } +})); + +const mockAuthUserPer = vi.mocked(authUserPer); +const mockMongoSessionRun = vi.mocked(mongoSessionRun); +const mockMongoEvalDatasetCollection = vi.mocked(MongoEvalDatasetCollection); + +describe('EvalDatasetCollection Create API', () => { + const validTeamId = 'team123'; + const 
validTmbId = 'tmb123'; + const mockDatasetId = '65f5b5b5b5b5b5b5b5b5b5b5'; + + beforeEach(() => { + vi.clearAllMocks(); + + mockAuthUserPer.mockResolvedValue({ + teamId: validTeamId, + tmbId: validTmbId + }); + + mockMongoEvalDatasetCollection.findOne.mockResolvedValue(null); + + mockMongoSessionRun.mockImplementation(async (callback) => { + return callback({} as any); + }); + + mockMongoEvalDatasetCollection.create.mockResolvedValue([{ _id: mockDatasetId }] as any); + }); + + describe('Parameter Validation', () => { + it('should reject when name is missing', async () => { + const req = { + body: { description: 'Test description' } + }; + + await expect(handler_test(req as any)).rejects.toEqual({ + statusCode: 400, + message: 'Name is required and must be a non-empty string' + }); + }); + + it('should reject when name is empty string', async () => { + const req = { + body: { name: '', description: 'Test description' } + }; + + await expect(handler_test(req as any)).rejects.toEqual({ + statusCode: 400, + message: 'Name is required and must be a non-empty string' + }); + }); + + it('should reject when name is only whitespace', async () => { + const req = { + body: { name: ' ', description: 'Test description' } + }; + + await expect(handler_test(req as any)).rejects.toEqual({ + statusCode: 400, + message: 'Name is required and must be a non-empty string' + }); + }); + + it('should reject when name is not a string', async () => { + const req = { + body: { name: 123, description: 'Test description' } + }; + + await expect(handler_test(req as any)).rejects.toEqual({ + statusCode: 400, + message: 'Name is required and must be a non-empty string' + }); + }); + + it('should reject when name exceeds 100 characters', async () => { + const longName = 'a'.repeat(101); + const req = { + body: { name: longName, description: 'Test description' } + }; + + await expect(handler_test(req as any)).rejects.toEqual({ + statusCode: 400, + message: 'Name must be less than 100 characters' + 
}); + }); + + it('should reject when description is not a string', async () => { + const req = { + body: { name: 'Test Dataset', description: 123 } + }; + + await expect(handler_test(req as any)).rejects.toEqual({ + statusCode: 400, + message: 'Description must be a string' + }); + }); + + it('should reject when description exceeds 500 characters', async () => { + const longDescription = 'a'.repeat(501); + const req = { + body: { name: 'Test Dataset', description: longDescription } + }; + + await expect(handler_test(req as any)).rejects.toEqual({ + statusCode: 400, + message: 'Description must be less than 500 characters' + }); + }); + + it('should accept valid name without description', async () => { + const req = { + body: { name: 'Test Dataset' } + }; + + const result = await handler_test(req as any); + expect(result).toBe(mockDatasetId); + }); + + it('should accept valid name with empty description', async () => { + const req = { + body: { name: 'Test Dataset', description: '' } + }; + + const result = await handler_test(req as any); + expect(result).toBe(mockDatasetId); + }); + }); + + describe('Authentication and Authorization', () => { + it('should call authUserPer with correct parameters', async () => { + const req = { + body: { name: 'Test Dataset', description: 'Test description' } + }; + + await handler_test(req as any); + + expect(mockAuthUserPer).toHaveBeenCalledWith({ + req, + authToken: true, + authApiKey: true, + per: WritePermissionVal + }); + }); + + it('should propagate authentication errors', async () => { + const authError = new Error('Authentication failed'); + mockAuthUserPer.mockRejectedValue(authError); + + const req = { + body: { name: 'Test Dataset', description: 'Test description' } + }; + + await expect(handler_test(req as any)).rejects.toBe(authError); + }); + }); + + describe('Name Uniqueness Validation', () => { + it('should reject when dataset name already exists in team', async () => { + 
mockMongoEvalDatasetCollection.findOne.mockResolvedValue({ + _id: 'existing-dataset-id', + name: 'Test Dataset', + teamId: validTeamId + } as any); + + const req = { + body: { name: 'Test Dataset', description: 'Test description' } + }; + + await expect(handler_test(req as any)).rejects.toEqual({ + statusCode: 409, + message: 'A dataset with this name already exists' + }); + + expect(mockMongoEvalDatasetCollection.findOne).toHaveBeenCalledWith({ + teamId: validTeamId, + name: 'Test Dataset' + }); + }); + + it('should check name with trimmed whitespace', async () => { + const req = { + body: { name: ' Test Dataset ', description: 'Test description' } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetCollection.findOne).toHaveBeenCalledWith({ + teamId: validTeamId, + name: 'Test Dataset' + }); + }); + }); + + describe('Dataset Creation', () => { + it('should create dataset with correct parameters', async () => { + const req = { + body: { name: 'Test Dataset', description: 'Test description' } + }; + + const result = await handler_test(req as any); + + expect(mockMongoSessionRun).toHaveBeenCalledWith(expect.any(Function)); + expect(mockMongoEvalDatasetCollection.create).toHaveBeenCalledWith( + [ + { + teamId: validTeamId, + tmbId: validTmbId, + name: 'Test Dataset', + description: 'Test description' + } + ], + { session: {}, ordered: true } + ); + expect(result).toBe(mockDatasetId); + }); + + it('should trim name and description before saving', async () => { + const req = { + body: { name: ' Test Dataset ', description: ' Test description ' } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetCollection.create).toHaveBeenCalledWith( + [ + { + teamId: validTeamId, + tmbId: validTmbId, + name: 'Test Dataset', + description: 'Test description' + } + ], + { session: {}, ordered: true } + ); + }); + + it('should handle empty description correctly', async () => { + const req = { + body: { name: 'Test Dataset', description: '' } + 
}; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetCollection.create).toHaveBeenCalledWith( + [ + { + teamId: validTeamId, + tmbId: validTmbId, + name: 'Test Dataset', + description: '' + } + ], + { session: {}, ordered: true } + ); + }); + + it('should handle missing description correctly', async () => { + const req = { + body: { name: 'Test Dataset' } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetCollection.create).toHaveBeenCalledWith( + [ + { + teamId: validTeamId, + tmbId: validTmbId, + name: 'Test Dataset', + description: '' + } + ], + { session: {}, ordered: true } + ); + }); + + it('should return dataset ID as string', async () => { + const req = { + body: { name: 'Test Dataset', description: 'Test description' } + }; + + const result = await handler_test(req as any); + expect(result).toBe(mockDatasetId); + expect(typeof result).toBe('string'); + }); + + it('should propagate database creation errors', async () => { + const dbError = new Error('Database connection failed'); + mockMongoSessionRun.mockRejectedValue(dbError); + + const req = { + body: { name: 'Test Dataset', description: 'Test description' } + }; + + await expect(handler_test(req as any)).rejects.toBe(dbError); + }); + }); + + describe('Edge Cases', () => { + it('should handle exactly 100 character name', async () => { + const exactName = 'a'.repeat(100); + const req = { + body: { name: exactName, description: 'Test description' } + }; + + const result = await handler_test(req as any); + expect(result).toBe(mockDatasetId); + }); + + it('should handle exactly 500 character description', async () => { + const exactDescription = 'a'.repeat(500); + const req = { + body: { name: 'Test Dataset', description: exactDescription } + }; + + const result = await handler_test(req as any); + expect(result).toBe(mockDatasetId); + }); + + it('should handle special characters in name', async () => { + const specialName = 'Test-Dataset_2024@Company!'; + const req = { 
+ body: { name: specialName, description: 'Test description' } + }; + + const result = await handler_test(req as any); + expect(result).toBe(mockDatasetId); + }); + }); +}); From 64c6dd45b6dcb8a196cae1a42a464ce3f22bea0f Mon Sep 17 00:00:00 2001 From: Jon Date: Fri, 22 Aug 2025 18:16:28 +0800 Subject: [PATCH 004/255] feat: Add update API for evaluation dataset collection --- packages/global/core/evaluation/api.d.ts | 8 +- .../evaluation/dataset/collection/update.ts | 119 ++++ .../dataset/collection/update.test.ts | 581 ++++++++++++++++++ 3 files changed, 707 insertions(+), 1 deletion(-) create mode 100644 projects/app/src/pages/api/core/evaluation/dataset/collection/update.ts create mode 100644 test/cases/pages/api/core/evaluation/dataset/collection/update.test.ts diff --git a/packages/global/core/evaluation/api.d.ts b/packages/global/core/evaluation/api.d.ts index 092fd7ee16b0..a490a2dedc7f 100644 --- a/packages/global/core/evaluation/api.d.ts +++ b/packages/global/core/evaluation/api.d.ts @@ -19,7 +19,13 @@ export type updateEvalItemBody = { variables: Record; }; -export type createEvalDatasetCollectionBody = { +type EvalDatasetCollectionBase = { name: string; description?: string; }; + +export type createEvalDatasetCollectionBody = EvalDatasetCollectionBase; + +export type updateEvalDatasetCollectionBody = EvalDatasetCollectionBase & { + collectionId: string; +}; diff --git a/projects/app/src/pages/api/core/evaluation/dataset/collection/update.ts b/projects/app/src/pages/api/core/evaluation/dataset/collection/update.ts new file mode 100644 index 000000000000..b19e1552169e --- /dev/null +++ b/projects/app/src/pages/api/core/evaluation/dataset/collection/update.ts @@ -0,0 +1,119 @@ +import type { ApiRequestProps } from '@fastgpt/service/type/next'; +import { NextAPI } from '@/service/middleware/entry'; +import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; +import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; 
+import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; +import type { updateEvalDatasetCollectionBody } from '@fastgpt/global/core/evaluation/api'; + +export type EvalDatasetCollectionUpdateQuery = {}; +export type EvalDatasetCollectionUpdateBody = updateEvalDatasetCollectionBody; +export type EvalDatasetCollectionUpdateResponse = string; +async function handler( + req: ApiRequestProps +): Promise { + const { collectionId, name, description = '' } = req.body; + + // Parameter validation + if (!collectionId || typeof collectionId !== 'string' || collectionId.trim().length === 0) { + return Promise.reject({ + statusCode: 400, + message: 'Collection ID is required and must be a non-empty string' + }); + } + + if (!name || typeof name !== 'string' || name.trim().length === 0) { + return Promise.reject({ + statusCode: 400, + message: 'Name is required and must be a non-empty string' + }); + } + + if (name.trim().length > 100) { + return Promise.reject({ + statusCode: 400, + message: 'Name must be less than 100 characters' + }); + } + + if (description && typeof description !== 'string') { + return Promise.reject({ + statusCode: 400, + message: 'Description must be a string' + }); + } + + if (description && description.length > 500) { + return Promise.reject({ + statusCode: 400, + message: 'Description must be less than 500 characters' + }); + } + + // TODO: Authentication check - verify user is authenticated via cookie or token + // TODO: Authorization check - verify user has write permissions for this resource + const { teamId, tmbId } = await authUserPer({ + req, + authToken: true, + authApiKey: true, + per: WritePermissionVal + }); + + // TODO: Audit log - record the update operation for compliance and tracking + + // Check if collection exists and belongs to the team + const existingCollection = await MongoEvalDatasetCollection.findOne({ 
+ _id: collectionId, + teamId + }); + + if (!existingCollection) { + return Promise.reject({ + statusCode: 404, + message: 'Dataset collection not found' + }); + } + + // Check for name conflicts within team (excluding current collection) + const nameConflict = await MongoEvalDatasetCollection.findOne({ + teamId, + name: name.trim(), + _id: { $ne: collectionId } + }); + + if (nameConflict) { + return Promise.reject({ + statusCode: 500, + message: 'A dataset with this name already exists' + }); + } + + // Update dataset collection + try { + await mongoSessionRun(async (session) => { + await MongoEvalDatasetCollection.updateOne( + { _id: collectionId, teamId, tmbId }, + { + $set: { + name: name.trim(), + description: description.trim(), + updateTime: new Date() + } + }, + { session } + ); + }); + + return 'success'; + } catch (error) { + return Promise.reject({ + statusCode: 500, + message: 'Failed to update dataset collection' + }); + } +} + +export default NextAPI(handler); + +// Export handler for testing +export const handler_test = process.env.NODE_ENV === 'test' ? 
handler : undefined; diff --git a/test/cases/pages/api/core/evaluation/dataset/collection/update.test.ts b/test/cases/pages/api/core/evaluation/dataset/collection/update.test.ts new file mode 100644 index 000000000000..ef3ceb1c3c3b --- /dev/null +++ b/test/cases/pages/api/core/evaluation/dataset/collection/update.test.ts @@ -0,0 +1,581 @@ +import { describe, expect, it, vi, beforeEach } from 'vitest'; +import { handler_test } from '@/pages/api/core/evaluation/dataset/collection/update'; +import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; +import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; +import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; + +vi.mock('@fastgpt/service/support/permission/user/auth'); +vi.mock('@fastgpt/service/common/mongo/sessionRun'); +vi.mock('@fastgpt/service/core/evaluation/evalDatasetCollectionSchema', () => ({ + MongoEvalDatasetCollection: { + findOne: vi.fn(), + updateOne: vi.fn() + } +})); + +const mockAuthUserPer = vi.mocked(authUserPer); +const mockMongoSessionRun = vi.mocked(mongoSessionRun); +const mockMongoEvalDatasetCollection = vi.mocked(MongoEvalDatasetCollection); + +describe('EvalDatasetCollection Update API', () => { + const validTeamId = 'team123'; + const validTmbId = 'tmb123'; + const mockCollectionId = '65f5b5b5b5b5b5b5b5b5b5b5'; + const existingCollection = { + _id: mockCollectionId, + teamId: validTeamId, + name: 'Old Dataset Name', + description: 'Old description' + }; + + beforeEach(() => { + vi.clearAllMocks(); + + mockAuthUserPer.mockResolvedValue({ + teamId: validTeamId, + tmbId: validTmbId + }); + + // Default setup: collection exists, no name conflict + mockMongoEvalDatasetCollection.findOne + .mockResolvedValueOnce(existingCollection as any) // First call: existence check + .mockResolvedValueOnce(null); // Second call: name 
conflict check + + mockMongoSessionRun.mockImplementation(async (callback) => { + return callback({} as any); + }); + + mockMongoEvalDatasetCollection.updateOne.mockResolvedValue({ modifiedCount: 1 } as any); + }); + + describe('Parameter Validation', () => { + it('should reject when collectionId is missing', async () => { + const req = { + body: { name: 'Updated Name', description: 'Updated description' } + }; + + await expect(handler_test(req as any)).rejects.toEqual({ + statusCode: 400, + message: 'Collection ID is required and must be a non-empty string' + }); + }); + + it('should reject when collectionId is empty string', async () => { + const req = { + body: { collectionId: '', name: 'Updated Name', description: 'Updated description' } + }; + + await expect(handler_test(req as any)).rejects.toEqual({ + statusCode: 400, + message: 'Collection ID is required and must be a non-empty string' + }); + }); + + it('should reject when collectionId is only whitespace', async () => { + const req = { + body: { collectionId: ' ', name: 'Updated Name', description: 'Updated description' } + }; + + await expect(handler_test(req as any)).rejects.toEqual({ + statusCode: 400, + message: 'Collection ID is required and must be a non-empty string' + }); + }); + + it('should reject when collectionId is not a string', async () => { + const req = { + body: { collectionId: 123, name: 'Updated Name', description: 'Updated description' } + }; + + await expect(handler_test(req as any)).rejects.toEqual({ + statusCode: 400, + message: 'Collection ID is required and must be a non-empty string' + }); + }); + + it('should reject when name is missing', async () => { + const req = { + body: { collectionId: mockCollectionId, description: 'Updated description' } + }; + + await expect(handler_test(req as any)).rejects.toEqual({ + statusCode: 400, + message: 'Name is required and must be a non-empty string' + }); + }); + + it('should reject when name is empty string', async () => { + const req = { 
+ body: { collectionId: mockCollectionId, name: '', description: 'Updated description' } + }; + + await expect(handler_test(req as any)).rejects.toEqual({ + statusCode: 400, + message: 'Name is required and must be a non-empty string' + }); + }); + + it('should reject when name is only whitespace', async () => { + const req = { + body: { collectionId: mockCollectionId, name: ' ', description: 'Updated description' } + }; + + await expect(handler_test(req as any)).rejects.toEqual({ + statusCode: 400, + message: 'Name is required and must be a non-empty string' + }); + }); + + it('should reject when name is not a string', async () => { + const req = { + body: { collectionId: mockCollectionId, name: 123, description: 'Updated description' } + }; + + await expect(handler_test(req as any)).rejects.toEqual({ + statusCode: 400, + message: 'Name is required and must be a non-empty string' + }); + }); + + it('should reject when name exceeds 100 characters', async () => { + const longName = 'a'.repeat(101); + const req = { + body: { collectionId: mockCollectionId, name: longName, description: 'Updated description' } + }; + + await expect(handler_test(req as any)).rejects.toEqual({ + statusCode: 400, + message: 'Name must be less than 100 characters' + }); + }); + + it('should reject when description is not a string', async () => { + const req = { + body: { collectionId: mockCollectionId, name: 'Updated Name', description: 123 } + }; + + await expect(handler_test(req as any)).rejects.toEqual({ + statusCode: 400, + message: 'Description must be a string' + }); + }); + + it('should reject when description exceeds 500 characters', async () => { + const longDescription = 'a'.repeat(501); + const req = { + body: { collectionId: mockCollectionId, name: 'Updated Name', description: longDescription } + }; + + await expect(handler_test(req as any)).rejects.toEqual({ + statusCode: 400, + message: 'Description must be less than 500 characters' + }); + }); + + it('should accept valid 
parameters', async () => { + const req = { + body: { + collectionId: mockCollectionId, + name: 'Updated Name', + description: 'Updated description' + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('success'); + }); + + it('should accept valid name without description', async () => { + const req = { + body: { collectionId: mockCollectionId, name: 'Updated Name' } + }; + + const result = await handler_test(req as any); + expect(result).toBe('success'); + }); + + it('should accept valid name with empty description', async () => { + const req = { + body: { collectionId: mockCollectionId, name: 'Updated Name', description: '' } + }; + + const result = await handler_test(req as any); + expect(result).toBe('success'); + }); + }); + + describe('Authentication and Authorization', () => { + it('should call authUserPer with correct parameters', async () => { + const req = { + body: { + collectionId: mockCollectionId, + name: 'Updated Name', + description: 'Updated description' + } + }; + + await handler_test(req as any); + + expect(mockAuthUserPer).toHaveBeenCalledWith({ + req, + authToken: true, + authApiKey: true, + per: WritePermissionVal + }); + }); + + it('should propagate authentication errors', async () => { + const authError = new Error('Authentication failed'); + mockAuthUserPer.mockRejectedValue(authError); + + const req = { + body: { + collectionId: mockCollectionId, + name: 'Updated Name', + description: 'Updated description' + } + }; + + await expect(handler_test(req as any)).rejects.toBe(authError); + }); + }); + + describe('Collection Existence Validation', () => { + it('should reject when collection does not exist', async () => { + // Reset mock and set up for this specific test + mockMongoEvalDatasetCollection.findOne.mockReset(); + mockMongoEvalDatasetCollection.findOne.mockResolvedValueOnce(null); + + const req = { + body: { + collectionId: mockCollectionId, + name: 'Updated Name', + description: 'Updated description' + } + 
}; + + await expect(handler_test(req as any)).rejects.toEqual({ + statusCode: 404, + message: 'Dataset collection not found' + }); + + expect(mockMongoEvalDatasetCollection.findOne).toHaveBeenCalledWith({ + _id: mockCollectionId, + teamId: validTeamId + }); + }); + + it('should reject when collection belongs to different team', async () => { + // Reset mock and set up for this specific test + mockMongoEvalDatasetCollection.findOne.mockReset(); + mockMongoEvalDatasetCollection.findOne.mockResolvedValueOnce(null); + + const req = { + body: { + collectionId: mockCollectionId, + name: 'Updated Name', + description: 'Updated description' + } + }; + + await expect(handler_test(req as any)).rejects.toEqual({ + statusCode: 404, + message: 'Dataset collection not found' + }); + }); + + it('should proceed when collection exists and belongs to team', async () => { + const req = { + body: { + collectionId: mockCollectionId, + name: 'Updated Name', + description: 'Updated description' + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('success'); + }); + }); + + describe('Name Uniqueness Validation', () => { + it('should reject when updated name conflicts with another collection in team', async () => { + const conflictingCollection = { + _id: 'different-id', + name: 'Updated Name', + teamId: validTeamId + }; + + // Reset mock and set up for this specific test + mockMongoEvalDatasetCollection.findOne.mockReset(); + mockMongoEvalDatasetCollection.findOne + .mockResolvedValueOnce(existingCollection as any) // First call for existence check + .mockResolvedValueOnce(conflictingCollection as any); // Second call for name conflict check + + const req = { + body: { + collectionId: mockCollectionId, + name: 'Updated Name', + description: 'Updated description' + } + }; + + await expect(handler_test(req as any)).rejects.toEqual({ + statusCode: 500, + message: 'A dataset with this name already exists' + }); + + 
expect(mockMongoEvalDatasetCollection.findOne).toHaveBeenCalledWith({ + teamId: validTeamId, + name: 'Updated Name', + _id: { $ne: mockCollectionId } + }); + }); + + it('should allow keeping the same name for the same collection', async () => { + // Reset the mock to set up specific behavior for this test + mockMongoEvalDatasetCollection.findOne.mockReset(); + mockMongoEvalDatasetCollection.findOne + .mockResolvedValueOnce(existingCollection as any) // First call for existence check + .mockResolvedValueOnce(null); // Second call for name conflict check returns null + + const req = { + body: { + collectionId: mockCollectionId, + name: 'Old Dataset Name', + description: 'Updated description' + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('success'); + }); + + it('should check name with trimmed whitespace', async () => { + // Reset the mock to set up specific behavior for this test + mockMongoEvalDatasetCollection.findOne.mockReset(); + mockMongoEvalDatasetCollection.findOne + .mockResolvedValueOnce(existingCollection as any) + .mockResolvedValueOnce(null); + + const req = { + body: { + collectionId: mockCollectionId, + name: ' Updated Name ', + description: 'Updated description' + } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetCollection.findOne).toHaveBeenCalledWith({ + teamId: validTeamId, + name: 'Updated Name', + _id: { $ne: mockCollectionId } + }); + }); + }); + + describe('Collection Update', () => { + it('should update collection with correct parameters', async () => { + const req = { + body: { + collectionId: mockCollectionId, + name: 'Updated Name', + description: 'Updated description' + } + }; + + const result = await handler_test(req as any); + + expect(mockMongoSessionRun).toHaveBeenCalledWith(expect.any(Function)); + expect(mockMongoEvalDatasetCollection.updateOne).toHaveBeenCalledWith( + { _id: mockCollectionId, teamId: validTeamId, tmbId: validTmbId }, + { + $set: { + name: 'Updated Name', + 
description: 'Updated description', + updateTime: expect.any(Date) + } + }, + { session: {} } + ); + expect(result).toBe('success'); + }); + + it('should trim name and description before saving', async () => { + const req = { + body: { + collectionId: mockCollectionId, + name: ' Updated Name ', + description: ' Updated description ' + } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetCollection.updateOne).toHaveBeenCalledWith( + { _id: mockCollectionId, teamId: validTeamId, tmbId: validTmbId }, + { + $set: { + name: 'Updated Name', + description: 'Updated description', + updateTime: expect.any(Date) + } + }, + { session: {} } + ); + }); + + it('should handle empty description correctly', async () => { + const req = { + body: { collectionId: mockCollectionId, name: 'Updated Name', description: '' } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetCollection.updateOne).toHaveBeenCalledWith( + { _id: mockCollectionId, teamId: validTeamId, tmbId: validTmbId }, + { + $set: { + name: 'Updated Name', + description: '', + updateTime: expect.any(Date) + } + }, + { session: {} } + ); + }); + + it('should handle missing description correctly', async () => { + const req = { + body: { collectionId: mockCollectionId, name: 'Updated Name' } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetCollection.updateOne).toHaveBeenCalledWith( + { _id: mockCollectionId, teamId: validTeamId, tmbId: validTmbId }, + { + $set: { + name: 'Updated Name', + description: '', + updateTime: expect.any(Date) + } + }, + { session: {} } + ); + }); + + it('should return success string', async () => { + const req = { + body: { + collectionId: mockCollectionId, + name: 'Updated Name', + description: 'Updated description' + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('success'); + expect(typeof result).toBe('string'); + }); + + it('should propagate database update errors', async () => { + // Reset the 
mock to set up specific behavior for this test + mockMongoEvalDatasetCollection.findOne.mockReset(); + mockMongoEvalDatasetCollection.findOne + .mockResolvedValueOnce(existingCollection as any) // First call: existence check + .mockResolvedValueOnce(null); // Second call: name conflict check + + const dbError = new Error('Database connection failed'); + mockMongoSessionRun.mockRejectedValue(dbError); + + const req = { + body: { + collectionId: mockCollectionId, + name: 'Updated Name', + description: 'Updated description' + } + }; + + await expect(handler_test(req as any)).rejects.toEqual({ + statusCode: 500, + message: 'Failed to update dataset collection' + }); + }); + }); + + describe('Edge Cases', () => { + it('should handle exactly 100 character name', async () => { + const exactName = 'a'.repeat(100); + const req = { + body: { + collectionId: mockCollectionId, + name: exactName, + description: 'Updated description' + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('success'); + }); + + it('should handle exactly 500 character description', async () => { + const exactDescription = 'a'.repeat(500); + const req = { + body: { + collectionId: mockCollectionId, + name: 'Updated Name', + description: exactDescription + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('success'); + }); + + it('should handle special characters in name', async () => { + const specialName = 'Updated-Dataset_2024@Company!'; + const req = { + body: { + collectionId: mockCollectionId, + name: specialName, + description: 'Updated description' + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('success'); + }); + + it('should handle ObjectId string format for collectionId', async () => { + const objectIdString = '507f1f77bcf86cd799439011'; + const req = { + body: { + collectionId: objectIdString, + name: 'Updated Name', + description: 'Updated description' + } + }; + + const result = await 
handler_test(req as any); + expect(result).toBe('success'); + }); + + it('should handle very long valid ObjectId', async () => { + const longValidId = '507f1f77bcf86cd799439011'; + const req = { + body: { + collectionId: longValidId, + name: 'Updated Name', + description: 'Updated description' + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('success'); + }); + }); +}); From aa67781ebcf9aedb6e90b6f21cc91d9593fa5ec5 Mon Sep 17 00:00:00 2001 From: Jon Date: Sat, 23 Aug 2025 10:19:43 +0800 Subject: [PATCH 005/255] feat: Add endpoint to list evaluation dataset collections --- packages/global/core/evaluation/api.d.ts | 17 +- .../evaluation/dataset/collection/list.ts | 103 ++++ .../dataset/collection/list.test.ts | 579 ++++++++++++++++++ 3 files changed, 698 insertions(+), 1 deletion(-) create mode 100644 projects/app/src/pages/api/core/evaluation/dataset/collection/list.ts create mode 100644 test/cases/pages/api/core/evaluation/dataset/collection/list.test.ts diff --git a/packages/global/core/evaluation/api.d.ts b/packages/global/core/evaluation/api.d.ts index a490a2dedc7f..6a069e3fd5a3 100644 --- a/packages/global/core/evaluation/api.d.ts +++ b/packages/global/core/evaluation/api.d.ts @@ -1,4 +1,5 @@ -import type { PaginationProps } from '@fastgpt/web/common/fetch/type'; +import type { PaginationProps, PaginationResponse } from '@fastgpt/web/common/fetch/type'; +import type { EvalDatasetCollectionSchemaType } from './type'; export type listEvaluationsBody = PaginationProps<{ searchKey?: string; @@ -29,3 +30,17 @@ export type createEvalDatasetCollectionBody = EvalDatasetCollectionBase; export type updateEvalDatasetCollectionBody = EvalDatasetCollectionBase & { collectionId: string; }; + +export type listEvalDatasetCollectionBody = PaginationProps<{ + searchKey?: string; +}>; + +export type listEvalDatasetCollectionResponse = PaginationResponse< + Pick< + EvalDatasetCollectionSchemaType, + '_id' | 'name' | 'description' | 'createTime' | 
'updateTime' | 'dataCountByGen' + > & { + creatorAvatar?: string; + creatorName?: string; + } +>; diff --git a/projects/app/src/pages/api/core/evaluation/dataset/collection/list.ts b/projects/app/src/pages/api/core/evaluation/dataset/collection/list.ts new file mode 100644 index 000000000000..f87e6764c9be --- /dev/null +++ b/projects/app/src/pages/api/core/evaluation/dataset/collection/list.ts @@ -0,0 +1,103 @@ +import type { ApiRequestProps } from '@fastgpt/service/type/next'; +import { NextAPI } from '@/service/middleware/entry'; +import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; +import { ReadPermissionVal } from '@fastgpt/global/support/permission/constant'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; +import { parsePaginationRequest } from '@fastgpt/service/common/api/pagination'; +import { Types } from '@fastgpt/service/common/mongo'; +import type { + listEvalDatasetCollectionBody, + listEvalDatasetCollectionResponse +} from '@fastgpt/global/core/evaluation/api'; +import { replaceRegChars } from '@fastgpt/global/common/string/tools'; + +async function handler( + req: ApiRequestProps +): Promise { + const { teamId } = await authUserPer({ + req, + authToken: true, + authApiKey: true, + per: ReadPermissionVal + }); + + // Parse request parameters + const { offset, pageSize } = parsePaginationRequest(req); + const { searchKey } = req.body; + + // Build MongoDB pipeline + const match: Record = { + teamId: new Types.ObjectId(teamId) + }; + + // Add search filter if provided + if (searchKey && typeof searchKey === 'string' && searchKey.trim().length > 0) { + match.name = { $regex: new RegExp(`${replaceRegChars(searchKey.trim())}`, 'i') }; + } + + try { + // Execute aggregation with pagination + const [collections, total] = await Promise.all([ + MongoEvalDatasetCollection.aggregate(buildPipeline(match, offset, pageSize)), + MongoEvalDatasetCollection.countDocuments(match) + ]); 
+ + // TODO: Audit Log - Log successful response + + return { + total, + list: collections.map((item) => ({ + _id: String(item._id), + name: item.name, + description: item.description || '', + createTime: item.createTime, + updateTime: item.updateTime, + dataCountByGen: item.dataCountByGen || 0, + creatorAvatar: item.teamMember?.avatar, + creatorName: item.teamMember?.name + })) + }; + } catch (error) { + console.error('Database error in eval dataset collection list:', error); + throw error; + } +} + +const buildPipeline = (match: Record, offset: number, pageSize: number) => [ + { $match: match }, + { $sort: { createTime: -1 as const } }, + { $skip: offset }, + { $limit: pageSize }, + { + $lookup: { + from: 'team_members', + localField: 'tmbId', + foreignField: '_id', + as: 'teamMember' + } + }, + { + $addFields: { + teamMember: { $arrayElemAt: ['$teamMember', 0] } + } + }, + { + $project: { + _id: 1, + name: 1, + description: 1, + createTime: 1, + updateTime: 1, + dataCountByGen: 1, + teamMember: { + avatar: 1, + name: 1 + } + } + } +]; + +export default NextAPI(handler); + +// Export handler for testing +export const handler_test = process.env.NODE_ENV === 'test' ? 
handler : undefined; diff --git a/test/cases/pages/api/core/evaluation/dataset/collection/list.test.ts b/test/cases/pages/api/core/evaluation/dataset/collection/list.test.ts new file mode 100644 index 000000000000..bdf8aedda51b --- /dev/null +++ b/test/cases/pages/api/core/evaluation/dataset/collection/list.test.ts @@ -0,0 +1,579 @@ +import { describe, expect, it, vi, beforeEach } from 'vitest'; +import { handler_test } from '@/pages/api/core/evaluation/dataset/collection/list'; +import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; +import { ReadPermissionVal } from '@fastgpt/global/support/permission/constant'; +import { Types } from '@fastgpt/service/common/mongo'; + +vi.mock('@fastgpt/service/support/permission/user/auth'); +vi.mock('@fastgpt/service/core/evaluation/evalDatasetCollectionSchema', () => ({ + MongoEvalDatasetCollection: { + aggregate: vi.fn(), + countDocuments: vi.fn() + } +})); + +const mockAuthUserPer = vi.mocked(authUserPer); +const mockMongoEvalDatasetCollection = vi.mocked(MongoEvalDatasetCollection); + +describe('EvalDatasetCollection List API', () => { + const validTeamId = '65f5b5b5b5b5b5b5b5b5b5b0'; + const validTmbId = '65f5b5b5b5b5b5b5b5b5b5b9'; + const mockCollections = [ + { + _id: '65f5b5b5b5b5b5b5b5b5b5b1', + name: 'Dataset 1', + description: 'First dataset', + createTime: new Date('2024-01-01'), + updateTime: new Date('2024-01-02'), + dataCountByGen: 5, + teamMember: { + avatar: 'avatar1.jpg', + name: 'User One' + } + }, + { + _id: '65f5b5b5b5b5b5b5b5b5b5b2', + name: 'Dataset 2', + description: 'Second dataset', + createTime: new Date('2024-01-03'), + updateTime: new Date('2024-01-04'), + dataCountByGen: 10, + teamMember: { + avatar: 'avatar2.jpg', + name: 'User Two' + } + } + ]; + + beforeEach(() => { + vi.clearAllMocks(); + + mockAuthUserPer.mockResolvedValue({ + teamId: validTeamId, + tmbId: 
validTmbId + }); + + mockMongoEvalDatasetCollection.aggregate.mockResolvedValue(mockCollections); + mockMongoEvalDatasetCollection.countDocuments.mockResolvedValue(2); + }); + + describe('Authentication and Authorization', () => { + it('should call authUserPer with correct parameters', async () => { + const req = { + body: { pageNum: 1, pageSize: 10 } + }; + + await handler_test(req as any); + + expect(mockAuthUserPer).toHaveBeenCalledWith({ + req, + authToken: true, + authApiKey: true, + per: ReadPermissionVal + }); + }); + + it('should propagate authentication errors', async () => { + const authError = new Error('Authentication failed'); + mockAuthUserPer.mockRejectedValue(authError); + + const req = { + body: { pageNum: 1, pageSize: 10 } + }; + + await expect(handler_test(req as any)).rejects.toBe(authError); + }); + }); + + describe('Pagination', () => { + it('should handle default pagination parameters', async () => { + const req = { + body: { pageSize: 20 } + }; + + const result = await handler_test(req as any); + + expect(mockMongoEvalDatasetCollection.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([ + { $match: { teamId: new Types.ObjectId(validTeamId) } }, + { $sort: { createTime: -1 } }, + { $skip: 0 }, + { $limit: 20 } + ]) + ); + expect(result.total).toBe(2); + expect(result.list).toHaveLength(2); + }); + + it('should handle custom pagination parameters', async () => { + const req = { + body: { pageNum: 2, pageSize: 5 } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetCollection.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([ + { $match: { teamId: new Types.ObjectId(validTeamId) } }, + { $sort: { createTime: -1 } }, + { $skip: 5 }, + { $limit: 5 } + ]) + ); + }); + + it('should handle page number 1', async () => { + const req = { + body: { pageNum: 1, pageSize: 10 } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetCollection.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([{ 
$skip: 0 }, { $limit: 10 }]) + ); + }); + }); + + describe('Search Functionality', () => { + it('should handle empty search key', async () => { + const req = { + body: { searchKey: '', pageNum: 1, pageSize: 10 } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetCollection.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([{ $match: { teamId: new Types.ObjectId(validTeamId) } }]) + ); + }); + + it('should handle whitespace-only search key', async () => { + const req = { + body: { searchKey: ' ', pageNum: 1, pageSize: 10 } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetCollection.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([{ $match: { teamId: new Types.ObjectId(validTeamId) } }]) + ); + }); + + it('should handle valid search key', async () => { + const req = { + body: { searchKey: 'Dataset 1', pageNum: 1, pageSize: 10 } + }; + + await handler_test(req as any); + + const expectedMatch = { + teamId: new Types.ObjectId(validTeamId), + name: { $regex: new RegExp('Dataset 1', 'i') } + }; + + expect(mockMongoEvalDatasetCollection.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([{ $match: expectedMatch }]) + ); + + expect(mockMongoEvalDatasetCollection.countDocuments).toHaveBeenCalledWith(expectedMatch); + }); + + it('should trim search key before processing', async () => { + const req = { + body: { searchKey: ' Dataset 1 ', pageNum: 1, pageSize: 10 } + }; + + await handler_test(req as any); + + const expectedMatch = { + teamId: new Types.ObjectId(validTeamId), + name: { $regex: new RegExp('Dataset 1', 'i') } + }; + + expect(mockMongoEvalDatasetCollection.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([{ $match: expectedMatch }]) + ); + }); + + it('should escape special regex characters in search key', async () => { + const req = { + body: { searchKey: 'Dataset[1]', pageNum: 1, pageSize: 10 } + }; + + await handler_test(req as any); + + const expectedMatch = { + teamId: new 
Types.ObjectId(validTeamId), + name: { $regex: new RegExp('Dataset\\[1\\]', 'i') } + }; + + expect(mockMongoEvalDatasetCollection.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([{ $match: expectedMatch }]) + ); + }); + + it('should handle non-string search key', async () => { + const req = { + body: { searchKey: 123, pageNum: 1, pageSize: 10 } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetCollection.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([{ $match: { teamId: new Types.ObjectId(validTeamId) } }]) + ); + }); + }); + + describe('MongoDB Pipeline', () => { + it('should build correct aggregation pipeline', async () => { + const req = { + body: { pageNum: 1, pageSize: 10 } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetCollection.aggregate).toHaveBeenCalledWith([ + { $match: { teamId: new Types.ObjectId(validTeamId) } }, + { $sort: { createTime: -1 } }, + { $skip: 0 }, + { $limit: 10 }, + { + $lookup: { + from: 'team_members', + localField: 'tmbId', + foreignField: '_id', + as: 'teamMember' + } + }, + { + $addFields: { + teamMember: { $arrayElemAt: ['$teamMember', 0] } + } + }, + { + $project: { + _id: 1, + name: 1, + description: 1, + createTime: 1, + updateTime: 1, + dataCountByGen: 1, + teamMember: { + avatar: 1, + name: 1 + } + } + } + ]); + }); + + it('should include search filter in pipeline when searchKey provided', async () => { + const req = { + body: { searchKey: 'test', pageNum: 1, pageSize: 10 } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetCollection.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([ + { + $match: { + teamId: new Types.ObjectId(validTeamId), + name: { $regex: new RegExp('test', 'i') } + } + } + ]) + ); + }); + }); + + describe('Response Format', () => { + it('should return correct response structure', async () => { + const req = { + body: { pageNum: 1, pageSize: 10 } + }; + + const result = await handler_test(req as any); + + 
expect(result).toEqual({ + total: 2, + list: [ + { + _id: '65f5b5b5b5b5b5b5b5b5b5b1', + name: 'Dataset 1', + description: 'First dataset', + createTime: expect.any(Date), + updateTime: expect.any(Date), + dataCountByGen: 5, + creatorAvatar: 'avatar1.jpg', + creatorName: 'User One' + }, + { + _id: '65f5b5b5b5b5b5b5b5b5b5b2', + name: 'Dataset 2', + description: 'Second dataset', + createTime: expect.any(Date), + updateTime: expect.any(Date), + dataCountByGen: 10, + creatorAvatar: 'avatar2.jpg', + creatorName: 'User Two' + } + ] + }); + }); + + it('should handle collections without teamMember data', async () => { + const collectionsWithoutTeamMember = [ + { + _id: '65f5b5b5b5b5b5b5b5b5b5b1', + name: 'Dataset 1', + description: 'First dataset', + createTime: new Date('2024-01-01'), + updateTime: new Date('2024-01-02'), + dataCountByGen: 5, + teamMember: null + } + ]; + + mockMongoEvalDatasetCollection.aggregate.mockResolvedValue(collectionsWithoutTeamMember); + mockMongoEvalDatasetCollection.countDocuments.mockResolvedValue(1); + + const req = { + body: { pageNum: 1, pageSize: 10 } + }; + + const result = await handler_test(req as any); + + expect(result.list[0]).toEqual({ + _id: '65f5b5b5b5b5b5b5b5b5b5b1', + name: 'Dataset 1', + description: 'First dataset', + createTime: expect.any(Date), + updateTime: expect.any(Date), + dataCountByGen: 5, + creatorAvatar: undefined, + creatorName: undefined + }); + }); + + it('should handle missing description', async () => { + const collectionsWithoutDescription = [ + { + _id: '65f5b5b5b5b5b5b5b5b5b5b1', + name: 'Dataset 1', + createTime: new Date('2024-01-01'), + updateTime: new Date('2024-01-02'), + dataCountByGen: 5, + teamMember: { + avatar: 'avatar1.jpg', + name: 'User One' + } + } + ]; + + mockMongoEvalDatasetCollection.aggregate.mockResolvedValue(collectionsWithoutDescription); + mockMongoEvalDatasetCollection.countDocuments.mockResolvedValue(1); + + const req = { + body: { pageNum: 1, pageSize: 10 } + }; + + const result = 
await handler_test(req as any); + + expect(result.list[0].description).toBe(''); + }); + + it('should handle missing dataCountByGen', async () => { + const collectionsWithoutDataCount = [ + { + _id: '65f5b5b5b5b5b5b5b5b5b5b1', + name: 'Dataset 1', + description: 'First dataset', + createTime: new Date('2024-01-01'), + updateTime: new Date('2024-01-02'), + teamMember: { + avatar: 'avatar1.jpg', + name: 'User One' + } + } + ]; + + mockMongoEvalDatasetCollection.aggregate.mockResolvedValue(collectionsWithoutDataCount); + mockMongoEvalDatasetCollection.countDocuments.mockResolvedValue(1); + + const req = { + body: { pageNum: 1, pageSize: 10 } + }; + + const result = await handler_test(req as any); + + expect(result.list[0].dataCountByGen).toBe(0); + }); + + it('should convert ObjectId to string', async () => { + const req = { + body: { pageNum: 1, pageSize: 10 } + }; + + const result = await handler_test(req as any); + + result.list.forEach((item) => { + expect(typeof item._id).toBe('string'); + }); + }); + }); + + describe('Empty Results', () => { + it('should handle empty collection list', async () => { + mockMongoEvalDatasetCollection.aggregate.mockResolvedValue([]); + mockMongoEvalDatasetCollection.countDocuments.mockResolvedValue(0); + + const req = { + body: { pageNum: 1, pageSize: 10 } + }; + + const result = await handler_test(req as any); + + expect(result).toEqual({ + total: 0, + list: [] + }); + }); + + it('should handle zero total count', async () => { + mockMongoEvalDatasetCollection.aggregate.mockResolvedValue([]); + mockMongoEvalDatasetCollection.countDocuments.mockResolvedValue(0); + + const req = { + body: { searchKey: 'nonexistent', pageNum: 1, pageSize: 10 } + }; + + const result = await handler_test(req as any); + + expect(result.total).toBe(0); + expect(result.list).toEqual([]); + }); + }); + + describe('Error Handling', () => { + it('should propagate database aggregate errors', async () => { + const dbError = new Error('Database connection 
failed'); + mockMongoEvalDatasetCollection.aggregate.mockRejectedValue(dbError); + + const req = { + body: { pageNum: 1, pageSize: 10 } + }; + + await expect(handler_test(req as any)).rejects.toBe(dbError); + }); + + it('should propagate database count errors', async () => { + const dbError = new Error('Database connection failed'); + mockMongoEvalDatasetCollection.countDocuments.mockRejectedValue(dbError); + + const req = { + body: { pageNum: 1, pageSize: 10 } + }; + + await expect(handler_test(req as any)).rejects.toBe(dbError); + }); + + it('should handle Promise.all rejection', async () => { + const dbError = new Error('Database connection failed'); + mockMongoEvalDatasetCollection.aggregate.mockResolvedValue(mockCollections); + mockMongoEvalDatasetCollection.countDocuments.mockRejectedValue(dbError); + + const req = { + body: { pageNum: 1, pageSize: 10 } + }; + + await expect(handler_test(req as any)).rejects.toBe(dbError); + }); + }); + + describe('Team Isolation', () => { + it('should filter results by team ID', async () => { + const req = { + body: { pageNum: 1, pageSize: 10 } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetCollection.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([{ $match: { teamId: new Types.ObjectId(validTeamId) } }]) + ); + + expect(mockMongoEvalDatasetCollection.countDocuments).toHaveBeenCalledWith({ + teamId: new Types.ObjectId(validTeamId) + }); + }); + + it('should include team ID in search filter', async () => { + const req = { + body: { searchKey: 'test', pageNum: 1, pageSize: 10 } + }; + + await handler_test(req as any); + + const expectedMatch = { + teamId: new Types.ObjectId(validTeamId), + name: { $regex: new RegExp('test', 'i') } + }; + + expect(mockMongoEvalDatasetCollection.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([{ $match: expectedMatch }]) + ); + + expect(mockMongoEvalDatasetCollection.countDocuments).toHaveBeenCalledWith(expectedMatch); + }); + }); + + describe('Edge 
Cases', () => { + it('should handle very large page size', async () => { + const req = { + body: { pageNum: 1, pageSize: 1000 } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetCollection.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([{ $limit: 1000 }]) + ); + }); + + it('should handle high page number', async () => { + const req = { + body: { pageNum: 100, pageSize: 10 } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetCollection.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([{ $skip: 990 }]) + ); + }); + + it('should handle complex search patterns', async () => { + const complexSearchKey = 'Dataset-1_test@2024!'; + const req = { + body: { searchKey: complexSearchKey, pageNum: 1, pageSize: 10 } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetCollection.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([ + { + $match: { + teamId: new Types.ObjectId(validTeamId), + name: { $regex: expect.any(RegExp) } + } + } + ]) + ); + }); + }); +}); From 3ba414397e08d56dfa1a842ad58d88c6f7b1f5f2 Mon Sep 17 00:00:00 2001 From: Jon Date: Sat, 23 Aug 2025 16:37:20 +0800 Subject: [PATCH 006/255] feat: Add dataset import and creation endpoints for evaluations --- packages/global/core/evaluation/api.d.ts | 39 +- packages/global/core/evaluation/constants.ts | 8 + packages/global/core/evaluation/type.d.ts | 3 +- .../core/evaluation/evalDatasetDataSchema.ts | 11 + .../core/evaluation/dataset/data/create.ts | 122 ++++++ .../core/evaluation/dataset/data/fileId.ts | 348 ++++++++++++++++++ .../api/core/evaluation/dataset/data/list.ts | 134 +++++++ 7 files changed, 663 insertions(+), 2 deletions(-) create mode 100644 projects/app/src/pages/api/core/evaluation/dataset/data/create.ts create mode 100644 projects/app/src/pages/api/core/evaluation/dataset/data/fileId.ts create mode 100644 projects/app/src/pages/api/core/evaluation/dataset/data/list.ts diff --git 
a/packages/global/core/evaluation/api.d.ts b/packages/global/core/evaluation/api.d.ts index 6a069e3fd5a3..ce7f1de623ed 100644 --- a/packages/global/core/evaluation/api.d.ts +++ b/packages/global/core/evaluation/api.d.ts @@ -1,5 +1,5 @@ import type { PaginationProps, PaginationResponse } from '@fastgpt/web/common/fetch/type'; -import type { EvalDatasetCollectionSchemaType } from './type'; +import type { EvalDatasetCollectionSchemaType, EvalDatasetDataSchemaType } from './type'; export type listEvaluationsBody = PaginationProps<{ searchKey?: string; @@ -44,3 +44,40 @@ export type listEvalDatasetCollectionResponse = PaginationResponse< creatorName?: string; } >; + +export type importEvalDatasetFromFileBody = { + fileId: string; + collectionId: string; + enableQualityEvaluation: boolean; + qualityEvaluationModel?: string; +}; + +export type createEvalDatasetDataBody = { + collectionId: string; + user_input: string; + actual_output?: string; + expected_output: string; + context?: string[]; + retrieval_context?: string[]; +}; + +export type listEvalDatasetDataBody = PaginationProps<{ + collectionId: string; + searchKey?: string; +}>; + +export type listEvalDatasetDataResponse = PaginationResponse< + Pick< + EvalDatasetDataSchemaType, + | '_id' + | 'user_input' + | 'actual_output' + | 'expected_output' + | 'context' + | 'retrieval_context' + | 'metadata' + | 'createFrom' + | 'createTime' + | 'updateTime' + > +>; diff --git a/packages/global/core/evaluation/constants.ts b/packages/global/core/evaluation/constants.ts index 5624267718e7..a7c42066d559 100644 --- a/packages/global/core/evaluation/constants.ts +++ b/packages/global/core/evaluation/constants.ts @@ -20,3 +20,11 @@ export const EvaluationStatusMap = { } }; export const EvaluationStatusValues = Object.keys(EvaluationStatusMap).map(Number); + +export enum EvalDatasetDataCreateFromEnum { + manual = 'manual', + fileImport = 'file_import', + intelligentGeneration = 'intelligent_generation' +} + +export const 
EvalDatasetDataCreateFromValues = Object.values(EvalDatasetDataCreateFromEnum); diff --git a/packages/global/core/evaluation/type.d.ts b/packages/global/core/evaluation/type.d.ts index aeadae7e2e19..6e3681d17aad 100644 --- a/packages/global/core/evaluation/type.d.ts +++ b/packages/global/core/evaluation/type.d.ts @@ -1,4 +1,4 @@ -import type { EvaluationStatusEnum } from './constants'; +import type { EvaluationStatusEnum, EvalDatasetDataCreateFromEnum } from './constants'; export type EvaluationSchemaType = { _id: string; @@ -71,6 +71,7 @@ export type EvalDatasetDataSchemaType = { context: string[]; retrieval_context: string[]; metadata: Record; + createFrom: EvalDatasetDataCreateFromEnum; createTime: Date; updateTime: Date; }; diff --git a/packages/service/core/evaluation/evalDatasetDataSchema.ts b/packages/service/core/evaluation/evalDatasetDataSchema.ts index 5e6424e75635..97a771cf536b 100644 --- a/packages/service/core/evaluation/evalDatasetDataSchema.ts +++ b/packages/service/core/evaluation/evalDatasetDataSchema.ts @@ -1,6 +1,10 @@ import type { EvalDatasetDataSchemaType } from '@fastgpt/global/core/evaluation/type'; import { connectionMongo, getMongoModel } from '../../common/mongo'; import { EvalDatasetCollectionName } from './evalDatasetCollectionSchema'; +import { + EvalDatasetDataCreateFromEnum, + EvalDatasetDataCreateFromValues +} from '@fastgpt/global/core/evaluation/constants'; const { Schema } = connectionMongo; @@ -54,6 +58,13 @@ const EvalDatasetDataSchema = new Schema({ type: Schema.Types.Mixed, default: {} }, + createFrom: { + type: String, + enum: EvalDatasetDataCreateFromValues, + default: EvalDatasetDataCreateFromEnum.manual, + required: true, + index: true + }, createTime: { type: Date, default: Date.now, diff --git a/projects/app/src/pages/api/core/evaluation/dataset/data/create.ts b/projects/app/src/pages/api/core/evaluation/dataset/data/create.ts new file mode 100644 index 000000000000..5828132e5c7f --- /dev/null +++ 
b/projects/app/src/pages/api/core/evaluation/dataset/data/create.ts @@ -0,0 +1,122 @@ +import type { ApiRequestProps } from '@fastgpt/service/type/next'; +import { NextAPI } from '@/service/middleware/entry'; +import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; +import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; +import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun'; +import { MongoEvalDatasetData } from '@fastgpt/service/core/evaluation/evalDatasetDataSchema'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; +import { EvalDatasetDataCreateFromEnum } from '@fastgpt/global/core/evaluation/constants'; +import type { createEvalDatasetDataBody } from '@fastgpt/global/core/evaluation/api'; + +export type EvalDatasetDataCreateQuery = {}; +export type EvalDatasetDataCreateBody = createEvalDatasetDataBody; +export type EvalDatasetDataCreateResponse = string; + +async function handler( + req: ApiRequestProps +): Promise { + const { collectionId, user_input, actual_output, expected_output, context, retrieval_context } = + req.body; + + if (!collectionId || typeof collectionId !== 'string') { + return Promise.reject({ + statusCode: 400, + message: 'collectionId is required and must be a string' + }); + } + + if (!user_input || typeof user_input !== 'string' || user_input.trim().length === 0) { + return Promise.reject({ + statusCode: 400, + message: 'user_input is required and must be a non-empty string' + }); + } + + if ( + !expected_output || + typeof expected_output !== 'string' || + expected_output.trim().length === 0 + ) { + return Promise.reject({ + statusCode: 400, + message: 'expected_output is required and must be a non-empty string' + }); + } + + if (actual_output !== undefined && typeof actual_output !== 'string') { + return Promise.reject({ + statusCode: 400, + message: 'actual_output must be a string if provided' + }); + } + + if 
( + context !== undefined && + (!Array.isArray(context) || !context.every((item) => typeof item === 'string')) + ) { + return Promise.reject({ + statusCode: 400, + message: 'context must be an array of strings if provided' + }); + } + + if ( + retrieval_context !== undefined && + (!Array.isArray(retrieval_context) || + !retrieval_context.every((item) => typeof item === 'string')) + ) { + return Promise.reject({ + statusCode: 400, + message: 'retrieval_context must be an array of strings if provided' + }); + } + + const { teamId } = await authUserPer({ + req, + authToken: true, + authApiKey: true, + per: WritePermissionVal + }); + + // Verify collection exists and belongs to the team + const collection = await MongoEvalDatasetCollection.findOne({ + _id: collectionId, + teamId + }); + + if (!collection) { + return Promise.reject({ + statusCode: 404, + message: 'Dataset collection not found or access denied' + }); + } + + const dataId = await mongoSessionRun(async (session) => { + const [{ _id }] = await MongoEvalDatasetData.create( + [ + { + datasetId: collectionId, + user_input: user_input.trim(), + actual_output: actual_output?.trim() || '', + expected_output: expected_output.trim(), + context: context || [], + retrieval_context: retrieval_context || [], + createFrom: EvalDatasetDataCreateFromEnum.manual + } + ], + { session, ordered: true } + ); + + return _id; + }); + + // TODO: Add audit log for data creation + // TODO: Add tracking for data creation metrics + + return dataId.toString(); +} + +export default NextAPI(handler); + +// Export handler for testing +export const handler_test = process.env.NODE_ENV === 'test' ? 
handler : undefined; diff --git a/projects/app/src/pages/api/core/evaluation/dataset/data/fileId.ts b/projects/app/src/pages/api/core/evaluation/dataset/data/fileId.ts new file mode 100644 index 000000000000..20af42ae0cdb --- /dev/null +++ b/projects/app/src/pages/api/core/evaluation/dataset/data/fileId.ts @@ -0,0 +1,348 @@ +import type { ApiRequestProps } from '@fastgpt/service/type/next'; +import { NextAPI } from '@/service/middleware/entry'; +import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; +import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; +import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; +import { MongoEvalDatasetData } from '@fastgpt/service/core/evaluation/evalDatasetDataSchema'; +import { EvalDatasetDataCreateFromEnum } from '@fastgpt/global/core/evaluation/constants'; +import { readFileContentFromMongo } from '@fastgpt/service/common/file/gridfs/controller'; +import { authCollectionFile } from '@fastgpt/service/support/permission/auth/file'; +import { BucketNameEnum } from '@fastgpt/global/common/file/constants'; +import type { importEvalDatasetFromFileBody } from '@fastgpt/global/core/evaluation/api'; + +export type EvalDatasetImportFromFileQuery = {}; +export type EvalDatasetImportFromFileBody = importEvalDatasetFromFileBody; +export type EvalDatasetImportFromFileResponse = string; + +const REQUIRED_CSV_COLUMNS = ['user_input', 'expected_output'] as const; + +const OPTIONAL_CSV_COLUMNS = ['actual_output', 'context', 'retrieval_context', 'metadata'] as const; + +const CSV_COLUMNS = [...REQUIRED_CSV_COLUMNS, ...OPTIONAL_CSV_COLUMNS] as const; + +interface CSVRow { + user_input: string; + expected_output: string; + actual_output?: string; + context?: string; + retrieval_context?: string; + metadata?: string; +} + +function parseCSVLine(line: string): string[] { + 
const result: string[] = []; + let current = ''; + let inQuotes = false; + + for (let i = 0; i < line.length; i++) { + const char = line[i]; + + if (char === '"') { + if (inQuotes && line[i + 1] === '"') { + // Escaped quote + current += '"'; + i++; + } else { + // Toggle quote state + inQuotes = !inQuotes; + } + } else if (char === ',' && !inQuotes) { + // End of field + result.push(current.trim()); + current = ''; + } else { + current += char; + } + } + + // Add the last field + result.push(current.trim()); + return result; +} + +function parseCSVContent(csvContent: string): CSVRow[] { + const lines = csvContent.split('\n').filter((line) => line.trim()); + + if (lines.length === 0) { + throw new Error('CSV file is empty'); + } + + // Parse header + const headerLine = lines[0]; + const headers = parseCSVLine(headerLine).map((h) => h.replace(/^"|"$/g, '')); + + // Validate CSV structure + const missingColumns = REQUIRED_CSV_COLUMNS.filter((col) => !headers.includes(col)); + if (missingColumns.length > 0) { + throw new Error(`CSV file is missing required columns: ${missingColumns.join(', ')}`); + } + + // Create column index mapping + const columnIndexes: Record = {}; + CSV_COLUMNS.forEach((col) => { + const index = headers.indexOf(col); + if (index !== -1) { + columnIndexes[col] = index; + } + }); + + // Parse data rows + const rows: CSVRow[] = []; + for (let i = 1; i < lines.length; i++) { + const line = lines[i].trim(); + if (!line) continue; // Skip empty lines + + const fields = parseCSVLine(line); + + if (fields.length !== headers.length) { + throw new Error(`Row ${i + 1}: Expected ${headers.length} columns, got ${fields.length}`); + } + + const row: CSVRow = { + user_input: fields[columnIndexes.user_input]?.replace(/^"|"$/g, '') || '', + expected_output: fields[columnIndexes.expected_output]?.replace(/^"|"$/g, '') || '' + }; + + // Add optional fields + if (columnIndexes.actual_output !== undefined) { + row.actual_output = 
fields[columnIndexes.actual_output]?.replace(/^"|"$/g, '') || ''; + } + if (columnIndexes.context !== undefined) { + row.context = fields[columnIndexes.context]?.replace(/^"|"$/g, '') || ''; + } + if (columnIndexes.retrieval_context !== undefined) { + row.retrieval_context = fields[columnIndexes.retrieval_context]?.replace(/^"|"$/g, '') || ''; + } + if (columnIndexes.metadata !== undefined) { + row.metadata = fields[columnIndexes.metadata]?.replace(/^"|"$/g, '') || '{}'; + } + + rows.push(row); + } + + return rows; +} + +async function handler( + req: ApiRequestProps +): Promise { + const { + fileId, + collectionId: datasetCollectionId, + enableQualityEvaluation, + qualityEvaluationModel + } = req.body; + + if (!fileId || typeof fileId !== 'string') { + return Promise.reject({ + statusCode: 400, + message: 'fileId is required and must be a string' + }); + } + + if (!datasetCollectionId || typeof datasetCollectionId !== 'string') { + return Promise.reject({ + statusCode: 400, + message: 'datasetCollectionId is required and must be a string' + }); + } + + if (typeof enableQualityEvaluation !== 'boolean') { + return Promise.reject({ + statusCode: 400, + message: 'enableQualityEvaluation is required and must be a boolean' + }); + } + + if ( + enableQualityEvaluation && + (!qualityEvaluationModel || typeof qualityEvaluationModel !== 'string') + ) { + return Promise.reject({ + statusCode: 400, + message: 'qualityEvaluationModel is required when enableQualityEvaluation is true' + }); + } + + const { teamId, tmbId } = await authUserPer({ + req, + authToken: true, + authApiKey: true, + per: WritePermissionVal + }); + + const { file } = await authCollectionFile({ + req, + authToken: true, + authApiKey: true, + fileId, + per: WritePermissionVal + }); + + const filename = file.filename?.toLowerCase() || ''; + if (!filename.endsWith('.csv')) { + return Promise.reject({ + statusCode: 400, + message: 'File must be a CSV file' + }); + } + + // Verify dataset collection exists and 
belongs to team + const datasetCollection = await MongoEvalDatasetCollection.findById(datasetCollectionId); + if (!datasetCollection) { + return Promise.reject({ + statusCode: 404, + message: 'Evaluation dataset collection not found' + }); + } + + if (String(datasetCollection.teamId) !== teamId) { + return Promise.reject({ + statusCode: 403, + message: 'No permission to access this dataset collection' + }); + } + + try { + // Read and parse CSV file + const { rawText } = await readFileContentFromMongo({ + teamId, + tmbId, + bucketName: BucketNameEnum.dataset, + fileId, + getFormatText: false + }); + + const csvRows = parseCSVContent(rawText); + + if (csvRows.length === 0) { + return Promise.reject({ + statusCode: 400, + message: 'CSV file contains no data rows' + }); + } + + // Validate row limit (prevent memory issues) + if (csvRows.length > 10000) { + return Promise.reject({ + statusCode: 400, + message: 'CSV file cannot contain more than 10,000 rows' + }); + } + + // Prepare data for bulk insert + const evalDatasetRecords = csvRows.map((row) => { + // Parse context arrays + let contextArray: string[] = []; + let retrievalContextArray: string[] = []; + let metadataObj: Record = {}; + + // Parse context (optional) + if (row.context !== undefined && row.context) { + try { + const parsed = JSON.parse(row.context); + if (Array.isArray(parsed)) { + contextArray = parsed.filter((item) => typeof item === 'string'); + } else if (typeof parsed === 'string') { + contextArray = [parsed]; + } + } catch { + // If not JSON, treat as single string + contextArray = [row.context]; + } + } + + // Parse retrieval_context (optional) + if (row.retrieval_context !== undefined && row.retrieval_context) { + try { + const parsed = JSON.parse(row.retrieval_context); + if (Array.isArray(parsed)) { + retrievalContextArray = parsed.filter((item) => typeof item === 'string'); + } else if (typeof parsed === 'string') { + retrievalContextArray = [parsed]; + } + } catch { + // If not JSON, treat 
as single string + retrievalContextArray = [row.retrieval_context]; + } + } + + // Parse metadata (optional) + if (row.metadata !== undefined && row.metadata) { + try { + const parsed = JSON.parse(row.metadata); + if (typeof parsed === 'object' && parsed !== null && !Array.isArray(parsed)) { + metadataObj = parsed; + } + } catch { + // Invalid JSON, use empty object + metadataObj = {}; + } + } + + return { + datasetId: datasetCollectionId, + user_input: row.user_input, + expected_output: row.expected_output, + actual_output: row.actual_output || '', + context: contextArray, + retrieval_context: retrievalContextArray, + metadata: metadataObj, + createFrom: EvalDatasetDataCreateFromEnum.fileImport + }; + }); + + // Bulk insert evaluation dataset data + await mongoSessionRun(async (session) => { + await MongoEvalDatasetData.insertMany(evalDatasetRecords, { + session, + ordered: false // Continue if some documents fail + }); + }); + + // TODO: Add to quality evaluation queue if enabled + if (enableQualityEvaluation && qualityEvaluationModel) { + // Queue implementation would go here + // This would involve: + // 1. Creating evaluation tasks for each imported row + // 2. Adding tasks to evaluation queue + // 3. Handling queue processing for quality assessment + // 4. Updating evaluation results back to database + // 5. 
Implementing billing logic for evaluation + console.log('Quality evaluation queuing not implemented yet'); + } + + // TODO: Add audit log for import operation + // This would track: + // - Who performed the import + // - What file was imported + // - How many records were imported + // - When the import occurred + // - Success/failure status + + // TODO: Add tracking for import metrics + // This would include: + // - Import performance metrics + // - Success/failure rates + // - Data volume statistics + + return 'success'; + } catch (error: any) { + // Handle parsing errors + if (error.message && typeof error.message === 'string') { + return Promise.reject({ + statusCode: 400, + message: `CSV parsing error: ${error.message}` + }); + } + + // Re-throw other errors + throw error; + } +} + +export default NextAPI(handler); + +// Export handler for testing +export const handler_test = process.env.NODE_ENV === 'test' ? handler : undefined; diff --git a/projects/app/src/pages/api/core/evaluation/dataset/data/list.ts b/projects/app/src/pages/api/core/evaluation/dataset/data/list.ts new file mode 100644 index 000000000000..efcb13bdb5fd --- /dev/null +++ b/projects/app/src/pages/api/core/evaluation/dataset/data/list.ts @@ -0,0 +1,134 @@ +import type { ApiRequestProps } from '@fastgpt/service/type/next'; +import { NextAPI } from '@/service/middleware/entry'; +import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; +import { ReadPermissionVal } from '@fastgpt/global/support/permission/constant'; +import { MongoEvalDatasetData } from '@fastgpt/service/core/evaluation/evalDatasetDataSchema'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; +import { parsePaginationRequest } from '@fastgpt/service/common/api/pagination'; +import { Types } from '@fastgpt/service/common/mongo'; +import type { + listEvalDatasetDataBody, + listEvalDatasetDataResponse +} from '@fastgpt/global/core/evaluation/api'; +import 
{ replaceRegChars } from '@fastgpt/global/common/string/tools'; + +async function handler( + req: ApiRequestProps +): Promise { + const { teamId } = await authUserPer({ + req, + authToken: true, + authApiKey: true, + per: ReadPermissionVal + }); + + // Parse request parameters + const { offset, pageSize } = parsePaginationRequest(req); + const { collectionId, searchKey } = req.body; + + // Validate required parameters + if (!collectionId) { + throw new Error('Collection ID is required'); + } + + // TODO: Audit Log - Log request attempt with parameters + console.log(`[AUDIT] User requested eval dataset data list for collection: ${collectionId}`); + + // Verify collection exists and belongs to team + const collection = await MongoEvalDatasetCollection.findOne({ + _id: new Types.ObjectId(collectionId), + teamId: new Types.ObjectId(teamId) + }); + + if (!collection) { + throw new Error('Collection not found or access denied'); + } + + // Build MongoDB match criteria + const match: Record = { + datasetId: new Types.ObjectId(collectionId) + }; + + // Add search filter if provided + if (searchKey && typeof searchKey === 'string' && searchKey.trim().length > 0) { + const searchRegex = new RegExp(`${replaceRegChars(searchKey.trim())}`, 'i'); + match.$or = [ + { user_input: { $regex: searchRegex } }, + { expected_output: { $regex: searchRegex } }, + { actual_output: { $regex: searchRegex } } + ]; + } + + try { + // TODO: Performance Tracking - Log query execution time + const startTime = Date.now(); + + // Execute aggregation with pagination + const [dataList, total] = await Promise.all([ + MongoEvalDatasetData.aggregate(buildPipeline(match, offset, pageSize)), + MongoEvalDatasetData.countDocuments(match) + ]); + + // TODO: Performance Tracking - Log query completion time + const executionTime = Date.now() - startTime; + console.log(`[PERFORMANCE] Query executed in ${executionTime}ms`); + + // TODO: Audit Log - Log successful response + console.log(`[AUDIT] Successfully 
returned ${dataList.length} items out of ${total} total`); + + return { + total, + list: dataList.map((item) => ({ + _id: String(item._id), + user_input: item.user_input, + actual_output: item.actual_output || '', + expected_output: item.expected_output, + context: item.context || [], + retrieval_context: item.retrieval_context || [], + metadata: item.metadata || {}, + createFrom: item.createFrom, + createTime: item.createTime, + updateTime: item.updateTime + })) + }; + } catch (error) { + // TODO: Error Tracking - Log detailed error information + console.error('[ERROR] Database error in eval dataset data list:', error); + + // TODO: Audit Log - Log failed request + console.log( + `[AUDIT] Failed to retrieve eval dataset data list for collection: ${collectionId}` + ); + + throw error; + } +} + +/** + * Build MongoDB aggregation pipeline + */ +const buildPipeline = (match: Record, offset: number, pageSize: number) => [ + { $match: match }, + { $sort: { createTime: -1 as const } }, + { $skip: offset }, + { $limit: pageSize }, + { + $project: { + _id: 1, + user_input: 1, + actual_output: 1, + expected_output: 1, + context: 1, + retrieval_context: 1, + metadata: 1, + createFrom: 1, + createTime: 1, + updateTime: 1 + } + } +]; + +export default NextAPI(handler); + +// Export handler for testing +export const handler_test = process.env.NODE_ENV === 'test' ? 
handler : undefined; From 0b9bc8d51da7d716d863d853a428ffd08e95e75c Mon Sep 17 00:00:00 2001 From: Jon Date: Sat, 23 Aug 2025 18:07:16 +0800 Subject: [PATCH 007/255] feat: Add dataset quality assessment job handling system --- packages/global/core/evaluation/api.d.ts | 5 + packages/service/common/bullmq/index.ts | 1 + .../service/core/evaluation/dataQualityMq.ts | 93 +++++++++ .../core/evaluation/dataQualityProcessor.ts | 180 ++++++++++++++++++ packages/service/core/evaluation/index.ts | 5 +- .../dataset/data/qualityAssessment.ts | 98 ++++++++++ 6 files changed, 381 insertions(+), 1 deletion(-) create mode 100644 packages/service/core/evaluation/dataQualityMq.ts create mode 100644 packages/service/core/evaluation/dataQualityProcessor.ts create mode 100644 projects/app/src/pages/api/core/evaluation/dataset/data/qualityAssessment.ts diff --git a/packages/global/core/evaluation/api.d.ts b/packages/global/core/evaluation/api.d.ts index ce7f1de623ed..6590901c0f8c 100644 --- a/packages/global/core/evaluation/api.d.ts +++ b/packages/global/core/evaluation/api.d.ts @@ -81,3 +81,8 @@ export type listEvalDatasetDataResponse = PaginationResponse< | 'updateTime' > >; + +export type qualityAssessmentBody = { + dataId: string; + evalModel: string; +}; diff --git a/packages/service/common/bullmq/index.ts b/packages/service/common/bullmq/index.ts index ddd6f2b8f224..e226eebcdc41 100644 --- a/packages/service/common/bullmq/index.ts +++ b/packages/service/common/bullmq/index.ts @@ -21,6 +21,7 @@ const defaultWorkerOpts: Omit = { export enum QueueNames { datasetSync = 'datasetSync', evaluation = 'evaluation', + evalDatasetDataQuality = 'evalDatasetDataQuality', // abondoned websiteSync = 'websiteSync' } diff --git a/packages/service/core/evaluation/dataQualityMq.ts b/packages/service/core/evaluation/dataQualityMq.ts new file mode 100644 index 000000000000..68b23238ddf1 --- /dev/null +++ b/packages/service/core/evaluation/dataQualityMq.ts @@ -0,0 +1,93 @@ +import { getQueue, getWorker, 
QueueNames } from '../../common/bullmq'; +import { type Processor } from 'bullmq'; +import { addLog } from '../../common/system/log'; + +export type EvalDatasetDataQualityData = { + dataId: string; + evalModel: string; +}; + +export const evalDatasetDataQualityQueue = getQueue( + QueueNames.evalDatasetDataQuality, + { + defaultJobOptions: { + attempts: 3, + backoff: { + type: 'exponential', + delay: 1000 + } + } + } +); + +const concurrency = process.env.EVAL_DATA_QUALITY_CONCURRENCY + ? Number(process.env.EVAL_DATA_QUALITY_CONCURRENCY) + : 2; + +export const getEvalDatasetDataQualityWorker = ( + processor: Processor +) => { + return getWorker(QueueNames.evalDatasetDataQuality, processor, { + removeOnFail: { + count: 1000 // Keep last 1000 failed jobs + }, + concurrency: concurrency + }); +}; + +export const addEvalDatasetDataQualityJob = (data: EvalDatasetDataQualityData) => { + const dataId = String(data.dataId); + + return evalDatasetDataQualityQueue.add(dataId, data, { deduplication: { id: dataId } }); +}; + +export const checkEvalDatasetDataQualityJobActive = async (dataId: string): Promise => { + try { + const jobId = await evalDatasetDataQualityQueue.getDeduplicationJobId(String(dataId)); + if (!jobId) return false; + + const job = await evalDatasetDataQualityQueue.getJob(jobId); + if (!job) return false; + + const jobState = await job.getState(); + return ['waiting', 'delayed', 'prioritized', 'active'].includes(jobState); + } catch (error) { + addLog.error('Failed to check eval dataset data quality job status', { dataId, error }); + return false; + } +}; + +export const removeEvalDatasetDataQualityJob = async (dataId: string): Promise => { + const formatDataId = String(dataId); + try { + const jobId = await evalDatasetDataQualityQueue.getDeduplicationJobId(formatDataId); + if (!jobId) { + addLog.warn('No job found to remove', { dataId }); + return false; + } + + const job = await evalDatasetDataQualityQueue.getJob(jobId); + if (!job) { + addLog.warn('Job 
not found in queue', { dataId, jobId }); + return false; + } + + const jobState = await job.getState(); + + if (['waiting', 'delayed', 'prioritized'].includes(jobState)) { + await job.remove(); + addLog.info('Eval dataset data quality job removed successfully', { + dataId, + jobId, + jobState + }); + return true; + } else { + addLog.warn('Cannot remove active or completed job', { dataId, jobId, jobState }); + return false; + } + } catch (error) { + addLog.error('Failed to remove eval dataset data quality job', { dataId, error }); + return false; + } +}; diff --git a/packages/service/core/evaluation/dataQualityProcessor.ts b/packages/service/core/evaluation/dataQualityProcessor.ts new file mode 100644 index 000000000000..361225fa00d6 --- /dev/null +++ b/packages/service/core/evaluation/dataQualityProcessor.ts @@ -0,0 +1,180 @@ +import type { Job } from 'bullmq'; +import { addLog } from '../../common/system/log'; +import { MongoEvalDatasetData } from './evalDatasetDataSchema'; +import type { EvalDatasetDataQualityData } from './dataQualityMq'; + +// FastAPI service interface schemas +export type InputData = { + user_input?: string; + actual_output?: string; + expected_output?: string; + context?: string[]; + retrieval_context?: string[]; + metadata?: Record; +}; + +export type ModelConfig = { + name: string; + base_url?: string; + api_key?: string; + parameters?: Record; + timeout?: number; +}; + +export type MetricConfig = { + metric_name: string; + prompt?: string; +}; + +export type EvaluationRequest = { + llm_config: ModelConfig; + embedding_config: ModelConfig; + metric_config: MetricConfig; + input_data: InputData; +}; + +export type EvaluationResult = { + metric_name: string; + score: number; + reason?: string; + run_logs?: Record; +}; + +export type Usage = { + model_type: 'llm' | 'embed'; + prompt_tokens?: number; + completion_tokens?: number; + total_tokens?: number; +}; + +export type EvaluationResponse = { + request_id: string; + status: 'success' | 
'failed'; + data?: EvaluationResult; + usages?: Usage[]; + error?: string; +}; +// TODO: function to simulate calling the FastAPI microservice +async function mockEvaluationService(request: EvaluationRequest): Promise { + addLog.info('Mock: Calling evaluation microservice', { + request_id: request.input_data.metadata?.request_id + }); + + // Simulate API delay + await new Promise((resolve) => setTimeout(resolve, 1000 + Math.random() * 2000)); + + // Mock successful response with random score + const mockScore = 0.6 + Math.random() * 0.4; // Score between 0.6 and 1.0 + + return { + request_id: request.input_data.metadata?.request_id || 'mock-request-id', + status: 'success', + data: { + metric_name: request.metric_config.metric_name, + score: Math.round(mockScore * 100) / 100, + reason: `Mock evaluation result for ${request.metric_config.metric_name}`, + run_logs: { + mock: true, + timestamp: new Date().toISOString(), + model: request.llm_config.name + } + }, + usages: [ + { + model_type: 'llm', + prompt_tokens: 100, + completion_tokens: 50, + total_tokens: 150 + } + ] + }; +} + +// Queue processor function +export const processEvalDatasetDataQuality = async (job: Job) => { + const { dataId: DataId, evalModel } = job.data; + + addLog.info('Processing eval dataset data quality job', { DataId, evalModel }); + + try { + await MongoEvalDatasetData.findByIdAndUpdate(DataId, { + $set: { + 'metadata.qualityStatus': 'evaluating', + 'metadata.qualityStartTime': new Date() + } + }); + + const datasetData = await MongoEvalDatasetData.findById(DataId); + if (!datasetData) { + throw new Error(`Dataset data not found: ${DataId}`); + } + + // Prepare the evaluation request + const evaluationRequest: EvaluationRequest = { + llm_config: { + name: evalModel, + parameters: { + temperature: 0.7, + max_tokens: 1000 + }, + timeout: 600 + }, + embedding_config: { + name: 'text-embedding-ada-002', + timeout: 600 + }, + metric_config: { + metric_name: 'quality_assessment' + }, + input_data: 
{ + user_input: datasetData.user_input, + actual_output: datasetData.actual_output, + expected_output: datasetData.expected_output, + context: datasetData.context, + retrieval_context: datasetData.retrieval_context, + metadata: { + ...datasetData.metadata, + request_id: `${DataId}-${Date.now()}` + } + } + }; + + // Call mock evaluation service + const evaluationResult = await mockEvaluationService(evaluationRequest); + + if (evaluationResult.status === 'success' && evaluationResult.data) { + // Update dataset data with successful evaluation result + await MongoEvalDatasetData.findByIdAndUpdate(DataId, { + $set: { + 'metadata.qualityStatus': 'completed', + 'metadata.qualityScore': evaluationResult.data.score, + 'metadata.qualityReason': evaluationResult.data.reason, + 'metadata.qualityRunLogs': evaluationResult.data.run_logs, + 'metadata.qualityUsages': evaluationResult.usages, + 'metadata.qualityFinishTime': new Date(), + 'metadata.qualityModel': evalModel + } + }); + + addLog.info('Eval dataset data quality job completed successfully', { + DataId, + score: evaluationResult.data.score + }); + } else { + throw new Error(evaluationResult.error || 'Evaluation failed'); + } + } catch (error) { + addLog.error('Eval dataset data quality job failed', { DataId, error }); + + // Update status to failed + await MongoEvalDatasetData.findByIdAndUpdate(DataId, { + $set: { + 'metadata.qualityStatus': 'failed', + 'metadata.qualityError': error instanceof Error ? 
error.message : 'Unknown error', + 'metadata.qualityFinishTime': new Date() + } + }); + + throw error; + } +}; diff --git a/packages/service/core/evaluation/index.ts b/packages/service/core/evaluation/index.ts index 7f2156c94148..ced47770d5bd 100644 --- a/packages/service/core/evaluation/index.ts +++ b/packages/service/core/evaluation/index.ts @@ -40,6 +40,8 @@ import { delay } from '@fastgpt/global/common/system/utils'; import { removeDatasetCiteText } from '../../core/ai/utils'; import { getUserChatInfoAndAuthTeamPoints } from '../../support/permission/auth/team'; import { getRunningUserInfoByTmbId } from '../../support/user/team/utils'; +import { getEvalDatasetDataQualityWorker } from './dataQualityMq'; +import { processEvalDatasetDataQuality } from './dataQualityProcessor'; type AppContextType = { appData: AppSchema; @@ -52,7 +54,8 @@ type AppContextType = { export const initEvaluationWorker = () => { addLog.info('Init Evaluation Worker...'); - return getEvaluationWorker(processor); + getEvalDatasetDataQualityWorker(processEvalDatasetDataQuality); + getEvaluationWorker(processor); }; const dealAiPointCheckError = async (evalId: string, error: any) => { diff --git a/projects/app/src/pages/api/core/evaluation/dataset/data/qualityAssessment.ts b/projects/app/src/pages/api/core/evaluation/dataset/data/qualityAssessment.ts new file mode 100644 index 000000000000..c386ccc3a72d --- /dev/null +++ b/projects/app/src/pages/api/core/evaluation/dataset/data/qualityAssessment.ts @@ -0,0 +1,98 @@ +import type { ApiRequestProps } from '@fastgpt/service/type/next'; +import { NextAPI } from '@/service/middleware/entry'; +import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; +import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; +import { MongoEvalDatasetData } from '@fastgpt/service/core/evaluation/evalDatasetDataSchema'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; 
+import { + addEvalDatasetDataQualityJob, + removeEvalDatasetDataQualityJob, + checkEvalDatasetDataQualityJobActive +} from '@fastgpt/service/core/evaluation/dataQualityMq'; +import type { qualityAssessmentBody } from '@fastgpt/global/core/evaluation/api'; + +export type QualityAssessmentQuery = {}; +export type QualityAssessmentBody = qualityAssessmentBody; +export type QualityAssessmentResponse = string; + +async function handler( + req: ApiRequestProps +): Promise { + const { dataId, evalModel } = req.body; + + if (!dataId || typeof dataId !== 'string') { + return Promise.reject({ + statusCode: 400, + message: 'dataId is required and must be a string' + }); + } + + if (!evalModel || typeof evalModel !== 'string') { + return Promise.reject({ + statusCode: 400, + message: 'evalModel is required and must be a string' + }); + } + + const { teamId } = await authUserPer({ + req, + authToken: true, + authApiKey: true, + per: WritePermissionVal + }); + + const datasetData = await MongoEvalDatasetData.findById(dataId); + if (!datasetData) { + return Promise.reject({ + statusCode: 404, + message: 'Dataset data not found' + }); + } + + const collection = await MongoEvalDatasetCollection.findOne({ + _id: datasetData.datasetId, + teamId + }); + + if (!collection) { + return Promise.reject({ + statusCode: 404, + message: 'Dataset collection not found or access denied' + }); + } + + try { + const isJobActive = await checkEvalDatasetDataQualityJobActive(dataId); + if (isJobActive) { + await removeEvalDatasetDataQualityJob(dataId); + } + + await addEvalDatasetDataQualityJob({ + dataId: dataId, + evalModel: evalModel + }); + + await MongoEvalDatasetData.findByIdAndUpdate(dataId, { + $set: { + 'metadata.qualityStatus': 'queuing', + 'metadata.qualityModel': evalModel, + 'metadata.qualityQueueTime': new Date() + } + }); + + // TODO: Add audit log for quality assessment request + // TODO: Add tracking for quality assessment metrics + + return 'success'; + } catch (error) { + return 
Promise.reject({ + statusCode: 500, + message: error instanceof Error ? error.message : 'Failed to queue quality assessment job' + }); + } +} + +export default NextAPI(handler); + +// Export handler for testing +export const handler_test = process.env.NODE_ENV === 'test' ? handler : undefined; From e2e26bd83405acadd30ce49cfd4405adbc80118a Mon Sep 17 00:00:00 2001 From: Jon Date: Mon, 25 Aug 2025 17:01:39 +0800 Subject: [PATCH 008/255] feat: Add data quality evaluation job for dataset import --- .../core/evaluation/dataset/data/fileId.ts | 35 +++++++------------ 1 file changed, 12 insertions(+), 23 deletions(-) diff --git a/projects/app/src/pages/api/core/evaluation/dataset/data/fileId.ts b/projects/app/src/pages/api/core/evaluation/dataset/data/fileId.ts index 20af42ae0cdb..9ea24423fbc6 100644 --- a/projects/app/src/pages/api/core/evaluation/dataset/data/fileId.ts +++ b/projects/app/src/pages/api/core/evaluation/dataset/data/fileId.ts @@ -10,6 +10,7 @@ import { readFileContentFromMongo } from '@fastgpt/service/common/file/gridfs/co import { authCollectionFile } from '@fastgpt/service/support/permission/auth/file'; import { BucketNameEnum } from '@fastgpt/global/common/file/constants'; import type { importEvalDatasetFromFileBody } from '@fastgpt/global/core/evaluation/api'; +import { addEvalDatasetDataQualityJob } from '@fastgpt/service/core/evaluation/dataQualityMq'; export type EvalDatasetImportFromFileQuery = {}; export type EvalDatasetImportFromFileBody = importEvalDatasetFromFileBody; @@ -293,39 +294,27 @@ async function handler( }; }); - // Bulk insert evaluation dataset data - await mongoSessionRun(async (session) => { - await MongoEvalDatasetData.insertMany(evalDatasetRecords, { + // Bulk insert evaluation dataset data and get inserted documents + const insertedRecords = await mongoSessionRun(async (session) => { + return await MongoEvalDatasetData.insertMany(evalDatasetRecords, { session, ordered: false // Continue if some documents fail }); }); - // TODO: 
Add to quality evaluation queue if enabled + // Add to quality evaluation queue if enabled if (enableQualityEvaluation && qualityEvaluationModel) { - // Queue implementation would go here - // This would involve: - // 1. Creating evaluation tasks for each imported row - // 2. Adding tasks to evaluation queue - // 3. Handling queue processing for quality assessment - // 4. Updating evaluation results back to database - // 5. Implementing billing logic for evaluation - console.log('Quality evaluation queuing not implemented yet'); + const evaluationJobs = insertedRecords.map((record) => + addEvalDatasetDataQualityJob({ + dataId: record._id.toString(), + evalModel: qualityEvaluationModel + }) + ); + await Promise.allSettled(evaluationJobs); } // TODO: Add audit log for import operation - // This would track: - // - Who performed the import - // - What file was imported - // - How many records were imported - // - When the import occurred - // - Success/failure status - // TODO: Add tracking for import metrics - // This would include: - // - Import performance metrics - // - Success/failure rates - // - Data volume statistics return 'success'; } catch (error: any) { From fd71849031b99799f65913f13522f89aaae12966 Mon Sep 17 00:00:00 2001 From: Jon Date: Mon, 25 Aug 2025 18:04:44 +0800 Subject: [PATCH 009/255] feat: Add API for updating and deleting evaluation datasets --- packages/global/core/evaluation/api.d.ts | 23 ++- .../core/evaluation/dataset/data/delete.ts | 99 ++++++++++ .../core/evaluation/dataset/data/update.ts | 180 ++++++++++++++++++ 3 files changed, 298 insertions(+), 4 deletions(-) create mode 100644 projects/app/src/pages/api/core/evaluation/dataset/data/delete.ts create mode 100644 projects/app/src/pages/api/core/evaluation/dataset/data/update.ts diff --git a/packages/global/core/evaluation/api.d.ts b/packages/global/core/evaluation/api.d.ts index 6590901c0f8c..a1ec45d21710 100644 --- a/packages/global/core/evaluation/api.d.ts +++ 
b/packages/global/core/evaluation/api.d.ts @@ -45,15 +45,17 @@ export type listEvalDatasetCollectionResponse = PaginationResponse< } >; -export type importEvalDatasetFromFileBody = { - fileId: string; - collectionId: string; +type QualityEvaluationBase = { enableQualityEvaluation: boolean; qualityEvaluationModel?: string; }; -export type createEvalDatasetDataBody = { +export type importEvalDatasetFromFileBody = { + fileId: string; collectionId: string; +} & QualityEvaluationBase; + +type EvalDatasetDataBase = { user_input: string; actual_output?: string; expected_output: string; @@ -61,6 +63,10 @@ export type createEvalDatasetDataBody = { retrieval_context?: string[]; }; +export type createEvalDatasetDataBody = EvalDatasetDataBase & { + collectionId: string; +}; + export type listEvalDatasetDataBody = PaginationProps<{ collectionId: string; searchKey?: string; @@ -82,7 +88,16 @@ export type listEvalDatasetDataResponse = PaginationResponse< > >; +export type updateEvalDatasetDataBody = EvalDatasetDataBase & + QualityEvaluationBase & { + dataId: string; + }; + export type qualityAssessmentBody = { dataId: string; evalModel: string; }; + +export type deleteEvalDatasetDataQuery = { + dataId: string; +}; diff --git a/projects/app/src/pages/api/core/evaluation/dataset/data/delete.ts b/projects/app/src/pages/api/core/evaluation/dataset/data/delete.ts new file mode 100644 index 000000000000..93cb15d3b0fa --- /dev/null +++ b/projects/app/src/pages/api/core/evaluation/dataset/data/delete.ts @@ -0,0 +1,99 @@ +import type { ApiRequestProps } from '@fastgpt/service/type/next'; +import { NextAPI } from '@/service/middleware/entry'; +import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; +import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; +import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun'; +import { MongoEvalDatasetData } from '@fastgpt/service/core/evaluation/evalDatasetDataSchema'; +import { 
MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; +import type { deleteEvalDatasetDataQuery } from '@fastgpt/global/core/evaluation/api'; +import { + removeEvalDatasetDataQualityJob, + checkEvalDatasetDataQualityJobActive +} from '@fastgpt/service/core/evaluation/dataQualityMq'; +import { addLog } from '@fastgpt/service/common/system/log'; + +export type EvalDatasetDataDeleteQuery = deleteEvalDatasetDataQuery; +export type EvalDatasetDataDeleteBody = {}; +export type EvalDatasetDataDeleteResponse = string; + +async function handler( + req: ApiRequestProps +): Promise { + const { dataId } = req.query; + + if (!dataId || typeof dataId !== 'string') { + return Promise.reject({ + statusCode: 400, + message: 'dataId is required and must be a string' + }); + } + + const { teamId } = await authUserPer({ + req, + authToken: true, + authApiKey: true, + per: WritePermissionVal + }); + + await mongoSessionRun(async (session) => { + const existingData = await MongoEvalDatasetData.findById(dataId).session(session); + + if (!existingData) { + return Promise.reject({ + statusCode: 404, + message: 'Dataset data not found' + }); + } + + const collection = await MongoEvalDatasetCollection.findOne({ + _id: existingData.datasetId, + teamId + }).session(session); + + if (!collection) { + return Promise.reject({ + statusCode: 403, + message: 'Access denied or dataset collection not found' + }); + } + + const hasActiveQualityJob = await checkEvalDatasetDataQualityJobActive(dataId); + + if (hasActiveQualityJob) { + addLog.info('Removing active quality evaluation job before deletion', { + dataId, + teamId + }); + + try { + await removeEvalDatasetDataQualityJob(dataId); + addLog.info('Quality evaluation job removed successfully before deletion', { + dataId, + teamId + }); + } catch (error) { + addLog.error('Failed to remove quality evaluation job before deletion', { + dataId, + teamId, + error + }); + // Continue with deletion even if queue 
removal fails + } + } + + await MongoEvalDatasetData.deleteOne({ _id: dataId }, { session }); + + addLog.info('Evaluation dataset data deleted successfully', { + dataId, + datasetId: existingData.datasetId, + teamId + }); + }); + + return 'success'; +} + +export default NextAPI(handler); + +// Export handler for testing +export const handler_test = process.env.NODE_ENV === 'test' ? handler : undefined; diff --git a/projects/app/src/pages/api/core/evaluation/dataset/data/update.ts b/projects/app/src/pages/api/core/evaluation/dataset/data/update.ts new file mode 100644 index 000000000000..5d032887bb2d --- /dev/null +++ b/projects/app/src/pages/api/core/evaluation/dataset/data/update.ts @@ -0,0 +1,180 @@ +import type { ApiRequestProps } from '@fastgpt/service/type/next'; +import { NextAPI } from '@/service/middleware/entry'; +import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; +import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; +import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun'; +import { MongoEvalDatasetData } from '@fastgpt/service/core/evaluation/evalDatasetDataSchema'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; +import type { updateEvalDatasetDataBody } from '@fastgpt/global/core/evaluation/api'; +import { + removeEvalDatasetDataQualityJob, + addEvalDatasetDataQualityJob +} from '@fastgpt/service/core/evaluation/dataQualityMq'; +import { addLog } from '@fastgpt/service/common/system/log'; + +export type EvalDatasetDataUpdateQuery = {}; +export type EvalDatasetDataUpdateBody = updateEvalDatasetDataBody; +export type EvalDatasetDataUpdateResponse = string; + +async function handler( + req: ApiRequestProps +): Promise { + const { + dataId, + user_input, + actual_output, + expected_output, + context, + retrieval_context, + enableQualityEvaluation, + qualityEvaluationModel + } = req.body; + + if (!dataId || typeof dataId !== 
'string') { + return Promise.reject({ + statusCode: 400, + message: 'dataId is required and must be a string' + }); + } + + if (!user_input || typeof user_input !== 'string' || user_input.trim().length === 0) { + return Promise.reject({ + statusCode: 400, + message: 'user_input is required and must be a non-empty string' + }); + } + + if ( + !expected_output || + typeof expected_output !== 'string' || + expected_output.trim().length === 0 + ) { + return Promise.reject({ + statusCode: 400, + message: 'expected_output is required and must be a non-empty string' + }); + } + + if (actual_output !== undefined && typeof actual_output !== 'string') { + return Promise.reject({ + statusCode: 400, + message: 'actual_output must be a string if provided' + }); + } + + if ( + context !== undefined && + (!Array.isArray(context) || !context.every((item) => typeof item === 'string')) + ) { + return Promise.reject({ + statusCode: 400, + message: 'context must be an array of strings if provided' + }); + } + + if ( + retrieval_context !== undefined && + (!Array.isArray(retrieval_context) || + !retrieval_context.every((item) => typeof item === 'string')) + ) { + return Promise.reject({ + statusCode: 400, + message: 'retrieval_context must be an array of strings if provided' + }); + } + + if (typeof enableQualityEvaluation !== 'boolean') { + return Promise.reject({ + statusCode: 400, + message: 'enableQualityEvaluation is required and must be a boolean' + }); + } + + if ( + enableQualityEvaluation && + (!qualityEvaluationModel || typeof qualityEvaluationModel !== 'string') + ) { + return Promise.reject({ + statusCode: 400, + message: 'qualityEvaluationModel is required when enableQualityEvaluation is true' + }); + } + + const { teamId } = await authUserPer({ + req, + authToken: true, + authApiKey: true, + per: WritePermissionVal + }); + + await mongoSessionRun(async (session) => { + const existingData = await MongoEvalDatasetData.findById(dataId).session(session); + + if 
(!existingData) { + return Promise.reject({ + statusCode: 404, + message: 'Dataset data not found' + }); + } + + const collection = await MongoEvalDatasetCollection.findOne({ + _id: existingData.datasetId, + teamId + }).session(session); + + if (!collection) { + return Promise.reject({ + statusCode: 403, + message: 'Access denied or dataset collection not found' + }); + } + + await MongoEvalDatasetData.updateOne( + { _id: dataId }, + { + user_input: user_input.trim(), + actual_output: actual_output?.trim() || '', + expected_output: expected_output.trim(), + context: context || [], + retrieval_context: retrieval_context || [], + updateTime: new Date() + }, + { session } + ); + + if (enableQualityEvaluation && qualityEvaluationModel) { + try { + // Remove existing quality assessment task if any + await removeEvalDatasetDataQualityJob(dataId); + + // Enqueue new quality assessment task + await addEvalDatasetDataQualityJob({ + dataId, + evalModel: qualityEvaluationModel + }); + + addLog.info('Quality evaluation task enqueued successfully', { + dataId, + evalModel: qualityEvaluationModel, + teamId + }); + } catch (error) { + addLog.error('Failed to manage quality evaluation task', { + dataId, + evalModel: qualityEvaluationModel, + teamId, + error + }); + // Note: We don't throw the error to prevent the update operation from failing + // The data update should succeed even if quality evaluation task management fails + } + } + }); + + return 'success'; +} + +export default NextAPI(handler); + +// Export handler for testing +export const handler_test = process.env.NODE_ENV === 'test' ? 
handler : undefined; From 5be64259da83a8fd695a3c0a227afefbe86bf7a6 Mon Sep 17 00:00:00 2001 From: Jon Date: Tue, 26 Aug 2025 12:25:16 +0800 Subject: [PATCH 010/255] feat: Add teamId and tmbId to evaluation dataset handlers --- .../core/evaluation/evalDatasetDataSchema.ts | 17 ++++++++++++++++- .../core/evaluation/dataset/collection/list.ts | 2 +- .../api/core/evaluation/dataset/data/create.ts | 4 +++- .../api/core/evaluation/dataset/data/delete.ts | 2 +- .../api/core/evaluation/dataset/data/fileId.ts | 2 ++ .../api/core/evaluation/dataset/data/list.ts | 2 +- .../dataset/data/qualityAssessment.ts | 2 +- .../api/core/evaluation/dataset/data/update.ts | 2 +- 8 files changed, 26 insertions(+), 7 deletions(-) diff --git a/packages/service/core/evaluation/evalDatasetDataSchema.ts b/packages/service/core/evaluation/evalDatasetDataSchema.ts index 97a771cf536b..2d3bc8bea896 100644 --- a/packages/service/core/evaluation/evalDatasetDataSchema.ts +++ b/packages/service/core/evaluation/evalDatasetDataSchema.ts @@ -5,12 +5,27 @@ import { EvalDatasetDataCreateFromEnum, EvalDatasetDataCreateFromValues } from '@fastgpt/global/core/evaluation/constants'; +import { + TeamCollectionName, + TeamMemberCollectionName +} from '@fastgpt/global/support/user/team/constant'; const { Schema } = connectionMongo; -export const EvalDatasetDataCollectionName = 'eval_dataset_data'; +export const EvalDatasetDataCollectionName = 'eval_dataset_datas'; const EvalDatasetDataSchema = new Schema({ + teamId: { + type: Schema.Types.ObjectId, + ref: TeamCollectionName, + required: true, + index: true + }, + tmbId: { + type: Schema.Types.ObjectId, + ref: TeamMemberCollectionName, + required: true + }, datasetId: { type: Schema.Types.ObjectId, ref: EvalDatasetCollectionName, diff --git a/projects/app/src/pages/api/core/evaluation/dataset/collection/list.ts b/projects/app/src/pages/api/core/evaluation/dataset/collection/list.ts index f87e6764c9be..e0536e0cad9f 100644 --- 
a/projects/app/src/pages/api/core/evaluation/dataset/collection/list.ts +++ b/projects/app/src/pages/api/core/evaluation/dataset/collection/list.ts @@ -14,7 +14,7 @@ import { replaceRegChars } from '@fastgpt/global/common/string/tools'; async function handler( req: ApiRequestProps ): Promise { - const { teamId } = await authUserPer({ + const { teamId, tmbId } = await authUserPer({ req, authToken: true, authApiKey: true, diff --git a/projects/app/src/pages/api/core/evaluation/dataset/data/create.ts b/projects/app/src/pages/api/core/evaluation/dataset/data/create.ts index 5828132e5c7f..720adbcc6c55 100644 --- a/projects/app/src/pages/api/core/evaluation/dataset/data/create.ts +++ b/projects/app/src/pages/api/core/evaluation/dataset/data/create.ts @@ -71,7 +71,7 @@ async function handler( }); } - const { teamId } = await authUserPer({ + const { teamId, tmbId } = await authUserPer({ req, authToken: true, authApiKey: true, @@ -95,6 +95,8 @@ async function handler( const [{ _id }] = await MongoEvalDatasetData.create( [ { + teamId, + tmbId, datasetId: collectionId, user_input: user_input.trim(), actual_output: actual_output?.trim() || '', diff --git a/projects/app/src/pages/api/core/evaluation/dataset/data/delete.ts b/projects/app/src/pages/api/core/evaluation/dataset/data/delete.ts index 93cb15d3b0fa..6c2bc1c5051b 100644 --- a/projects/app/src/pages/api/core/evaluation/dataset/data/delete.ts +++ b/projects/app/src/pages/api/core/evaluation/dataset/data/delete.ts @@ -28,7 +28,7 @@ async function handler( }); } - const { teamId } = await authUserPer({ + const { teamId, tmbId } = await authUserPer({ req, authToken: true, authApiKey: true, diff --git a/projects/app/src/pages/api/core/evaluation/dataset/data/fileId.ts b/projects/app/src/pages/api/core/evaluation/dataset/data/fileId.ts index 9ea24423fbc6..c4d39d058b1d 100644 --- a/projects/app/src/pages/api/core/evaluation/dataset/data/fileId.ts +++ b/projects/app/src/pages/api/core/evaluation/dataset/data/fileId.ts @@ -283,6 
+283,8 @@ async function handler( } return { + teamId, + tmbId, datasetId: datasetCollectionId, user_input: row.user_input, expected_output: row.expected_output, diff --git a/projects/app/src/pages/api/core/evaluation/dataset/data/list.ts b/projects/app/src/pages/api/core/evaluation/dataset/data/list.ts index efcb13bdb5fd..5dcc72bbfae9 100644 --- a/projects/app/src/pages/api/core/evaluation/dataset/data/list.ts +++ b/projects/app/src/pages/api/core/evaluation/dataset/data/list.ts @@ -15,7 +15,7 @@ import { replaceRegChars } from '@fastgpt/global/common/string/tools'; async function handler( req: ApiRequestProps ): Promise { - const { teamId } = await authUserPer({ + const { teamId, tmbId } = await authUserPer({ req, authToken: true, authApiKey: true, diff --git a/projects/app/src/pages/api/core/evaluation/dataset/data/qualityAssessment.ts b/projects/app/src/pages/api/core/evaluation/dataset/data/qualityAssessment.ts index c386ccc3a72d..b99292e3109d 100644 --- a/projects/app/src/pages/api/core/evaluation/dataset/data/qualityAssessment.ts +++ b/projects/app/src/pages/api/core/evaluation/dataset/data/qualityAssessment.ts @@ -34,7 +34,7 @@ async function handler( }); } - const { teamId } = await authUserPer({ + const { teamId, tmbId } = await authUserPer({ req, authToken: true, authApiKey: true, diff --git a/projects/app/src/pages/api/core/evaluation/dataset/data/update.ts b/projects/app/src/pages/api/core/evaluation/dataset/data/update.ts index 5d032887bb2d..6d907bd0fd9d 100644 --- a/projects/app/src/pages/api/core/evaluation/dataset/data/update.ts +++ b/projects/app/src/pages/api/core/evaluation/dataset/data/update.ts @@ -100,7 +100,7 @@ async function handler( }); } - const { teamId } = await authUserPer({ + const { teamId, tmbId } = await authUserPer({ req, authToken: true, authApiKey: true, From a79f6e9b49ec1130caf4ddf2ea355bcd7102317d Mon Sep 17 00:00:00 2001 From: Jon Date: Wed, 27 Aug 2025 17:53:31 +0800 Subject: [PATCH 011/255] feat: Add evaluation dataset 
synthesis and smart generation functionality --- packages/global/core/evaluation/api.d.ts | 7 + packages/global/core/evaluation/type.d.ts | 2 + packages/service/common/bullmq/index.ts | 2 + .../core/evaluation/dataSynthesizeMq.ts | 89 +++++++++ .../evaluation/dataSynthesizeProcessor.ts | 151 ++++++++++++++ packages/service/core/evaluation/index.ts | 20 ++ .../core/evaluation/smartGenerateMq.ts | 87 ++++++++ .../core/evaluation/smartGenerateProcessor.ts | 188 ++++++++++++++++++ .../evaluation/dataset/collection/create.ts | 25 +-- .../evaluation/dataset/collection/update.ts | 40 +--- .../core/evaluation/dataset/data/create.ts | 35 +--- .../core/evaluation/dataset/data/delete.ts | 15 +- .../evaluation/dataset/data/smartGenerate.ts | 106 ++++++++++ .../core/evaluation/dataset/data/update.ts | 52 ++--- 14 files changed, 687 insertions(+), 132 deletions(-) create mode 100644 packages/service/core/evaluation/dataSynthesizeMq.ts create mode 100644 packages/service/core/evaluation/dataSynthesizeProcessor.ts create mode 100644 packages/service/core/evaluation/smartGenerateMq.ts create mode 100644 packages/service/core/evaluation/smartGenerateProcessor.ts create mode 100644 projects/app/src/pages/api/core/evaluation/dataset/data/smartGenerate.ts diff --git a/packages/global/core/evaluation/api.d.ts b/packages/global/core/evaluation/api.d.ts index a1ec45d21710..4094fe344598 100644 --- a/packages/global/core/evaluation/api.d.ts +++ b/packages/global/core/evaluation/api.d.ts @@ -101,3 +101,10 @@ export type qualityAssessmentBody = { export type deleteEvalDatasetDataQuery = { dataId: string; }; + +export type smartGenerateEvalDatasetBody = { + collectionId: string; + datasetCollectionIds: string[]; + count?: number; + intelligentGenerationModel: string; +}; diff --git a/packages/global/core/evaluation/type.d.ts b/packages/global/core/evaluation/type.d.ts index 6e3681d17aad..f1caa424107d 100644 --- a/packages/global/core/evaluation/type.d.ts +++ 
b/packages/global/core/evaluation/type.d.ts @@ -64,6 +64,8 @@ export type EvalDatasetCollectionSchemaType = { export type EvalDatasetDataSchemaType = { _id: string; + teamId: string; + tmbId: string; datasetId: string; user_input: string; actual_output: string; diff --git a/packages/service/common/bullmq/index.ts b/packages/service/common/bullmq/index.ts index e226eebcdc41..7f04989161dd 100644 --- a/packages/service/common/bullmq/index.ts +++ b/packages/service/common/bullmq/index.ts @@ -22,6 +22,8 @@ export enum QueueNames { datasetSync = 'datasetSync', evaluation = 'evaluation', evalDatasetDataQuality = 'evalDatasetDataQuality', + evalDatasetSmartGenerate = 'evalDatasetSmartGenerate', + evalDatasetDataSynthesize = 'evalDatasetDataSynthesize', // abondoned websiteSync = 'websiteSync' } diff --git a/packages/service/core/evaluation/dataSynthesizeMq.ts b/packages/service/core/evaluation/dataSynthesizeMq.ts new file mode 100644 index 000000000000..44b38414e2ab --- /dev/null +++ b/packages/service/core/evaluation/dataSynthesizeMq.ts @@ -0,0 +1,89 @@ +import { getQueue, getWorker, QueueNames } from '../../common/bullmq'; +import { type Processor } from 'bullmq'; +import { addLog } from '../../common/system/log'; + +export type EvalDatasetDataSynthesizeData = { + dataId: string; + intelligentGenerationModel: string; + evalDatasetCollectionId: string; +}; + +export const evalDatasetDataSynthesizeQueue = getQueue( + QueueNames.evalDatasetDataSynthesize, + { + defaultJobOptions: { + attempts: 3, + backoff: { + type: 'exponential', + delay: 1000 + } + } + } +); + +const concurrency = process.env.EVAL_DATASET_DATA_SYNTHESIZE_CONCURRENCY + ? 
Number(process.env.EVAL_DATASET_DATA_SYNTHESIZE_CONCURRENCY) + : 5; + +export const getEvalDatasetDataSynthesizeWorker = ( + processor: Processor +) => { + return getWorker(QueueNames.evalDatasetDataSynthesize, processor, { + removeOnFail: { + count: 1000 // Keep last 1000 failed jobs for debugging + }, + concurrency: concurrency + }); +}; + +export const addEvalDatasetDataSynthesizeJob = (data: EvalDatasetDataSynthesizeData) => { + const jobId = `synthesize-${data.dataId}-${Date.now()}`; + + return evalDatasetDataSynthesizeQueue.add(jobId, data, { + deduplication: { id: jobId } + }); +}; + +export const checkEvalDatasetDataSynthesizeJobActive = async ( + evalDatasetCollectionId: string +): Promise => { + try { + const jobs = await evalDatasetDataSynthesizeQueue.getJobs(['waiting', 'active', 'delayed']); + return jobs.some((job) => job.data.evalDatasetCollectionId === evalDatasetCollectionId); + } catch (error) { + addLog.error('Failed to check eval dataset data synthesize job status', { + evalDatasetCollectionId: evalDatasetCollectionId, + error + }); + return false; + } +}; + +export const removeEvalDatasetDataSynthesizeJobs = async ( + evalDatasetCollectionId: string +): Promise => { + try { + const jobs = await evalDatasetDataSynthesizeQueue.getJobs([ + 'waiting', + 'delayed', + 'prioritized' + ]); + const jobsToRemove = jobs.filter( + (job) => job.data.evalDatasetCollectionId === evalDatasetCollectionId + ); + + await Promise.all(jobsToRemove.map((job) => job.remove())); + + addLog.info('Data synthesize jobs removed successfully', { + evalDatasetCollectionId: evalDatasetCollectionId, + removedCount: jobsToRemove.length + }); + return true; + } catch (error) { + addLog.error('Failed to remove data synthesize jobs', { + evalDatasetCollectionId: evalDatasetCollectionId, + error + }); + return false; + } +}; diff --git a/packages/service/core/evaluation/dataSynthesizeProcessor.ts b/packages/service/core/evaluation/dataSynthesizeProcessor.ts new file mode 100644 
index 000000000000..1ba0e4bc0e1b --- /dev/null +++ b/packages/service/core/evaluation/dataSynthesizeProcessor.ts @@ -0,0 +1,151 @@ +import type { Job } from 'bullmq'; +import type { HydratedDocument } from 'mongoose'; +import { addLog } from '../../common/system/log'; +import { MongoEvalDatasetCollection } from './evalDatasetCollectionSchema'; +import { MongoEvalDatasetData } from './evalDatasetDataSchema'; +import { MongoDatasetData } from '../dataset/data/schema'; +import { EvalDatasetDataCreateFromEnum } from '@fastgpt/global/core/evaluation/constants'; +import type { EvalDatasetDataSchemaType } from '@fastgpt/global/core/evaluation/type'; +import { + type EvalDatasetDataSynthesizeData, + getEvalDatasetDataSynthesizeWorker +} from './dataSynthesizeMq'; + +async function processor(job: Job) { + const { dataId, intelligentGenerationModel, evalDatasetCollectionId } = job.data; + + try { + addLog.info('Starting eval dataset data synthesis', { + dataId, + evalDatasetCollectionId, + intelligentGenerationModel + }); + + const sourceData = await MongoDatasetData.findById(dataId); + if (!sourceData) { + throw new Error(`Source dataset data not found: ${dataId}`); + } + + // TODO: Authentication check + const evalDatasetCollection = + await MongoEvalDatasetCollection.findById(evalDatasetCollectionId); + if (!evalDatasetCollection) { + throw new Error(`Eval dataset not found: ${evalDatasetCollectionId}`); + } + + // TODO: Implement AI model call for synthesis + // This is where we would call the intelligent generation model + // to generate expected_output based on user_input + const synthesizedOutput = await synthesizeExpectedOutput( + sourceData.q, + intelligentGenerationModel + ); + + // Create new evaluation dataset record with synthesized data + const evalData: Partial = { + teamId: evalDatasetCollection.teamId, + tmbId: evalDatasetCollection.tmbId, + datasetId: evalDatasetCollectionId, + user_input: sourceData.q, + expected_output: synthesizedOutput, + actual_output: 
'', + context: [], + retrieval_context: [], + metadata: { + sourceDataId: sourceData._id, + sourceDatasetId: sourceData.datasetId, + sourceCollectionId: sourceData.collectionId, + generatedAt: new Date(), + synthesizedAt: new Date(), + intelligentGenerationModel + }, + createFrom: EvalDatasetDataCreateFromEnum.intelligentGeneration + }; + + const insertedRecord = await MongoEvalDatasetData.create(evalData); + + addLog.info('Completed data synthesis', { + dataId, + evalDatasetCollectionId, + insertedRecordId: insertedRecord._id, + synthesizedLength: synthesizedOutput.length + }); + + await checkAndUpdateDatasetStatus(evalDatasetCollectionId); + + // TODO: Add audit log + // TODO: Add tracking metrics + + return { + success: true, + insertedRecordId: insertedRecord._id, + synthesizedOutput: synthesizedOutput + }; + } catch (error) { + addLog.error('Failed to synthesize eval dataset data', { + dataId, + evalDatasetCollectionId, + error: error instanceof Error ? error.message : String(error), + stack: error instanceof Error ? error.stack : undefined + }); + + // TODO: Update record status to error + throw error; + } +} + +async function synthesizeExpectedOutput( + userInput: string, + intelligentGenerationModel: string +): Promise { + // TODO: Implement actual AI model call for synthesis + // For now, return a placeholder that would be replaced with real implementation + + const prompt = `Question: ${userInput}\n\nProvide a comprehensive and accurate answer:`; + + // TODO: Replace with actual model API call + // const response = await callAIModel(intelligentGenerationModel, prompt); + // return response.text; + + // For now, return a synthesized placeholder + return `[AI Generated Answer for: ${userInput.substring(0, 100)}${userInput.length > 100 ? '...' 
: ''}]`; +} + +async function checkAndUpdateDatasetStatus(evalDatasetCollectionId: string) { + try { + const evalDatasetCollection = + await MongoEvalDatasetCollection.findById(evalDatasetCollectionId); + if (!evalDatasetCollection) return; + + const totalGeneratedCount = await MongoEvalDatasetData.countDocuments({ + datasetId: evalDatasetCollectionId, + createFrom: EvalDatasetDataCreateFromEnum.intelligentGeneration + }); + + addLog.info('Dataset synthesis status check', { + evalDatasetCollectionId: evalDatasetCollectionId, + totalGeneratedCount, + expectedCount: evalDatasetCollection.dataCountByGen + }); + + // If we have reached expected count, mark as ready + if (totalGeneratedCount >= evalDatasetCollection.dataCountByGen) { + // TODO: Update dataset status to ready/completed + addLog.info('Eval dataset synthesis completed', { + evalDatasetCollectionId: evalDatasetCollectionId, + totalGeneratedCount, + expectedCount: evalDatasetCollection.dataCountByGen + }); + } + } catch (error) { + addLog.error('Failed to check synthesis status', { + evalDatasetCollectionId: evalDatasetCollectionId, + error + }); + } +} + +// Initialize worker +export const initEvalDatasetDataSynthesizeWorker = () => { + return getEvalDatasetDataSynthesizeWorker(processor); +}; diff --git a/packages/service/core/evaluation/index.ts b/packages/service/core/evaluation/index.ts index ced47770d5bd..f95003d83821 100644 --- a/packages/service/core/evaluation/index.ts +++ b/packages/service/core/evaluation/index.ts @@ -42,6 +42,8 @@ import { getUserChatInfoAndAuthTeamPoints } from '../../support/permission/auth/ import { getRunningUserInfoByTmbId } from '../../support/user/team/utils'; import { getEvalDatasetDataQualityWorker } from './dataQualityMq'; import { processEvalDatasetDataQuality } from './dataQualityProcessor'; +import { getEvalDatasetSmartGenerateWorker } from './smartGenerateMq'; +import { getEvalDatasetDataSynthesizeWorker } from './dataSynthesizeMq'; type AppContextType = { appData: 
AppSchema; @@ -56,6 +58,24 @@ export const initEvaluationWorker = () => { addLog.info('Init Evaluation Worker...'); getEvalDatasetDataQualityWorker(processEvalDatasetDataQuality); getEvaluationWorker(processor); + + import('./smartGenerateProcessor') + .then(({ initEvalDatasetSmartGenerateWorker }) => { + initEvalDatasetSmartGenerateWorker(); + addLog.info('Smart generate worker initialized'); + }) + .catch((error) => { + addLog.error('Failed to init smart generate worker', { error }); + }); + + import('./dataSynthesizeProcessor') + .then(({ initEvalDatasetDataSynthesizeWorker }) => { + initEvalDatasetDataSynthesizeWorker(); + addLog.info('Data synthesize worker initialized'); + }) + .catch((error) => { + addLog.error('Failed to init data synthesize worker', { error }); + }); }; const dealAiPointCheckError = async (evalId: string, error: any) => { diff --git a/packages/service/core/evaluation/smartGenerateMq.ts b/packages/service/core/evaluation/smartGenerateMq.ts new file mode 100644 index 000000000000..aae08d8d53ba --- /dev/null +++ b/packages/service/core/evaluation/smartGenerateMq.ts @@ -0,0 +1,87 @@ +import { getQueue, getWorker, QueueNames } from '../../common/bullmq'; +import { type Processor } from 'bullmq'; +import { addLog } from '../../common/system/log'; + +export type EvalDatasetSmartGenerateData = { + datasetCollectionIds: string[]; + count?: number; + keywords?: string[]; + intelligentGenerationModel: string; + evalDatasetCollectionId: string; +}; + +export const evalDatasetSmartGenerateQueue = getQueue( + QueueNames.evalDatasetSmartGenerate, + { + defaultJobOptions: { + attempts: 3, + backoff: { + type: 'exponential', + delay: 2000 + } + } + } +); + +const concurrency = process.env.EVAL_DATASET_SMART_GENERATE_CONCURRENCY + ? 
Number(process.env.EVAL_DATASET_SMART_GENERATE_CONCURRENCY) + : 2; + +export const getEvalDatasetSmartGenerateWorker = ( + processor: Processor +) => { + return getWorker(QueueNames.evalDatasetSmartGenerate, processor, { + removeOnFail: { + count: 1000 // Keep last 1000 failed jobs for debugging + }, + concurrency: concurrency + }); +}; + +export const addEvalDatasetSmartGenerateJob = (data: EvalDatasetSmartGenerateData) => { + const jobId = `smartgen-${data.evalDatasetCollectionId}-${Date.now()}`; + + return evalDatasetSmartGenerateQueue.add(jobId, data, { + deduplication: { id: jobId } + }); +}; + +export const checkEvalDatasetSmartGenerateJobActive = async ( + evalDatasetCollectionId: string +): Promise => { + try { + const jobs = await evalDatasetSmartGenerateQueue.getJobs(['waiting', 'active', 'delayed']); + return jobs.some((job) => job.data.evalDatasetCollectionId === evalDatasetCollectionId); + } catch (error) { + addLog.error('Failed to check eval dataset smart generate job status', { + evalDatasetCollectionId: evalDatasetCollectionId, + error + }); + return false; + } +}; + +export const removeEvalDatasetSmartGenerateJobs = async ( + evalDatasetCollectionId: string +): Promise => { + try { + const jobs = await evalDatasetSmartGenerateQueue.getJobs(['waiting', 'delayed', 'prioritized']); + const jobsToRemove = jobs.filter( + (job) => job.data.evalDatasetCollectionId === evalDatasetCollectionId + ); + + await Promise.all(jobsToRemove.map((job) => job.remove())); + + addLog.info('Smart generate jobs removed successfully', { + evalDatasetCollectionId: evalDatasetCollectionId, + removedCount: jobsToRemove.length + }); + return true; + } catch (error) { + addLog.error('Failed to remove smart generate jobs', { + evalDatasetCollectionId: evalDatasetCollectionId, + error + }); + return false; + } +}; diff --git a/packages/service/core/evaluation/smartGenerateProcessor.ts b/packages/service/core/evaluation/smartGenerateProcessor.ts new file mode 100644 index 
000000000000..1e0cb40c6bbb --- /dev/null +++ b/packages/service/core/evaluation/smartGenerateProcessor.ts @@ -0,0 +1,188 @@ +import type { Job } from 'bullmq'; +import type { HydratedDocument } from 'mongoose'; +import { addLog } from '../../common/system/log'; +import { MongoEvalDatasetCollection } from './evalDatasetCollectionSchema'; +import { MongoEvalDatasetData } from './evalDatasetDataSchema'; +import { MongoDatasetData } from '../dataset/data/schema'; +import { EvalDatasetDataCreateFromEnum } from '@fastgpt/global/core/evaluation/constants'; +import type { EvalDatasetDataSchemaType } from '@fastgpt/global/core/evaluation/type'; +import { + type EvalDatasetSmartGenerateData, + getEvalDatasetSmartGenerateWorker +} from './smartGenerateMq'; +import { addEvalDatasetDataSynthesizeJob } from './dataSynthesizeMq'; + +async function processor(job: Job) { + const { datasetCollectionIds, count, intelligentGenerationModel, evalDatasetCollectionId } = + job.data; + + try { + addLog.info('Starting eval dataset smart generation', { + evalDatasetCollectionId, + datasetCollectionIds, + count + }); + + const sampleSize = Number(count); + if (!Number.isInteger(sampleSize) || sampleSize <= 0) { + throw new Error(`Invalid count parameter: ${count}. 
Must be a positive integer.`); + } + + // TODO: Authentication check - get team and user info from eval dataset + const evalDatasetCollection = + await MongoEvalDatasetCollection.findById(evalDatasetCollectionId); + if (!evalDatasetCollection) { + throw new Error(`Eval dataset collection not found: ${evalDatasetCollectionId}`); + } + + // TODO: Optimize the acquisition of dataset data + const sampleData = await MongoDatasetData.find( + { + teamId: evalDatasetCollection.teamId, + collectionId: { $in: datasetCollectionIds } + }, + 'q a datasetId collectionId' + ) + .sort({ updateTime: -1 }) + .limit(100) + .lean(); + + if (sampleData.length === 0) { + throw new Error('No data found in selected dataset collections'); + } + + addLog.info('Retrieved sample data for generation', { + evalDatasetCollectionId: evalDatasetCollectionId, + sampleCount: sampleData.length + }); + + const generateData: Array> = []; + const synthesisData: Array<{ + dataId: string; + intelligentGenerationModel: string; + evalDatasetCollectionId: string; + }> = []; + + for (const sample of sampleData) { + if (sample.q && sample.a) { + // Direct QA pair - can be used immediately + const evalData: Partial = { + teamId: evalDatasetCollection.teamId, + tmbId: evalDatasetCollection.tmbId, + datasetId: evalDatasetCollectionId, + user_input: sample.q, + expected_output: sample.a, + actual_output: '', + context: [], + retrieval_context: [], + metadata: { + sourceDataId: sample._id, + sourceDatasetId: sample.datasetId, + sourceCollectionId: sample.collectionId, + generatedAt: new Date(), + intelligentGenerationModel + }, + createFrom: EvalDatasetDataCreateFromEnum.intelligentGeneration + }; + generateData.push(evalData); + } else if (sample.q && sample.a === '') { + // Only Q - add to synthesis data list (not saved to mongo here) + synthesisData.push({ + dataId: sample._id.toString(), + intelligentGenerationModel, + evalDatasetCollectionId + }); + } + } + + // Bulk insert complete evaluation dataset data + 
let insertedRecords: HydratedDocument[] = []; + if (generateData.length > 0) { + insertedRecords = await MongoEvalDatasetData.insertMany(generateData, { + ordered: false + }); + + addLog.info('Inserted complete eval dataset data', { + evalDatasetCollectionId: evalDatasetCollectionId, + insertedCount: insertedRecords.length + }); + } + + // Queue synthesis jobs for data that needs processing (synthesisData) + const synthesizeJobs = []; + for (const synthData of synthesisData) { + const synthesizeJob = await addEvalDatasetDataSynthesizeJob(synthData); + synthesizeJobs.push(synthesizeJob); + } + + if (synthesizeJobs.length > 0) { + addLog.info('Queued synthesis jobs', { + evalDatasetCollectionId: evalDatasetCollectionId, + synthesizeJobsCount: synthesizeJobs.length + }); + } + + // If all data is complete (no synthesis needed), update dataset status + if (synthesisData.length === 0) { + await checkAndUpdateDatasetStatus(evalDatasetCollectionId); + } + + // TODO: Add audit log + // TODO: Add tracking metrics + + addLog.info('Completed eval dataset smart generation', { + evalDatasetCollectionId: evalDatasetCollectionId, + generateDataCount: insertedRecords.length, + synthesisDataCount: synthesisData.length, + synthesizeJobsCount: synthesizeJobs.length + }); + + return { + success: true, + generateDataCount: insertedRecords.length, + synthesisDataCount: synthesisData.length, + synthesizeJobsCount: synthesizeJobs.length + }; + } catch (error) { + addLog.error('Failed to process eval dataset smart generation', { + evalDatasetCollectionId: evalDatasetCollectionId, + error: error instanceof Error ? error.message : String(error), + stack: error instanceof Error ? 
error.stack : undefined + }); + + // TODO: Update dataset status to error + throw error; + } +} + +async function checkAndUpdateDatasetStatus(evalDatasetCollectionId: string) { + try { + const evalDatasetCollection = + await MongoEvalDatasetCollection.findById(evalDatasetCollectionId); + if (!evalDatasetCollection) return; + + const actualCount = await MongoEvalDatasetData.countDocuments({ + datasetId: evalDatasetCollectionId, + createFrom: EvalDatasetDataCreateFromEnum.intelligentGeneration + }); + + if (actualCount >= evalDatasetCollection.dataCountByGen) { + // TODO: Update dataset status to ready/completed + addLog.info('Eval dataset generation completed', { + evalDatasetCollectionId: evalDatasetCollectionId, + actualCount, + expectedCount: evalDatasetCollection.dataCountByGen + }); + } + } catch (error) { + addLog.error('Failed to check dataset status', { + evalDatasetCollectionId: evalDatasetCollectionId, + error + }); + } +} + +// Initialize worker +export const initEvalDatasetSmartGenerateWorker = () => { + return getEvalDatasetSmartGenerateWorker(processor); +}; diff --git a/projects/app/src/pages/api/core/evaluation/dataset/collection/create.ts b/projects/app/src/pages/api/core/evaluation/dataset/collection/create.ts index c5e52b7ba7d3..453b50b61d72 100644 --- a/projects/app/src/pages/api/core/evaluation/dataset/collection/create.ts +++ b/projects/app/src/pages/api/core/evaluation/dataset/collection/create.ts @@ -17,31 +17,19 @@ async function handler( // Parameter validation if (!name || typeof name !== 'string' || name.trim().length === 0) { - return Promise.reject({ - statusCode: 400, - message: 'Name is required and must be a non-empty string' - }); + return Promise.reject('Name is required and must be a non-empty string'); } if (name.trim().length > 100) { - return Promise.reject({ - statusCode: 400, - message: 'Name must be less than 100 characters' - }); + return Promise.reject('Name must be less than 100 characters'); } if (description && typeof 
description !== 'string') { - return Promise.reject({ - statusCode: 400, - message: 'Description must be a string' - }); + return Promise.reject('Description must be a string'); } if (description && description.length > 500) { - return Promise.reject({ - statusCode: 400, - message: 'Description must be less than 500 characters' - }); + return Promise.reject('Description must be less than 500 characters'); } // Authentication and authorization @@ -59,10 +47,7 @@ async function handler( }); if (existingDataset) { - return Promise.reject({ - statusCode: 409, - message: 'A dataset with this name already exists' - }); + return Promise.reject('A dataset with this name already exists'); } // Create dataset collection diff --git a/projects/app/src/pages/api/core/evaluation/dataset/collection/update.ts b/projects/app/src/pages/api/core/evaluation/dataset/collection/update.ts index b19e1552169e..927e086c481c 100644 --- a/projects/app/src/pages/api/core/evaluation/dataset/collection/update.ts +++ b/projects/app/src/pages/api/core/evaluation/dataset/collection/update.ts @@ -16,38 +16,23 @@ async function handler( // Parameter validation if (!collectionId || typeof collectionId !== 'string' || collectionId.trim().length === 0) { - return Promise.reject({ - statusCode: 400, - message: 'Collection ID is required and must be a non-empty string' - }); + return Promise.reject('Collection ID is required and must be a non-empty string'); } if (!name || typeof name !== 'string' || name.trim().length === 0) { - return Promise.reject({ - statusCode: 400, - message: 'Name is required and must be a non-empty string' - }); + return Promise.reject('Name is required and must be a non-empty string'); } if (name.trim().length > 100) { - return Promise.reject({ - statusCode: 400, - message: 'Name must be less than 100 characters' - }); + return Promise.reject('Name must be less than 100 characters'); } if (description && typeof description !== 'string') { - return Promise.reject({ - statusCode: 
400, - message: 'Description must be a string' - }); + return Promise.reject('Description must be a string'); } if (description && description.length > 500) { - return Promise.reject({ - statusCode: 400, - message: 'Description must be less than 500 characters' - }); + return Promise.reject('Description must be less than 500 characters'); } // TODO: Authentication check - verify user is authenticated via cookie or token @@ -68,10 +53,7 @@ async function handler( }); if (!existingCollection) { - return Promise.reject({ - statusCode: 404, - message: 'Dataset collection not found' - }); + return Promise.reject('Dataset collection not found'); } // Check for name conflicts within team (excluding current collection) @@ -82,10 +64,7 @@ async function handler( }); if (nameConflict) { - return Promise.reject({ - statusCode: 500, - message: 'A dataset with this name already exists' - }); + return Promise.reject('A dataset with this name already exists'); } // Update dataset collection @@ -106,10 +85,7 @@ async function handler( return 'success'; } catch (error) { - return Promise.reject({ - statusCode: 500, - message: 'Failed to update dataset collection' - }); + return Promise.reject('Failed to update dataset collection'); } } diff --git a/projects/app/src/pages/api/core/evaluation/dataset/data/create.ts b/projects/app/src/pages/api/core/evaluation/dataset/data/create.ts index 720adbcc6c55..4ff64cf99911 100644 --- a/projects/app/src/pages/api/core/evaluation/dataset/data/create.ts +++ b/projects/app/src/pages/api/core/evaluation/dataset/data/create.ts @@ -19,17 +19,11 @@ async function handler( req.body; if (!collectionId || typeof collectionId !== 'string') { - return Promise.reject({ - statusCode: 400, - message: 'collectionId is required and must be a string' - }); + return Promise.reject('collectionId is required and must be a string'); } if (!user_input || typeof user_input !== 'string' || user_input.trim().length === 0) { - return Promise.reject({ - statusCode: 400, 
- message: 'user_input is required and must be a non-empty string' - }); + return Promise.reject('user_input is required and must be a non-empty string'); } if ( @@ -37,27 +31,18 @@ async function handler( typeof expected_output !== 'string' || expected_output.trim().length === 0 ) { - return Promise.reject({ - statusCode: 400, - message: 'expected_output is required and must be a non-empty string' - }); + return Promise.reject('expected_output is required and must be a non-empty string'); } if (actual_output !== undefined && typeof actual_output !== 'string') { - return Promise.reject({ - statusCode: 400, - message: 'actual_output must be a string if provided' - }); + return Promise.reject('actual_output must be a string if provided'); } if ( context !== undefined && (!Array.isArray(context) || !context.every((item) => typeof item === 'string')) ) { - return Promise.reject({ - statusCode: 400, - message: 'context must be an array of strings if provided' - }); + return Promise.reject('context must be an array of strings if provided'); } if ( @@ -65,10 +50,7 @@ async function handler( (!Array.isArray(retrieval_context) || !retrieval_context.every((item) => typeof item === 'string')) ) { - return Promise.reject({ - statusCode: 400, - message: 'retrieval_context must be an array of strings if provided' - }); + return Promise.reject('retrieval_context must be an array of strings if provided'); } const { teamId, tmbId } = await authUserPer({ @@ -85,10 +67,7 @@ async function handler( }); if (!collection) { - return Promise.reject({ - statusCode: 404, - message: 'Dataset collection not found or access denied' - }); + return Promise.reject('Dataset collection not found or access denied'); } const dataId = await mongoSessionRun(async (session) => { diff --git a/projects/app/src/pages/api/core/evaluation/dataset/data/delete.ts b/projects/app/src/pages/api/core/evaluation/dataset/data/delete.ts index 6c2bc1c5051b..604ac77e0e43 100644 --- 
a/projects/app/src/pages/api/core/evaluation/dataset/data/delete.ts +++ b/projects/app/src/pages/api/core/evaluation/dataset/data/delete.ts @@ -22,10 +22,7 @@ async function handler( const { dataId } = req.query; if (!dataId || typeof dataId !== 'string') { - return Promise.reject({ - statusCode: 400, - message: 'dataId is required and must be a string' - }); + return Promise.reject('dataId is required and must be a string'); } const { teamId, tmbId } = await authUserPer({ @@ -39,10 +36,7 @@ async function handler( const existingData = await MongoEvalDatasetData.findById(dataId).session(session); if (!existingData) { - return Promise.reject({ - statusCode: 404, - message: 'Dataset data not found' - }); + return Promise.reject('Dataset data not found'); } const collection = await MongoEvalDatasetCollection.findOne({ @@ -51,10 +45,7 @@ async function handler( }).session(session); if (!collection) { - return Promise.reject({ - statusCode: 403, - message: 'Access denied or dataset collection not found' - }); + return Promise.reject('Access denied or dataset collection not found'); } const hasActiveQualityJob = await checkEvalDatasetDataQualityJobActive(dataId); diff --git a/projects/app/src/pages/api/core/evaluation/dataset/data/smartGenerate.ts b/projects/app/src/pages/api/core/evaluation/dataset/data/smartGenerate.ts new file mode 100644 index 000000000000..33c6ef30ba01 --- /dev/null +++ b/projects/app/src/pages/api/core/evaluation/dataset/data/smartGenerate.ts @@ -0,0 +1,106 @@ +import type { ApiRequestProps } from '@fastgpt/service/type/next'; +import { NextAPI } from '@/service/middleware/entry'; +import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; +import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; +import { MongoDatasetCollection } from '@fastgpt/service/core/dataset/collection/schema'; +import { 
MongoDatasetData } from '@fastgpt/service/core/dataset/data/schema'; +import type { smartGenerateEvalDatasetBody } from '@fastgpt/global/core/evaluation/api'; +import { addEvalDatasetSmartGenerateJob } from '@fastgpt/service/core/evaluation/smartGenerateMq'; + +export type SmartGenerateEvalDatasetQuery = {}; +export type SmartGenerateEvalDatasetBody = smartGenerateEvalDatasetBody; +export type SmartGenerateEvalDatasetResponse = string; + +async function handler( + req: ApiRequestProps +): Promise { + const { collectionId, datasetCollectionIds, count = 0, intelligentGenerationModel } = req.body; + + // Parameter validation + if (!collectionId || typeof collectionId !== 'string') { + return Promise.reject('collectionId is required and must be a string'); + } + + if ( + !datasetCollectionIds || + !Array.isArray(datasetCollectionIds) || + datasetCollectionIds.length === 0 + ) { + return Promise.reject('datasetCollectionIds is required and must be a non-empty array'); + } + + if (count < 1) { + return Promise.reject('count must be large 1'); + } + + if (!intelligentGenerationModel || typeof intelligentGenerationModel !== 'string') { + return Promise.reject('intelligentGenerationModel is required and must be a string'); + } + + const { teamId, tmbId } = await authUserPer({ + req, + authToken: true, + authApiKey: true, + per: WritePermissionVal + }); + + const evalDatasetCollection = await MongoEvalDatasetCollection.findById(collectionId); + if (!evalDatasetCollection) { + return Promise.reject('Evaluation dataset collection not found'); + } + + if (String(evalDatasetCollection.teamId) !== teamId) { + return Promise.reject('No permission to access this evaluation dataset collection'); + } + + const datasetCollections = await MongoDatasetCollection.find({ + _id: { $in: datasetCollectionIds }, + teamId + }); + + if (datasetCollections.length !== datasetCollectionIds.length) { + return Promise.reject('One or more dataset collections not found or no permission'); + } + + // 
Calculate total data count from selected collections for validation + const totalDataCount = await MongoDatasetData.countDocuments({ + teamId, + collectionId: { $in: datasetCollectionIds } + }); + + if (totalDataCount === 0) { + return Promise.reject('Selected dataset collections contain no data'); + } + + if (count > totalDataCount) { + return Promise.reject( + `Requested count (${count}) exceeds available data count (${totalDataCount}) in selected collections` + ); + } + + try { + const job = await addEvalDatasetSmartGenerateJob({ + datasetCollectionIds, + count, + intelligentGenerationModel, + evalDatasetCollectionId: collectionId + }); + + await MongoEvalDatasetCollection.findByIdAndUpdate(collectionId, { + $inc: { dataCountByGen: count } + }); + + // TODO: Add audit log for smart generation operation + // TODO: Add tracking metrics for smart generation + + return job.id || 'queued'; + } catch (error: any) { + return Promise.reject(`Failed to queue smart generation: ${error.message}`); + } +} + +export default NextAPI(handler); + +// Export handler for testing +export const handler_test = process.env.NODE_ENV === 'test' ? 
handler : undefined; diff --git a/projects/app/src/pages/api/core/evaluation/dataset/data/update.ts b/projects/app/src/pages/api/core/evaluation/dataset/data/update.ts index 6d907bd0fd9d..89043618c0e8 100644 --- a/projects/app/src/pages/api/core/evaluation/dataset/data/update.ts +++ b/projects/app/src/pages/api/core/evaluation/dataset/data/update.ts @@ -31,17 +31,11 @@ async function handler( } = req.body; if (!dataId || typeof dataId !== 'string') { - return Promise.reject({ - statusCode: 400, - message: 'dataId is required and must be a string' - }); + return Promise.reject('dataId is required and must be a string'); } if (!user_input || typeof user_input !== 'string' || user_input.trim().length === 0) { - return Promise.reject({ - statusCode: 400, - message: 'user_input is required and must be a non-empty string' - }); + return Promise.reject('user_input is required and must be a non-empty string'); } if ( @@ -49,27 +43,18 @@ async function handler( typeof expected_output !== 'string' || expected_output.trim().length === 0 ) { - return Promise.reject({ - statusCode: 400, - message: 'expected_output is required and must be a non-empty string' - }); + return Promise.reject('expected_output is required and must be a non-empty string'); } if (actual_output !== undefined && typeof actual_output !== 'string') { - return Promise.reject({ - statusCode: 400, - message: 'actual_output must be a string if provided' - }); + return Promise.reject('actual_output must be a string if provided'); } if ( context !== undefined && (!Array.isArray(context) || !context.every((item) => typeof item === 'string')) ) { - return Promise.reject({ - statusCode: 400, - message: 'context must be an array of strings if provided' - }); + return Promise.reject('context must be an array of strings if provided'); } if ( @@ -77,27 +62,20 @@ async function handler( (!Array.isArray(retrieval_context) || !retrieval_context.every((item) => typeof item === 'string')) ) { - return Promise.reject({ - 
statusCode: 400, - message: 'retrieval_context must be an array of strings if provided' - }); + return Promise.reject('retrieval_context must be an array of strings if provided'); } if (typeof enableQualityEvaluation !== 'boolean') { - return Promise.reject({ - statusCode: 400, - message: 'enableQualityEvaluation is required and must be a boolean' - }); + return Promise.reject('enableQualityEvaluation is required and must be a boolean'); } if ( enableQualityEvaluation && (!qualityEvaluationModel || typeof qualityEvaluationModel !== 'string') ) { - return Promise.reject({ - statusCode: 400, - message: 'qualityEvaluationModel is required when enableQualityEvaluation is true' - }); + return Promise.reject( + 'qualityEvaluationModel is required when enableQualityEvaluation is true' + ); } const { teamId, tmbId } = await authUserPer({ @@ -111,10 +89,7 @@ async function handler( const existingData = await MongoEvalDatasetData.findById(dataId).session(session); if (!existingData) { - return Promise.reject({ - statusCode: 404, - message: 'Dataset data not found' - }); + return Promise.reject('Dataset data not found'); } const collection = await MongoEvalDatasetCollection.findOne({ @@ -123,10 +98,7 @@ async function handler( }).session(session); if (!collection) { - return Promise.reject({ - statusCode: 403, - message: 'Access denied or dataset collection not found' - }); + return Promise.reject('Access denied or dataset collection not found'); } await MongoEvalDatasetData.updateOne( From 4054cf9c2167e163b14fc2fc021426d12fc78dfc Mon Sep 17 00:00:00 2001 From: Jon Date: Thu, 28 Aug 2025 11:45:16 +0800 Subject: [PATCH 012/255] feat: Add APIs for managing evaluation dataset tasks and statuses --- packages/global/core/evaluation/api.d.ts | 32 ++++++- packages/global/core/evaluation/constants.ts | 7 ++ packages/global/core/evaluation/type.d.ts | 8 +- .../dataset/collection/deleteTask.ts | 87 +++++++++++++++++++ .../dataset/collection/failedTasks.ts | 66 ++++++++++++++ 
.../evaluation/dataset/collection/list.ts | 66 +++++++++++--- .../dataset/collection/retryTask.ts | 87 +++++++++++++++++++ .../evaluation/dataset/data/smartGenerate.ts | 24 ++--- 8 files changed, 353 insertions(+), 24 deletions(-) create mode 100644 projects/app/src/pages/api/core/evaluation/dataset/collection/deleteTask.ts create mode 100644 projects/app/src/pages/api/core/evaluation/dataset/collection/failedTasks.ts create mode 100644 projects/app/src/pages/api/core/evaluation/dataset/collection/retryTask.ts diff --git a/packages/global/core/evaluation/api.d.ts b/packages/global/core/evaluation/api.d.ts index 4094fe344598..3ace7f5d311c 100644 --- a/packages/global/core/evaluation/api.d.ts +++ b/packages/global/core/evaluation/api.d.ts @@ -1,5 +1,9 @@ import type { PaginationProps, PaginationResponse } from '@fastgpt/web/common/fetch/type'; -import type { EvalDatasetCollectionSchemaType, EvalDatasetDataSchemaType } from './type'; +import type { + EvalDatasetCollectionSchemaType, + EvalDatasetDataSchemaType, + EvalDatasetCollectionStatus +} from './type'; export type listEvaluationsBody = PaginationProps<{ searchKey?: string; @@ -42,6 +46,7 @@ export type listEvalDatasetCollectionResponse = PaginationResponse< > & { creatorAvatar?: string; creatorName?: string; + status: EvalDatasetCollectionStatus; } >; @@ -108,3 +113,28 @@ export type smartGenerateEvalDatasetBody = { count?: number; intelligentGenerationModel: string; }; + +export type listFailedTasksBody = { + collectionId: string; +}; + +export type listFailedTasksResponse = { + tasks: Array<{ + jobId: string; + dataId: string; + errorMessage: string; + failedAt: Date; + attemptsMade: number; + maxAttempts: number; + }>; +}; + +export type retryTaskBody = { + collectionId: string; + jobId: string; +}; + +export type deleteTaskBody = { + collectionId: string; + jobId: string; +}; diff --git a/packages/global/core/evaluation/constants.ts b/packages/global/core/evaluation/constants.ts index 
a7c42066d559..5f13d35fd4cb 100644 --- a/packages/global/core/evaluation/constants.ts +++ b/packages/global/core/evaluation/constants.ts @@ -28,3 +28,10 @@ export enum EvalDatasetDataCreateFromEnum { } export const EvalDatasetDataCreateFromValues = Object.values(EvalDatasetDataCreateFromEnum); + +export enum EvalDatasetCollectionStatusEnum { + queued = 'queued', + processing = 'processing', + error = 'error', + ready = 'ready' +} diff --git a/packages/global/core/evaluation/type.d.ts b/packages/global/core/evaluation/type.d.ts index f1caa424107d..2075bb6d0db5 100644 --- a/packages/global/core/evaluation/type.d.ts +++ b/packages/global/core/evaluation/type.d.ts @@ -1,4 +1,10 @@ -import type { EvaluationStatusEnum, EvalDatasetDataCreateFromEnum } from './constants'; +import type { + EvaluationStatusEnum, + EvalDatasetDataCreateFromEnum, + EvalDatasetCollectionStatusEnum +} from './constants'; + +export type EvalDatasetCollectionStatus = EvalDatasetCollectionStatusEnum; export type EvaluationSchemaType = { _id: string; diff --git a/projects/app/src/pages/api/core/evaluation/dataset/collection/deleteTask.ts b/projects/app/src/pages/api/core/evaluation/dataset/collection/deleteTask.ts new file mode 100644 index 000000000000..8c03f7be5e5a --- /dev/null +++ b/projects/app/src/pages/api/core/evaluation/dataset/collection/deleteTask.ts @@ -0,0 +1,87 @@ +import type { ApiRequestProps } from '@fastgpt/service/type/next'; +import { NextAPI } from '@/service/middleware/entry'; +import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; +import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; +import { Types } from '@fastgpt/service/common/mongo'; +import type { deleteTaskBody } from '@fastgpt/global/core/evaluation/api'; +import { evalDatasetDataSynthesizeQueue } from '@fastgpt/service/core/evaluation/dataSynthesizeMq'; +import { 
addLog } from '@fastgpt/service/common/system/log'; + +async function handler( + req: ApiRequestProps +): Promise<{ success: boolean; message: string }> { + const { teamId } = await authUserPer({ + req, + authToken: true, + authApiKey: true, + per: WritePermissionVal + }); + + const { collectionId, jobId } = req.body; + + const collection = await MongoEvalDatasetCollection.findOne({ + _id: new Types.ObjectId(collectionId), + teamId: new Types.ObjectId(teamId) + }); + + if (!collection) { + throw new Error('Evaluation dataset not found or access denied'); + } + + try { + const job = await evalDatasetDataSynthesizeQueue.getJob(jobId); + + if (!job) { + return { + success: false, + message: 'Task not found' + }; + } + + if (job.data.evalDatasetCollectionId !== collectionId) { + return { + success: false, + message: 'Task does not belong to the specified dataset collection' + }; + } + + if (!job.isFailed()) { + return { + success: false, + message: 'Only failed tasks can be deleted' + }; + } + + await job.remove(); + + addLog.info('Task deleted successfully', { + jobId, + collectionId, + teamId + }); + + return { + success: true, + message: 'Task deleted successfully' + }; + } catch (error) { + console.error('Error deleting task:', error); + addLog.error('Failed to delete task', { + jobId, + collectionId, + teamId, + error + }); + + return { + success: false, + message: 'Failed to delete task' + }; + } +} + +export default NextAPI(handler); + +// Export handler for testing +export const handler_test = process.env.NODE_ENV === 'test' ? 
handler : undefined; diff --git a/projects/app/src/pages/api/core/evaluation/dataset/collection/failedTasks.ts b/projects/app/src/pages/api/core/evaluation/dataset/collection/failedTasks.ts new file mode 100644 index 000000000000..87e5828df5d5 --- /dev/null +++ b/projects/app/src/pages/api/core/evaluation/dataset/collection/failedTasks.ts @@ -0,0 +1,66 @@ +import type { ApiRequestProps } from '@fastgpt/service/type/next'; +import { NextAPI } from '@/service/middleware/entry'; +import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; +import { ReadPermissionVal } from '@fastgpt/global/support/permission/constant'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; +import { Types } from '@fastgpt/service/common/mongo'; +import type { + listFailedTasksBody, + listFailedTasksResponse +} from '@fastgpt/global/core/evaluation/api'; +import { evalDatasetDataSynthesizeQueue } from '@fastgpt/service/core/evaluation/dataSynthesizeMq'; + +async function handler( + req: ApiRequestProps +): Promise { + const { teamId } = await authUserPer({ + req, + authToken: true, + authApiKey: true, + per: ReadPermissionVal + }); + + const { collectionId } = req.body; + + const collection = await MongoEvalDatasetCollection.findOne({ + _id: new Types.ObjectId(collectionId), + teamId: new Types.ObjectId(teamId) + }); + + if (!collection) { + throw new Error('Evaluation dataset not found or access denied'); + } + + try { + const failedJobs = await evalDatasetDataSynthesizeQueue.getJobs(['failed']); + const collectionFailedJobs = failedJobs.filter( + (job) => job.data.evalDatasetCollectionId === collectionId + ); + + const tasks = await Promise.all( + collectionFailedJobs.map(async (job) => { + const failureReason = job.failedReason || 'Unknown error'; + return { + jobId: job.id || '', + dataId: job.data.dataId, + errorMessage: failureReason, + failedAt: job.finishedOn ? 
new Date(job.finishedOn) : new Date(), + attemptsMade: job.attemptsMade, + maxAttempts: job.opts.attempts || 3 + }; + }) + ); + + return { + tasks + }; + } catch (error) { + console.error('Error fetching failed tasks:', error); + throw new Error('Error occurred while fetching failed tasks list'); + } +} + +export default NextAPI(handler); + +// Export handler for testing +export const handler_test = process.env.NODE_ENV === 'test' ? handler : undefined; diff --git a/projects/app/src/pages/api/core/evaluation/dataset/collection/list.ts b/projects/app/src/pages/api/core/evaluation/dataset/collection/list.ts index e0536e0cad9f..f490a1f31768 100644 --- a/projects/app/src/pages/api/core/evaluation/dataset/collection/list.ts +++ b/projects/app/src/pages/api/core/evaluation/dataset/collection/list.ts @@ -9,7 +9,43 @@ import type { listEvalDatasetCollectionBody, listEvalDatasetCollectionResponse } from '@fastgpt/global/core/evaluation/api'; +import type { EvalDatasetCollectionStatus } from '@fastgpt/global/core/evaluation/type'; +import { EvalDatasetCollectionStatusEnum } from '@fastgpt/global/core/evaluation/constants'; import { replaceRegChars } from '@fastgpt/global/common/string/tools'; +import { evalDatasetDataSynthesizeQueue } from '@fastgpt/service/core/evaluation/dataSynthesizeMq'; + +async function getCollectionStatus(collectionId: string): Promise { + try { + const jobs = await evalDatasetDataSynthesizeQueue.getJobs([ + 'waiting', + 'active', + 'delayed', + 'failed' + ]); + const collectionJobs = jobs.filter((job) => job.data.evalDatasetCollectionId === collectionId); + + if (collectionJobs.length === 0) { + return EvalDatasetCollectionStatusEnum.ready; + } + + if (collectionJobs.some((job) => job.isFailed())) { + return EvalDatasetCollectionStatusEnum.error; + } + + if (collectionJobs.some((job) => job.isActive())) { + return EvalDatasetCollectionStatusEnum.processing; + } + + if (collectionJobs.some((job) => job.isWaiting() || job.isDelayed())) { + return 
EvalDatasetCollectionStatusEnum.queued; + } + + return EvalDatasetCollectionStatusEnum.ready; + } catch (error) { + console.error('Error getting collection status:', error); + return EvalDatasetCollectionStatusEnum.ready; + } +} async function handler( req: ApiRequestProps @@ -21,11 +57,9 @@ async function handler( per: ReadPermissionVal }); - // Parse request parameters const { offset, pageSize } = parsePaginationRequest(req); const { searchKey } = req.body; - // Build MongoDB pipeline const match: Record = { teamId: new Types.ObjectId(teamId) }; @@ -44,18 +78,26 @@ async function handler( // TODO: Audit Log - Log successful response + const collectionsWithStatus = await Promise.all( + collections.map(async (item) => { + const status = await getCollectionStatus(String(item._id)); + return { + _id: String(item._id), + name: item.name, + description: item.description || '', + createTime: item.createTime, + updateTime: item.updateTime, + dataCountByGen: item.dataCountByGen || 0, + creatorAvatar: item.teamMember?.avatar, + creatorName: item.teamMember?.name, + status + }; + }) + ); + return { total, - list: collections.map((item) => ({ - _id: String(item._id), - name: item.name, - description: item.description || '', - createTime: item.createTime, - updateTime: item.updateTime, - dataCountByGen: item.dataCountByGen || 0, - creatorAvatar: item.teamMember?.avatar, - creatorName: item.teamMember?.name - })) + list: collectionsWithStatus }; } catch (error) { console.error('Database error in eval dataset collection list:', error); diff --git a/projects/app/src/pages/api/core/evaluation/dataset/collection/retryTask.ts b/projects/app/src/pages/api/core/evaluation/dataset/collection/retryTask.ts new file mode 100644 index 000000000000..c95bc242d103 --- /dev/null +++ b/projects/app/src/pages/api/core/evaluation/dataset/collection/retryTask.ts @@ -0,0 +1,87 @@ +import type { ApiRequestProps } from '@fastgpt/service/type/next'; +import { NextAPI } from 
'@/service/middleware/entry'; +import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; +import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; +import { Types } from '@fastgpt/service/common/mongo'; +import type { retryTaskBody } from '@fastgpt/global/core/evaluation/api'; +import { evalDatasetDataSynthesizeQueue } from '@fastgpt/service/core/evaluation/dataSynthesizeMq'; +import { addLog } from '@fastgpt/service/common/system/log'; + +async function handler( + req: ApiRequestProps +): Promise<{ success: boolean; message: string }> { + const { teamId } = await authUserPer({ + req, + authToken: true, + authApiKey: true, + per: WritePermissionVal + }); + + const { collectionId, jobId } = req.body; + + const collection = await MongoEvalDatasetCollection.findOne({ + _id: new Types.ObjectId(collectionId), + teamId: new Types.ObjectId(teamId) + }); + + if (!collection) { + throw new Error('Evaluation dataset not found or access denied'); + } + + try { + const job = await evalDatasetDataSynthesizeQueue.getJob(jobId); + + if (!job) { + return { + success: false, + message: 'Task not found' + }; + } + + if (job.data.evalDatasetCollectionId !== collectionId) { + return { + success: false, + message: 'Task does not belong to the specified dataset collection' + }; + } + + if (!job.isFailed()) { + return { + success: false, + message: 'Only failed tasks can be retried' + }; + } + + await job.retry(); + + addLog.info('Task retried successfully', { + jobId, + collectionId, + teamId + }); + + return { + success: true, + message: 'Task retried successfully' + }; + } catch (error) { + console.error('Error retrying task:', error); + addLog.error('Failed to retry task', { + jobId, + collectionId, + teamId, + error + }); + + return { + success: false, + message: 'Failed to retry task' + }; + } +} + +export default NextAPI(handler); 
+ +// Export handler for testing +export const handler_test = process.env.NODE_ENV === 'test' ? handler : undefined; diff --git a/projects/app/src/pages/api/core/evaluation/dataset/data/smartGenerate.ts b/projects/app/src/pages/api/core/evaluation/dataset/data/smartGenerate.ts index 33c6ef30ba01..24a35a8c0519 100644 --- a/projects/app/src/pages/api/core/evaluation/dataset/data/smartGenerate.ts +++ b/projects/app/src/pages/api/core/evaluation/dataset/data/smartGenerate.ts @@ -15,7 +15,7 @@ export type SmartGenerateEvalDatasetResponse = string; async function handler( req: ApiRequestProps ): Promise { - const { collectionId, datasetCollectionIds, count = 0, intelligentGenerationModel } = req.body; + const { collectionId, datasetCollectionIds, count, intelligentGenerationModel } = req.body; // Parameter validation if (!collectionId || typeof collectionId !== 'string') { @@ -30,10 +30,6 @@ async function handler( return Promise.reject('datasetCollectionIds is required and must be a non-empty array'); } - if (count < 1) { - return Promise.reject('count must be large 1'); - } - if (!intelligentGenerationModel || typeof intelligentGenerationModel !== 'string') { return Promise.reject('intelligentGenerationModel is required and must be a string'); } @@ -63,7 +59,7 @@ async function handler( return Promise.reject('One or more dataset collections not found or no permission'); } - // Calculate total data count from selected collections for validation + // Calculate total data count from selected collections const totalDataCount = await MongoDatasetData.countDocuments({ teamId, collectionId: { $in: datasetCollectionIds } @@ -73,22 +69,30 @@ async function handler( return Promise.reject('Selected dataset collections contain no data'); } - if (count > totalDataCount) { + // Use totalDataCount as default when count is undefined + const finalCount = count !== undefined ? 
count : totalDataCount; + + // Validate count after setting default + if (finalCount < 1) { + return Promise.reject('count must be greater than 0'); + } + + if (finalCount > totalDataCount) { return Promise.reject( - `Requested count (${count}) exceeds available data count (${totalDataCount}) in selected collections` + `Requested count (${finalCount}) exceeds available data count (${totalDataCount}) in selected collections` ); } try { const job = await addEvalDatasetSmartGenerateJob({ datasetCollectionIds, - count, + count: finalCount, intelligentGenerationModel, evalDatasetCollectionId: collectionId }); await MongoEvalDatasetCollection.findByIdAndUpdate(collectionId, { - $inc: { dataCountByGen: count } + $inc: { dataCountByGen: finalCount } }); // TODO: Add audit log for smart generation operation From ab8cb72712f19470ae54299d3e69c7fd8c431f33 Mon Sep 17 00:00:00 2001 From: Jon Date: Thu, 28 Aug 2025 15:10:45 +0800 Subject: [PATCH 013/255] feat: Add batch quality assessment API endpoint --- packages/global/core/evaluation/api.d.ts | 13 ++ .../collection/qualityAssessmentBatch.ts | 200 ++++++++++++++++++ .../core/evaluation/dataset/data/fileId.ts | 50 +---- .../dataset/data/qualityAssessment.ts | 25 +-- 4 files changed, 228 insertions(+), 60 deletions(-) create mode 100644 projects/app/src/pages/api/core/evaluation/dataset/collection/qualityAssessmentBatch.ts diff --git a/packages/global/core/evaluation/api.d.ts b/packages/global/core/evaluation/api.d.ts index 3ace7f5d311c..07c4702b5c95 100644 --- a/packages/global/core/evaluation/api.d.ts +++ b/packages/global/core/evaluation/api.d.ts @@ -103,6 +103,19 @@ export type qualityAssessmentBody = { evalModel: string; }; +export type qualityAssessmentBatchBody = { + collectionId: string; + evalModel: string; +}; + +export type qualityAssessmentBatchResponse = { + success: boolean; + message: string; + processedCount: number; + skippedCount: number; + errorCount: number; +}; + export type deleteEvalDatasetDataQuery = { 
dataId: string; }; diff --git a/projects/app/src/pages/api/core/evaluation/dataset/collection/qualityAssessmentBatch.ts b/projects/app/src/pages/api/core/evaluation/dataset/collection/qualityAssessmentBatch.ts new file mode 100644 index 000000000000..fb50f6a87da1 --- /dev/null +++ b/projects/app/src/pages/api/core/evaluation/dataset/collection/qualityAssessmentBatch.ts @@ -0,0 +1,200 @@ +import type { ApiRequestProps } from '@fastgpt/service/type/next'; +import { NextAPI } from '@/service/middleware/entry'; +import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; +import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; +import { MongoEvalDatasetData } from '@fastgpt/service/core/evaluation/evalDatasetDataSchema'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; +import { + addEvalDatasetDataQualityJob, + removeEvalDatasetDataQualityJob, + evalDatasetDataQualityQueue +} from '@fastgpt/service/core/evaluation/dataQualityMq'; +import type { + qualityAssessmentBatchBody, + qualityAssessmentBatchResponse +} from '@fastgpt/global/core/evaluation/api'; +import { addLog } from '@fastgpt/service/common/system/log'; + +export type QualityAssessmentBatchQuery = {}; +export type QualityAssessmentBatchBody = qualityAssessmentBatchBody; +export type QualityAssessmentBatchResponse = qualityAssessmentBatchResponse; +async function handler( + req: ApiRequestProps +): Promise { + const { collectionId, evalModel } = req.body; + + if (!collectionId || typeof collectionId !== 'string') { + return { + success: false, + message: 'collectionId is required and must be a string', + processedCount: 0, + skippedCount: 0, + errorCount: 0 + }; + } + + if (!evalModel || typeof evalModel !== 'string') { + return { + success: false, + message: 'evalModel is required and must be a string', + processedCount: 0, + skippedCount: 0, + errorCount: 0 + }; + } + + const { teamId, tmbId } = await 
authUserPer({ + req, + authToken: true, + authApiKey: true, + per: WritePermissionVal + }); + + const collection = await MongoEvalDatasetCollection.findOne({ + _id: collectionId, + teamId + }); + + if (!collection) { + return { + success: false, + message: 'Dataset collection not found or access denied', + processedCount: 0, + skippedCount: 0, + errorCount: 0 + }; + } + + const dataItems = await MongoEvalDatasetData.find({ + datasetId: collectionId, + teamId + }).select('_id'); + + if (dataItems.length === 0) { + return { + success: true, + message: 'No data items found in the collection', + processedCount: 0, + skippedCount: 0, + errorCount: 0 + }; + } + + let processedCount = 0; + let skippedCount = 0; + let errorCount = 0; + + for (const dataItem of dataItems) { + const dataId = dataItem._id.toString(); + + try { + const jobId = await evalDatasetDataQualityQueue.getDeduplicationJobId(dataId); + let jobState = null; + + if (jobId) { + const job = await evalDatasetDataQualityQueue.getJob(jobId); + if (job) { + jobState = await job.getState(); + } + } + + if (jobState === 'active') { + // Active evaluation task -> remove and overwrite + addLog.info('Removing active quality assessment job for re-evaluation', { + dataId, + jobId, + collectionId + }); + await removeEvalDatasetDataQualityJob(dataId); + + // Create new job + await addEvalDatasetDataQualityJob({ + dataId: dataId, + evalModel: evalModel + }); + + // Update metadata + await MongoEvalDatasetData.findByIdAndUpdate(dataId, { + $set: { + 'metadata.qualityStatus': 'queuing', + 'metadata.qualityModel': evalModel, + 'metadata.qualityQueueTime': new Date() + } + }); + + processedCount++; + } else if (jobState && ['waiting', 'delayed', 'prioritized'].includes(jobState)) { + // Tasks in queue -> not affected + addLog.info('Skipping queued quality assessment job', { + dataId, + jobState, + collectionId + }); + skippedCount++; + } else { + // Completed or abnormal tasks -> will be re-evaluated + // This includes: 
completed, failed, or no existing job + if (jobState && ['completed', 'failed'].includes(jobState)) { + addLog.info('Re-evaluating completed/failed quality assessment job', { + dataId, + jobState, + collectionId + }); + } + + // Remove existing job if any (for completed/failed states) + if (jobId) { + await removeEvalDatasetDataQualityJob(dataId); + } + + // Create new job + await addEvalDatasetDataQualityJob({ + dataId: dataId, + evalModel: evalModel + }); + + // Update metadata + await MongoEvalDatasetData.findByIdAndUpdate(dataId, { + $set: { + 'metadata.qualityStatus': 'queuing', + 'metadata.qualityModel': evalModel, + 'metadata.qualityQueueTime': new Date() + } + }); + + processedCount++; + } + } catch (error) { + addLog.error('Failed to process quality assessment for data item', { + dataId, + collectionId, + error: error instanceof Error ? error.message : String(error) + }); + errorCount++; + } + } + + const message = `Batch quality assessment completed. Processed: ${processedCount}, Skipped: ${skippedCount}, Errors: ${errorCount}`; + + addLog.info('Batch quality assessment completed', { + collectionId, + evalModel, + processedCount, + skippedCount, + errorCount, + totalItems: dataItems.length + }); + + return { + success: errorCount === 0 || processedCount > 0, + message, + processedCount, + skippedCount, + errorCount + }; +} + +export default NextAPI(handler); + +// Export handler for testing +export const handler_test = process.env.NODE_ENV === 'test' ? 
handler : undefined; diff --git a/projects/app/src/pages/api/core/evaluation/dataset/data/fileId.ts b/projects/app/src/pages/api/core/evaluation/dataset/data/fileId.ts index c4d39d058b1d..06e04c01a50b 100644 --- a/projects/app/src/pages/api/core/evaluation/dataset/data/fileId.ts +++ b/projects/app/src/pages/api/core/evaluation/dataset/data/fileId.ts @@ -136,34 +136,22 @@ async function handler( } = req.body; if (!fileId || typeof fileId !== 'string') { - return Promise.reject({ - statusCode: 400, - message: 'fileId is required and must be a string' - }); + return 'fileId is required and must be a string'; } if (!datasetCollectionId || typeof datasetCollectionId !== 'string') { - return Promise.reject({ - statusCode: 400, - message: 'datasetCollectionId is required and must be a string' - }); + return 'datasetCollectionId is required and must be a string'; } if (typeof enableQualityEvaluation !== 'boolean') { - return Promise.reject({ - statusCode: 400, - message: 'enableQualityEvaluation is required and must be a boolean' - }); + return 'enableQualityEvaluation is required and must be a boolean'; } if ( enableQualityEvaluation && (!qualityEvaluationModel || typeof qualityEvaluationModel !== 'string') ) { - return Promise.reject({ - statusCode: 400, - message: 'qualityEvaluationModel is required when enableQualityEvaluation is true' - }); + return 'qualityEvaluationModel is required when enableQualityEvaluation is true'; } const { teamId, tmbId } = await authUserPer({ @@ -183,26 +171,17 @@ async function handler( const filename = file.filename?.toLowerCase() || ''; if (!filename.endsWith('.csv')) { - return Promise.reject({ - statusCode: 400, - message: 'File must be a CSV file' - }); + return 'File must be a CSV file'; } // Verify dataset collection exists and belongs to team const datasetCollection = await MongoEvalDatasetCollection.findById(datasetCollectionId); if (!datasetCollection) { - return Promise.reject({ - statusCode: 404, - message: 'Evaluation dataset 
collection not found' - }); + return 'Evaluation dataset collection not found'; } if (String(datasetCollection.teamId) !== teamId) { - return Promise.reject({ - statusCode: 403, - message: 'No permission to access this dataset collection' - }); + return 'No permission to access this dataset collection'; } try { @@ -218,18 +197,12 @@ async function handler( const csvRows = parseCSVContent(rawText); if (csvRows.length === 0) { - return Promise.reject({ - statusCode: 400, - message: 'CSV file contains no data rows' - }); + return 'CSV file contains no data rows'; } // Validate row limit (prevent memory issues) if (csvRows.length > 10000) { - return Promise.reject({ - statusCode: 400, - message: 'CSV file cannot contain more than 10,000 rows' - }); + return 'CSV file cannot contain more than 10,000 rows'; } // Prepare data for bulk insert @@ -322,10 +295,7 @@ async function handler( } catch (error: any) { // Handle parsing errors if (error.message && typeof error.message === 'string') { - return Promise.reject({ - statusCode: 400, - message: `CSV parsing error: ${error.message}` - }); + return `CSV parsing error: ${error.message}`; } // Re-throw other errors diff --git a/projects/app/src/pages/api/core/evaluation/dataset/data/qualityAssessment.ts b/projects/app/src/pages/api/core/evaluation/dataset/data/qualityAssessment.ts index b99292e3109d..c82e285e244f 100644 --- a/projects/app/src/pages/api/core/evaluation/dataset/data/qualityAssessment.ts +++ b/projects/app/src/pages/api/core/evaluation/dataset/data/qualityAssessment.ts @@ -21,17 +21,11 @@ async function handler( const { dataId, evalModel } = req.body; if (!dataId || typeof dataId !== 'string') { - return Promise.reject({ - statusCode: 400, - message: 'dataId is required and must be a string' - }); + return 'dataId is required and must be a string'; } if (!evalModel || typeof evalModel !== 'string') { - return Promise.reject({ - statusCode: 400, - message: 'evalModel is required and must be a string' - }); + 
return 'evalModel is required and must be a string'; } const { teamId, tmbId } = await authUserPer({ @@ -43,10 +37,7 @@ async function handler( const datasetData = await MongoEvalDatasetData.findById(dataId); if (!datasetData) { - return Promise.reject({ - statusCode: 404, - message: 'Dataset data not found' - }); + return 'Dataset data not found'; } const collection = await MongoEvalDatasetCollection.findOne({ @@ -55,10 +46,7 @@ async function handler( }); if (!collection) { - return Promise.reject({ - statusCode: 404, - message: 'Dataset collection not found or access denied' - }); + return 'Dataset collection not found or access denied'; } try { @@ -85,10 +73,7 @@ async function handler( return 'success'; } catch (error) { - return Promise.reject({ - statusCode: 500, - message: error instanceof Error ? error.message : 'Failed to queue quality assessment job' - }); + return error instanceof Error ? error.message : 'Failed to queue quality assessment job'; } } From bed1c73098a10968425270029836353f7da1cfab Mon Sep 17 00:00:00 2001 From: Jon Date: Thu, 28 Aug 2025 20:01:03 +0800 Subject: [PATCH 014/255] feat: Introduce EvalDatasetDataKeyEnum to improve data processing --- packages/global/core/evaluation/api.d.ts | 23 +- packages/global/core/evaluation/constants.ts | 19 +- packages/global/core/evaluation/type.d.ts | 16 +- .../evaluation/{ => dataset}/dataQualityMq.ts | 4 +- .../{ => dataset}/dataQualityProcessor.ts | 32 +- .../{ => dataset}/dataSynthesizeMq.ts | 4 +- .../{ => dataset}/dataSynthesizeProcessor.ts | 57 +- .../evalDatasetCollectionSchema.ts | 9 +- .../{ => dataset}/evalDatasetDataSchema.ts | 21 +- .../{ => dataset}/smartGenerateMq.ts | 4 +- .../{ => dataset}/smartGenerateProcessor.ts | 51 +- packages/service/core/evaluation/index.ts | 10 +- .../evaluation/dataset/collection/create.ts | 8 +- .../dataset/collection/deleteTask.ts | 4 +- .../dataset/collection/failedTasks.ts | 4 +- .../evaluation/dataset/collection/list.ts | 8 +- 
.../collection/qualityAssessmentBatch.ts | 11 +- .../dataset/collection/retryTask.ts | 4 +- .../evaluation/dataset/collection/update.ts | 8 +- .../core/evaluation/dataset/data/create.ts | 45 +- .../core/evaluation/dataset/data/delete.ts | 8 +- .../core/evaluation/dataset/data/fileId.ts | 53 +- .../api/core/evaluation/dataset/data/list.ts | 31 +- .../dataset/data/qualityAssessment.ts | 9 +- .../evaluation/dataset/data/smartGenerate.ts | 8 +- .../core/evaluation/dataset/data/update.ts | 49 +- .../dataset/collection/create.test.ts | 66 +- .../dataset/collection/list.test.ts | 42 +- .../dataset/collection/update.test.ts | 109 +- .../evaluation/dataset/data/create.test.ts | 686 ++++++++++ .../evaluation/dataset/data/delete.test.ts | 566 ++++++++ .../evaluation/dataset/data/fileId.test.ts | 1141 +++++++++++++++++ .../core/evaluation/dataset/data/list.test.ts | 815 ++++++++++++ .../dataset/data/qualityAssessment.test.ts | 604 +++++++++ .../evaluation/dataset/data/update.test.ts | 961 ++++++++++++++ 35 files changed, 5079 insertions(+), 411 deletions(-) rename packages/service/core/evaluation/{ => dataset}/dataQualityMq.ts (95%) rename packages/service/core/evaluation/{ => dataset}/dataQualityProcessor.ts (81%) rename packages/service/core/evaluation/{ => dataset}/dataSynthesizeMq.ts (95%) rename packages/service/core/evaluation/{ => dataset}/dataSynthesizeProcessor.ts (68%) rename packages/service/core/evaluation/{ => dataset}/evalDatasetCollectionSchema.ts (90%) rename packages/service/core/evaluation/{ => dataset}/evalDatasetDataSchema.ts (82%) rename packages/service/core/evaluation/{ => dataset}/smartGenerateMq.ts (95%) rename packages/service/core/evaluation/{ => dataset}/smartGenerateProcessor.ts (77%) create mode 100644 test/cases/pages/api/core/evaluation/dataset/data/create.test.ts create mode 100644 test/cases/pages/api/core/evaluation/dataset/data/delete.test.ts create mode 100644 test/cases/pages/api/core/evaluation/dataset/data/fileId.test.ts create mode 
100644 test/cases/pages/api/core/evaluation/dataset/data/list.test.ts create mode 100644 test/cases/pages/api/core/evaluation/dataset/data/qualityAssessment.test.ts create mode 100644 test/cases/pages/api/core/evaluation/dataset/data/update.test.ts diff --git a/packages/global/core/evaluation/api.d.ts b/packages/global/core/evaluation/api.d.ts index 07c4702b5c95..197d6d3dc7e0 100644 --- a/packages/global/core/evaluation/api.d.ts +++ b/packages/global/core/evaluation/api.d.ts @@ -4,6 +4,7 @@ import type { EvalDatasetDataSchemaType, EvalDatasetCollectionStatus } from './type'; +import type { EvalDatasetDataKeyEnum } from './constants'; export type listEvaluationsBody = PaginationProps<{ searchKey?: string; @@ -42,7 +43,7 @@ export type listEvalDatasetCollectionBody = PaginationProps<{ export type listEvalDatasetCollectionResponse = PaginationResponse< Pick< EvalDatasetCollectionSchemaType, - '_id' | 'name' | 'description' | 'createTime' | 'updateTime' | 'dataCountByGen' + '_id' | 'name' | 'description' | 'createTime' | 'updateTime' > & { creatorAvatar?: string; creatorName?: string; @@ -61,11 +62,11 @@ export type importEvalDatasetFromFileBody = { } & QualityEvaluationBase; type EvalDatasetDataBase = { - user_input: string; - actual_output?: string; - expected_output: string; - context?: string[]; - retrieval_context?: string[]; + [EvalDatasetDataKeyEnum.UserInput]: string; + [EvalDatasetDataKeyEnum.ActualOutput]?: string; + [EvalDatasetDataKeyEnum.ExpectedOutput]: string; + [EvalDatasetDataKeyEnum.Context]?: string[]; + [EvalDatasetDataKeyEnum.RetrievalContext]?: string[]; }; export type createEvalDatasetDataBody = EvalDatasetDataBase & { @@ -81,11 +82,11 @@ export type listEvalDatasetDataResponse = PaginationResponse< Pick< EvalDatasetDataSchemaType, | '_id' - | 'user_input' - | 'actual_output' - | 'expected_output' - | 'context' - | 'retrieval_context' + | EvalDatasetDataKeyEnum.UserInput + | EvalDatasetDataKeyEnum.ActualOutput + | 
EvalDatasetDataKeyEnum.ExpectedOutput + | EvalDatasetDataKeyEnum.Context + | EvalDatasetDataKeyEnum.RetrievalContext | 'metadata' | 'createFrom' | 'createTime' diff --git a/packages/global/core/evaluation/constants.ts b/packages/global/core/evaluation/constants.ts index 5f13d35fd4cb..0f020be905f9 100644 --- a/packages/global/core/evaluation/constants.ts +++ b/packages/global/core/evaluation/constants.ts @@ -30,8 +30,25 @@ export enum EvalDatasetDataCreateFromEnum { export const EvalDatasetDataCreateFromValues = Object.values(EvalDatasetDataCreateFromEnum); export enum EvalDatasetCollectionStatusEnum { - queued = 'queued', + queuing = 'queuing', processing = 'processing', error = 'error', ready = 'ready' } + +export enum EvalDatasetDataQualityStatusEnum { + queuing = 'queuing', + evaluating = 'evaluating', + error = 'error', + completed = 'completed' +} + +export enum EvalDatasetDataKeyEnum { + UserInput = 'userInput', + ActualOutput = 'actualOutput', + ExpectedOutput = 'expectedOutput', + Context = 'context', + RetrievalContext = 'retrievalContext' +} + +export const EvalDatasetDataQualityStatusValues = Object.values(EvalDatasetDataQualityStatusEnum); diff --git a/packages/global/core/evaluation/type.d.ts b/packages/global/core/evaluation/type.d.ts index 2075bb6d0db5..14f896a47207 100644 --- a/packages/global/core/evaluation/type.d.ts +++ b/packages/global/core/evaluation/type.d.ts @@ -1,10 +1,13 @@ import type { EvaluationStatusEnum, EvalDatasetDataCreateFromEnum, - EvalDatasetCollectionStatusEnum + EvalDatasetCollectionStatusEnum, + EvalDatasetDataQualityStatusEnum, + EvalDatasetDataKeyEnum } from './constants'; export type EvalDatasetCollectionStatus = EvalDatasetCollectionStatusEnum; +export type EvalDatasetDataQualityStatus = EvalDatasetDataQualityStatusEnum; export type EvaluationSchemaType = { _id: string; @@ -64,7 +67,6 @@ export type EvalDatasetCollectionSchemaType = { description: string; createTime: Date; updateTime: Date; - dataCountByGen: number; 
metadata: Record; }; @@ -73,11 +75,11 @@ export type EvalDatasetDataSchemaType = { teamId: string; tmbId: string; datasetId: string; - user_input: string; - actual_output: string; - expected_output: string; - context: string[]; - retrieval_context: string[]; + [EvalDatasetDataKeyEnum.UserInput]: string; + [EvalDatasetDataKeyEnum.ActualOutput]: string; + [EvalDatasetDataKeyEnum.ExpectedOutput]: string; + [EvalDatasetDataKeyEnum.Context]: string[]; + [EvalDatasetDataKeyEnum.RetrievalContext]: string[]; metadata: Record; createFrom: EvalDatasetDataCreateFromEnum; createTime: Date; diff --git a/packages/service/core/evaluation/dataQualityMq.ts b/packages/service/core/evaluation/dataset/dataQualityMq.ts similarity index 95% rename from packages/service/core/evaluation/dataQualityMq.ts rename to packages/service/core/evaluation/dataset/dataQualityMq.ts index 68b23238ddf1..48906a609c84 100644 --- a/packages/service/core/evaluation/dataQualityMq.ts +++ b/packages/service/core/evaluation/dataset/dataQualityMq.ts @@ -1,6 +1,6 @@ -import { getQueue, getWorker, QueueNames } from '../../common/bullmq'; +import { getQueue, getWorker, QueueNames } from '../../../common/bullmq'; import { type Processor } from 'bullmq'; -import { addLog } from '../../common/system/log'; +import { addLog } from '../../../common/system/log'; export type EvalDatasetDataQualityData = { dataId: string; diff --git a/packages/service/core/evaluation/dataQualityProcessor.ts b/packages/service/core/evaluation/dataset/dataQualityProcessor.ts similarity index 81% rename from packages/service/core/evaluation/dataQualityProcessor.ts rename to packages/service/core/evaluation/dataset/dataQualityProcessor.ts index 361225fa00d6..3e8984183a21 100644 --- a/packages/service/core/evaluation/dataQualityProcessor.ts +++ b/packages/service/core/evaluation/dataset/dataQualityProcessor.ts @@ -1,15 +1,19 @@ import type { Job } from 'bullmq'; -import { addLog } from '../../common/system/log'; +import { addLog } from 
'../../../common/system/log'; import { MongoEvalDatasetData } from './evalDatasetDataSchema'; import type { EvalDatasetDataQualityData } from './dataQualityMq'; +import { + EvalDatasetDataKeyEnum, + EvalDatasetDataQualityStatusEnum +} from '@fastgpt/global/core/evaluation/constants'; // FastAPI service interface schemas export type InputData = { - user_input?: string; - actual_output?: string; - expected_output?: string; - context?: string[]; - retrieval_context?: string[]; + [EvalDatasetDataKeyEnum.UserInput]?: string; + [EvalDatasetDataKeyEnum.ActualOutput]?: string; + [EvalDatasetDataKeyEnum.ExpectedOutput]?: string; + [EvalDatasetDataKeyEnum.Context]?: string[]; + [EvalDatasetDataKeyEnum.RetrievalContext]?: string[]; metadata?: Record; }; @@ -99,7 +103,7 @@ export const processEvalDatasetDataQuality = async (job: Job) { // TODO: Implement AI model call for synthesis // This is where we would call the intelligent generation model - // to generate expected_output based on user_input + // to generate expectedOutput based on userInput const synthesizedOutput = await synthesizeExpectedOutput( sourceData.q, intelligentGenerationModel @@ -46,11 +49,11 @@ async function processor(job: Job) { teamId: evalDatasetCollection.teamId, tmbId: evalDatasetCollection.tmbId, datasetId: evalDatasetCollectionId, - user_input: sourceData.q, - expected_output: synthesizedOutput, - actual_output: '', - context: [], - retrieval_context: [], + [EvalDatasetDataKeyEnum.UserInput]: sourceData.q, + [EvalDatasetDataKeyEnum.ExpectedOutput]: synthesizedOutput, + [EvalDatasetDataKeyEnum.ActualOutput]: '', + [EvalDatasetDataKeyEnum.Context]: [], + [EvalDatasetDataKeyEnum.RetrievalContext]: [], metadata: { sourceDataId: sourceData._id, sourceDatasetId: sourceData.datasetId, @@ -71,8 +74,6 @@ async function processor(job: Job) { synthesizedLength: synthesizedOutput.length }); - await checkAndUpdateDatasetStatus(evalDatasetCollectionId); - // TODO: Add audit log // TODO: Add tracking metrics @@ 
-111,40 +112,6 @@ async function synthesizeExpectedOutput( return `[AI Generated Answer for: ${userInput.substring(0, 100)}${userInput.length > 100 ? '...' : ''}]`; } -async function checkAndUpdateDatasetStatus(evalDatasetCollectionId: string) { - try { - const evalDatasetCollection = - await MongoEvalDatasetCollection.findById(evalDatasetCollectionId); - if (!evalDatasetCollection) return; - - const totalGeneratedCount = await MongoEvalDatasetData.countDocuments({ - datasetId: evalDatasetCollectionId, - createFrom: EvalDatasetDataCreateFromEnum.intelligentGeneration - }); - - addLog.info('Dataset synthesis status check', { - evalDatasetCollectionId: evalDatasetCollectionId, - totalGeneratedCount, - expectedCount: evalDatasetCollection.dataCountByGen - }); - - // If we have reached expected count, mark as ready - if (totalGeneratedCount >= evalDatasetCollection.dataCountByGen) { - // TODO: Update dataset status to ready/completed - addLog.info('Eval dataset synthesis completed', { - evalDatasetCollectionId: evalDatasetCollectionId, - totalGeneratedCount, - expectedCount: evalDatasetCollection.dataCountByGen - }); - } - } catch (error) { - addLog.error('Failed to check synthesis status', { - evalDatasetCollectionId: evalDatasetCollectionId, - error - }); - } -} - // Initialize worker export const initEvalDatasetDataSynthesizeWorker = () => { return getEvalDatasetDataSynthesizeWorker(processor); diff --git a/packages/service/core/evaluation/evalDatasetCollectionSchema.ts b/packages/service/core/evaluation/dataset/evalDatasetCollectionSchema.ts similarity index 90% rename from packages/service/core/evaluation/evalDatasetCollectionSchema.ts rename to packages/service/core/evaluation/dataset/evalDatasetCollectionSchema.ts index 3e179d4408cd..241663e39d61 100644 --- a/packages/service/core/evaluation/evalDatasetCollectionSchema.ts +++ b/packages/service/core/evaluation/dataset/evalDatasetCollectionSchema.ts @@ -2,7 +2,7 @@ import { TeamCollectionName, 
TeamMemberCollectionName } from '@fastgpt/global/support/user/team/constant'; -import { connectionMongo, getMongoModel } from '../../common/mongo'; +import { connectionMongo, getMongoModel } from '../../../common/mongo'; import type { EvalDatasetCollectionSchemaType } from '@fastgpt/global/core/evaluation/type'; const { Schema } = connectionMongo; @@ -31,7 +31,7 @@ const EvalDatasetCollectionSchema = new Schema({ type: String, default: '', trim: true, - maxlength: 500 + maxlength: 100 }, createTime: { type: Date, @@ -42,11 +42,6 @@ const EvalDatasetCollectionSchema = new Schema({ type: Date, default: Date.now }, - dataCountByGen: { - type: Number, - default: 0, - min: 0 - }, metadata: { type: Schema.Types.Mixed, default: {} diff --git a/packages/service/core/evaluation/evalDatasetDataSchema.ts b/packages/service/core/evaluation/dataset/evalDatasetDataSchema.ts similarity index 82% rename from packages/service/core/evaluation/evalDatasetDataSchema.ts rename to packages/service/core/evaluation/dataset/evalDatasetDataSchema.ts index 2d3bc8bea896..1bb2545b0406 100644 --- a/packages/service/core/evaluation/evalDatasetDataSchema.ts +++ b/packages/service/core/evaluation/dataset/evalDatasetDataSchema.ts @@ -1,9 +1,10 @@ import type { EvalDatasetDataSchemaType } from '@fastgpt/global/core/evaluation/type'; -import { connectionMongo, getMongoModel } from '../../common/mongo'; +import { connectionMongo, getMongoModel } from '../../../common/mongo'; import { EvalDatasetCollectionName } from './evalDatasetCollectionSchema'; import { EvalDatasetDataCreateFromEnum, - EvalDatasetDataCreateFromValues + EvalDatasetDataCreateFromValues, + EvalDatasetDataKeyEnum } from '@fastgpt/global/core/evaluation/constants'; import { TeamCollectionName, @@ -32,22 +33,22 @@ const EvalDatasetDataSchema = new Schema({ required: true, index: true }, - user_input: { + [EvalDatasetDataKeyEnum.UserInput]: { type: String, default: '', trim: true }, - actual_output: { + 
[EvalDatasetDataKeyEnum.ActualOutput]: { type: String, default: '', trim: true }, - expected_output: { + [EvalDatasetDataKeyEnum.ExpectedOutput]: { type: String, default: '', trim: true }, - context: { + [EvalDatasetDataKeyEnum.Context]: { type: [ { type: String, @@ -60,7 +61,7 @@ const EvalDatasetDataSchema = new Schema({ message: 'Context array cannot exceed 100 items' } }, - retrieval_context: { + [EvalDatasetDataKeyEnum.RetrievalContext]: { type: [ { type: String, @@ -97,9 +98,9 @@ EvalDatasetDataSchema.index({ datasetId: 1, updateTime: -1 }); // Text search index for searching within inputs and outputs EvalDatasetDataSchema.index({ - user_input: 'text', - expected_output: 'text', - actual_output: 'text' + [EvalDatasetDataKeyEnum.UserInput]: 'text', + [EvalDatasetDataKeyEnum.ExpectedOutput]: 'text', + [EvalDatasetDataKeyEnum.ActualOutput]: 'text' }); // Update the updateTime on save diff --git a/packages/service/core/evaluation/smartGenerateMq.ts b/packages/service/core/evaluation/dataset/smartGenerateMq.ts similarity index 95% rename from packages/service/core/evaluation/smartGenerateMq.ts rename to packages/service/core/evaluation/dataset/smartGenerateMq.ts index aae08d8d53ba..df7d051fa726 100644 --- a/packages/service/core/evaluation/smartGenerateMq.ts +++ b/packages/service/core/evaluation/dataset/smartGenerateMq.ts @@ -1,6 +1,6 @@ -import { getQueue, getWorker, QueueNames } from '../../common/bullmq'; +import { getQueue, getWorker, QueueNames } from '../../../common/bullmq'; import { type Processor } from 'bullmq'; -import { addLog } from '../../common/system/log'; +import { addLog } from '../../../common/system/log'; export type EvalDatasetSmartGenerateData = { datasetCollectionIds: string[]; diff --git a/packages/service/core/evaluation/smartGenerateProcessor.ts b/packages/service/core/evaluation/dataset/smartGenerateProcessor.ts similarity index 77% rename from packages/service/core/evaluation/smartGenerateProcessor.ts rename to 
packages/service/core/evaluation/dataset/smartGenerateProcessor.ts index 1e0cb40c6bbb..cacee416f8f6 100644 --- a/packages/service/core/evaluation/smartGenerateProcessor.ts +++ b/packages/service/core/evaluation/dataset/smartGenerateProcessor.ts @@ -1,10 +1,13 @@ import type { Job } from 'bullmq'; import type { HydratedDocument } from 'mongoose'; -import { addLog } from '../../common/system/log'; +import { addLog } from '../../../common/system/log'; import { MongoEvalDatasetCollection } from './evalDatasetCollectionSchema'; import { MongoEvalDatasetData } from './evalDatasetDataSchema'; -import { MongoDatasetData } from '../dataset/data/schema'; -import { EvalDatasetDataCreateFromEnum } from '@fastgpt/global/core/evaluation/constants'; +import { MongoDatasetData } from '../../dataset/data/schema'; +import { + EvalDatasetDataCreateFromEnum, + EvalDatasetDataKeyEnum +} from '@fastgpt/global/core/evaluation/constants'; import type { EvalDatasetDataSchemaType } from '@fastgpt/global/core/evaluation/type'; import { type EvalDatasetSmartGenerateData, @@ -70,11 +73,11 @@ async function processor(job: Job) { teamId: evalDatasetCollection.teamId, tmbId: evalDatasetCollection.tmbId, datasetId: evalDatasetCollectionId, - user_input: sample.q, - expected_output: sample.a, - actual_output: '', - context: [], - retrieval_context: [], + [EvalDatasetDataKeyEnum.UserInput]: sample.q, + [EvalDatasetDataKeyEnum.ExpectedOutput]: sample.a, + [EvalDatasetDataKeyEnum.ActualOutput]: '', + [EvalDatasetDataKeyEnum.Context]: [], + [EvalDatasetDataKeyEnum.RetrievalContext]: [], metadata: { sourceDataId: sample._id, sourceDatasetId: sample.datasetId, @@ -122,11 +125,6 @@ async function processor(job: Job) { }); } - // If all data is complete (no synthesis needed), update dataset status - if (synthesisData.length === 0) { - await checkAndUpdateDatasetStatus(evalDatasetCollectionId); - } - // TODO: Add audit log // TODO: Add tracking metrics @@ -155,33 +153,6 @@ async function processor(job: Job) 
{ } } -async function checkAndUpdateDatasetStatus(evalDatasetCollectionId: string) { - try { - const evalDatasetCollection = - await MongoEvalDatasetCollection.findById(evalDatasetCollectionId); - if (!evalDatasetCollection) return; - - const actualCount = await MongoEvalDatasetData.countDocuments({ - datasetId: evalDatasetCollectionId, - createFrom: EvalDatasetDataCreateFromEnum.intelligentGeneration - }); - - if (actualCount >= evalDatasetCollection.dataCountByGen) { - // TODO: Update dataset status to ready/completed - addLog.info('Eval dataset generation completed', { - evalDatasetCollectionId: evalDatasetCollectionId, - actualCount, - expectedCount: evalDatasetCollection.dataCountByGen - }); - } - } catch (error) { - addLog.error('Failed to check dataset status', { - evalDatasetCollectionId: evalDatasetCollectionId, - error - }); - } -} - // Initialize worker export const initEvalDatasetSmartGenerateWorker = () => { return getEvalDatasetSmartGenerateWorker(processor); diff --git a/packages/service/core/evaluation/index.ts b/packages/service/core/evaluation/index.ts index f95003d83821..a9a97ad95584 100644 --- a/packages/service/core/evaluation/index.ts +++ b/packages/service/core/evaluation/index.ts @@ -40,10 +40,8 @@ import { delay } from '@fastgpt/global/common/system/utils'; import { removeDatasetCiteText } from '../../core/ai/utils'; import { getUserChatInfoAndAuthTeamPoints } from '../../support/permission/auth/team'; import { getRunningUserInfoByTmbId } from '../../support/user/team/utils'; -import { getEvalDatasetDataQualityWorker } from './dataQualityMq'; -import { processEvalDatasetDataQuality } from './dataQualityProcessor'; -import { getEvalDatasetSmartGenerateWorker } from './smartGenerateMq'; -import { getEvalDatasetDataSynthesizeWorker } from './dataSynthesizeMq'; +import { getEvalDatasetDataQualityWorker } from './dataset/dataQualityMq'; +import { processEvalDatasetDataQuality } from './dataset/dataQualityProcessor'; type AppContextType = { 
appData: AppSchema; @@ -59,7 +57,7 @@ export const initEvaluationWorker = () => { getEvalDatasetDataQualityWorker(processEvalDatasetDataQuality); getEvaluationWorker(processor); - import('./smartGenerateProcessor') + import('./dataset/smartGenerateProcessor') .then(({ initEvalDatasetSmartGenerateWorker }) => { initEvalDatasetSmartGenerateWorker(); addLog.info('Smart generate worker initialized'); @@ -68,7 +66,7 @@ export const initEvaluationWorker = () => { addLog.error('Failed to init smart generate worker', { error }); }); - import('./dataSynthesizeProcessor') + import('./dataset/dataSynthesizeProcessor') .then(({ initEvalDatasetDataSynthesizeWorker }) => { initEvalDatasetDataSynthesizeWorker(); addLog.info('Data synthesize worker initialized'); diff --git a/projects/app/src/pages/api/core/evaluation/dataset/collection/create.ts b/projects/app/src/pages/api/core/evaluation/dataset/collection/create.ts index 453b50b61d72..518678e55aa7 100644 --- a/projects/app/src/pages/api/core/evaluation/dataset/collection/create.ts +++ b/projects/app/src/pages/api/core/evaluation/dataset/collection/create.ts @@ -3,7 +3,7 @@ import { NextAPI } from '@/service/middleware/entry'; import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun'; -import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema'; import type { createEvalDatasetCollectionBody } from '@fastgpt/global/core/evaluation/api'; export type EvalDatasetCollectionCreateQuery = {}; @@ -15,7 +15,6 @@ async function handler( ): Promise { const { name, description = '' } = req.body; - // Parameter validation if (!name || typeof name !== 'string' || name.trim().length === 0) { return 
Promise.reject('Name is required and must be a non-empty string'); } @@ -28,8 +27,8 @@ async function handler( return Promise.reject('Description must be a string'); } - if (description && description.length > 500) { - return Promise.reject('Description must be less than 500 characters'); + if (description && description.length > 100) { + return Promise.reject('Description must be less than 100 characters'); } // Authentication and authorization @@ -50,7 +49,6 @@ async function handler( return Promise.reject('A dataset with this name already exists'); } - // Create dataset collection const datasetId = await mongoSessionRun(async (session) => { const [{ _id }] = await MongoEvalDatasetCollection.create( [ diff --git a/projects/app/src/pages/api/core/evaluation/dataset/collection/deleteTask.ts b/projects/app/src/pages/api/core/evaluation/dataset/collection/deleteTask.ts index 8c03f7be5e5a..934579e697d9 100644 --- a/projects/app/src/pages/api/core/evaluation/dataset/collection/deleteTask.ts +++ b/projects/app/src/pages/api/core/evaluation/dataset/collection/deleteTask.ts @@ -2,10 +2,10 @@ import type { ApiRequestProps } from '@fastgpt/service/type/next'; import { NextAPI } from '@/service/middleware/entry'; import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; -import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema'; import { Types } from '@fastgpt/service/common/mongo'; import type { deleteTaskBody } from '@fastgpt/global/core/evaluation/api'; -import { evalDatasetDataSynthesizeQueue } from '@fastgpt/service/core/evaluation/dataSynthesizeMq'; +import { evalDatasetDataSynthesizeQueue } from '@fastgpt/service/core/evaluation/dataset/dataSynthesizeMq'; import { addLog } from 
'@fastgpt/service/common/system/log'; async function handler( diff --git a/projects/app/src/pages/api/core/evaluation/dataset/collection/failedTasks.ts b/projects/app/src/pages/api/core/evaluation/dataset/collection/failedTasks.ts index 87e5828df5d5..c2aac52e92c9 100644 --- a/projects/app/src/pages/api/core/evaluation/dataset/collection/failedTasks.ts +++ b/projects/app/src/pages/api/core/evaluation/dataset/collection/failedTasks.ts @@ -2,13 +2,13 @@ import type { ApiRequestProps } from '@fastgpt/service/type/next'; import { NextAPI } from '@/service/middleware/entry'; import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; import { ReadPermissionVal } from '@fastgpt/global/support/permission/constant'; -import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema'; import { Types } from '@fastgpt/service/common/mongo'; import type { listFailedTasksBody, listFailedTasksResponse } from '@fastgpt/global/core/evaluation/api'; -import { evalDatasetDataSynthesizeQueue } from '@fastgpt/service/core/evaluation/dataSynthesizeMq'; +import { evalDatasetDataSynthesizeQueue } from '@fastgpt/service/core/evaluation/dataset/dataSynthesizeMq'; async function handler( req: ApiRequestProps diff --git a/projects/app/src/pages/api/core/evaluation/dataset/collection/list.ts b/projects/app/src/pages/api/core/evaluation/dataset/collection/list.ts index f490a1f31768..6a2bd3b7ac52 100644 --- a/projects/app/src/pages/api/core/evaluation/dataset/collection/list.ts +++ b/projects/app/src/pages/api/core/evaluation/dataset/collection/list.ts @@ -2,7 +2,7 @@ import type { ApiRequestProps } from '@fastgpt/service/type/next'; import { NextAPI } from '@/service/middleware/entry'; import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; import { ReadPermissionVal } from 
'@fastgpt/global/support/permission/constant'; -import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema'; import { parsePaginationRequest } from '@fastgpt/service/common/api/pagination'; import { Types } from '@fastgpt/service/common/mongo'; import type { @@ -12,7 +12,7 @@ import type { import type { EvalDatasetCollectionStatus } from '@fastgpt/global/core/evaluation/type'; import { EvalDatasetCollectionStatusEnum } from '@fastgpt/global/core/evaluation/constants'; import { replaceRegChars } from '@fastgpt/global/common/string/tools'; -import { evalDatasetDataSynthesizeQueue } from '@fastgpt/service/core/evaluation/dataSynthesizeMq'; +import { evalDatasetDataSynthesizeQueue } from '@fastgpt/service/core/evaluation/dataset/dataSynthesizeMq'; async function getCollectionStatus(collectionId: string): Promise { try { @@ -37,7 +37,7 @@ async function getCollectionStatus(collectionId: string): Promise job.isWaiting() || job.isDelayed())) { - return EvalDatasetCollectionStatusEnum.queued; + return EvalDatasetCollectionStatusEnum.queuing; } return EvalDatasetCollectionStatusEnum.ready; @@ -87,7 +87,6 @@ async function handler( description: item.description || '', createTime: item.createTime, updateTime: item.updateTime, - dataCountByGen: item.dataCountByGen || 0, creatorAvatar: item.teamMember?.avatar, creatorName: item.teamMember?.name, status @@ -130,7 +129,6 @@ const buildPipeline = (match: Record, offset: number, pageSize: num description: 1, createTime: 1, updateTime: 1, - dataCountByGen: 1, teamMember: { avatar: 1, name: 1 diff --git a/projects/app/src/pages/api/core/evaluation/dataset/collection/qualityAssessmentBatch.ts b/projects/app/src/pages/api/core/evaluation/dataset/collection/qualityAssessmentBatch.ts index fb50f6a87da1..22060f7c9eb4 100644 --- 
a/projects/app/src/pages/api/core/evaluation/dataset/collection/qualityAssessmentBatch.ts +++ b/projects/app/src/pages/api/core/evaluation/dataset/collection/qualityAssessmentBatch.ts @@ -2,18 +2,19 @@ import type { ApiRequestProps } from '@fastgpt/service/type/next'; import { NextAPI } from '@/service/middleware/entry'; import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; -import { MongoEvalDatasetData } from '@fastgpt/service/core/evaluation/evalDatasetDataSchema'; -import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; +import { MongoEvalDatasetData } from '@fastgpt/service/core/evaluation/dataset/evalDatasetDataSchema'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema'; import { addEvalDatasetDataQualityJob, removeEvalDatasetDataQualityJob, evalDatasetDataQualityQueue -} from '@fastgpt/service/core/evaluation/dataQualityMq'; +} from '@fastgpt/service/core/evaluation/dataset/dataQualityMq'; import type { qualityAssessmentBatchBody, qualityAssessmentBatchResponse } from '@fastgpt/global/core/evaluation/api'; import { addLog } from '@fastgpt/service/common/system/log'; +import { EvalDatasetDataQualityStatusEnum } from '@fastgpt/global/core/evaluation/constants'; export type QualityAssessmentBatchQuery = {}; export type QualityAssessmentBatchBody = qualityAssessmentBatchBody; @@ -116,7 +117,7 @@ async function handler( // Update metadata await MongoEvalDatasetData.findByIdAndUpdate(dataId, { $set: { - 'metadata.qualityStatus': 'queuing', + 'metadata.qualityStatus': EvalDatasetDataQualityStatusEnum.queuing, 'metadata.qualityModel': evalModel, 'metadata.qualityQueueTime': new Date() } @@ -156,7 +157,7 @@ async function handler( // Update metadata await MongoEvalDatasetData.findByIdAndUpdate(dataId, { $set: { - 'metadata.qualityStatus': 'queuing', + 
'metadata.qualityStatus': EvalDatasetDataQualityStatusEnum.queuing, 'metadata.qualityModel': evalModel, 'metadata.qualityQueueTime': new Date() } diff --git a/projects/app/src/pages/api/core/evaluation/dataset/collection/retryTask.ts b/projects/app/src/pages/api/core/evaluation/dataset/collection/retryTask.ts index c95bc242d103..d8eee221813c 100644 --- a/projects/app/src/pages/api/core/evaluation/dataset/collection/retryTask.ts +++ b/projects/app/src/pages/api/core/evaluation/dataset/collection/retryTask.ts @@ -2,10 +2,10 @@ import type { ApiRequestProps } from '@fastgpt/service/type/next'; import { NextAPI } from '@/service/middleware/entry'; import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; -import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema'; import { Types } from '@fastgpt/service/common/mongo'; import type { retryTaskBody } from '@fastgpt/global/core/evaluation/api'; -import { evalDatasetDataSynthesizeQueue } from '@fastgpt/service/core/evaluation/dataSynthesizeMq'; +import { evalDatasetDataSynthesizeQueue } from '@fastgpt/service/core/evaluation/dataset/dataSynthesizeMq'; import { addLog } from '@fastgpt/service/common/system/log'; async function handler( diff --git a/projects/app/src/pages/api/core/evaluation/dataset/collection/update.ts b/projects/app/src/pages/api/core/evaluation/dataset/collection/update.ts index 927e086c481c..fb2eff2c2733 100644 --- a/projects/app/src/pages/api/core/evaluation/dataset/collection/update.ts +++ b/projects/app/src/pages/api/core/evaluation/dataset/collection/update.ts @@ -3,7 +3,7 @@ import { NextAPI } from '@/service/middleware/entry'; import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; import { authUserPer } from 
'@fastgpt/service/support/permission/user/auth'; import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun'; -import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema'; import type { updateEvalDatasetCollectionBody } from '@fastgpt/global/core/evaluation/api'; export type EvalDatasetCollectionUpdateQuery = {}; @@ -14,7 +14,6 @@ async function handler( ): Promise { const { collectionId, name, description = '' } = req.body; - // Parameter validation if (!collectionId || typeof collectionId !== 'string' || collectionId.trim().length === 0) { return Promise.reject('Collection ID is required and must be a non-empty string'); } @@ -31,8 +30,8 @@ async function handler( return Promise.reject('Description must be a string'); } - if (description && description.length > 500) { - return Promise.reject('Description must be less than 500 characters'); + if (description && description.length > 100) { + return Promise.reject('Description must be less than 100 characters'); } // TODO: Authentication check - verify user is authenticated via cookie or token @@ -67,7 +66,6 @@ async function handler( return Promise.reject('A dataset with this name already exists'); } - // Update dataset collection try { await mongoSessionRun(async (session) => { await MongoEvalDatasetCollection.updateOne( diff --git a/projects/app/src/pages/api/core/evaluation/dataset/data/create.ts b/projects/app/src/pages/api/core/evaluation/dataset/data/create.ts index 4ff64cf99911..f7fdc2326119 100644 --- a/projects/app/src/pages/api/core/evaluation/dataset/data/create.ts +++ b/projects/app/src/pages/api/core/evaluation/dataset/data/create.ts @@ -3,9 +3,12 @@ import { NextAPI } from '@/service/middleware/entry'; import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; import { authUserPer } from 
'@fastgpt/service/support/permission/user/auth'; import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun'; -import { MongoEvalDatasetData } from '@fastgpt/service/core/evaluation/evalDatasetDataSchema'; -import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; -import { EvalDatasetDataCreateFromEnum } from '@fastgpt/global/core/evaluation/constants'; +import { MongoEvalDatasetData } from '@fastgpt/service/core/evaluation/dataset/evalDatasetDataSchema'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema'; +import { + EvalDatasetDataCreateFromEnum, + EvalDatasetDataKeyEnum +} from '@fastgpt/global/core/evaluation/constants'; import type { createEvalDatasetDataBody } from '@fastgpt/global/core/evaluation/api'; export type EvalDatasetDataCreateQuery = {}; @@ -15,27 +18,23 @@ export type EvalDatasetDataCreateResponse = string; async function handler( req: ApiRequestProps ): Promise { - const { collectionId, user_input, actual_output, expected_output, context, retrieval_context } = + const { collectionId, userInput, actualOutput, expectedOutput, context, retrievalContext } = req.body; if (!collectionId || typeof collectionId !== 'string') { return Promise.reject('collectionId is required and must be a string'); } - if (!user_input || typeof user_input !== 'string' || user_input.trim().length === 0) { - return Promise.reject('user_input is required and must be a non-empty string'); + if (!userInput || typeof userInput !== 'string' || userInput.trim().length === 0) { + return Promise.reject('userInput is required and must be a non-empty string'); } - if ( - !expected_output || - typeof expected_output !== 'string' || - expected_output.trim().length === 0 - ) { - return Promise.reject('expected_output is required and must be a non-empty string'); + if (!expectedOutput || typeof expectedOutput !== 'string' || expectedOutput.trim().length === 0) { 
+ return Promise.reject('expectedOutput is required and must be a non-empty string'); } - if (actual_output !== undefined && typeof actual_output !== 'string') { - return Promise.reject('actual_output must be a string if provided'); + if (actualOutput !== undefined && typeof actualOutput !== 'string') { + return Promise.reject('actualOutput must be a string if provided'); } if ( @@ -46,11 +45,11 @@ async function handler( } if ( - retrieval_context !== undefined && - (!Array.isArray(retrieval_context) || - !retrieval_context.every((item) => typeof item === 'string')) + retrievalContext !== undefined && + (!Array.isArray(retrievalContext) || + !retrievalContext.every((item) => typeof item === 'string')) ) { - return Promise.reject('retrieval_context must be an array of strings if provided'); + return Promise.reject('retrievalContext must be an array of strings if provided'); } const { teamId, tmbId } = await authUserPer({ @@ -77,11 +76,11 @@ async function handler( teamId, tmbId, datasetId: collectionId, - user_input: user_input.trim(), - actual_output: actual_output?.trim() || '', - expected_output: expected_output.trim(), - context: context || [], - retrieval_context: retrieval_context || [], + [EvalDatasetDataKeyEnum.UserInput]: userInput.trim(), + [EvalDatasetDataKeyEnum.ActualOutput]: actualOutput?.trim() || '', + [EvalDatasetDataKeyEnum.ExpectedOutput]: expectedOutput.trim(), + [EvalDatasetDataKeyEnum.Context]: context || [], + [EvalDatasetDataKeyEnum.RetrievalContext]: retrievalContext || [], createFrom: EvalDatasetDataCreateFromEnum.manual } ], diff --git a/projects/app/src/pages/api/core/evaluation/dataset/data/delete.ts b/projects/app/src/pages/api/core/evaluation/dataset/data/delete.ts index 604ac77e0e43..5c7bfecae14c 100644 --- a/projects/app/src/pages/api/core/evaluation/dataset/data/delete.ts +++ b/projects/app/src/pages/api/core/evaluation/dataset/data/delete.ts @@ -3,13 +3,13 @@ import { NextAPI } from '@/service/middleware/entry'; import { 
WritePermissionVal } from '@fastgpt/global/support/permission/constant'; import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun'; -import { MongoEvalDatasetData } from '@fastgpt/service/core/evaluation/evalDatasetDataSchema'; -import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; +import { MongoEvalDatasetData } from '@fastgpt/service/core/evaluation/dataset/evalDatasetDataSchema'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema'; import type { deleteEvalDatasetDataQuery } from '@fastgpt/global/core/evaluation/api'; import { removeEvalDatasetDataQualityJob, checkEvalDatasetDataQualityJobActive -} from '@fastgpt/service/core/evaluation/dataQualityMq'; +} from '@fastgpt/service/core/evaluation/dataset/dataQualityMq'; import { addLog } from '@fastgpt/service/common/system/log'; export type EvalDatasetDataDeleteQuery = deleteEvalDatasetDataQuery; @@ -21,7 +21,7 @@ async function handler( ): Promise { const { dataId } = req.query; - if (!dataId || typeof dataId !== 'string') { + if (!dataId || typeof dataId !== 'string' || dataId.trim().length === 0) { return Promise.reject('dataId is required and must be a string'); } diff --git a/projects/app/src/pages/api/core/evaluation/dataset/data/fileId.ts b/projects/app/src/pages/api/core/evaluation/dataset/data/fileId.ts index 06e04c01a50b..34e1b2a71df2 100644 --- a/projects/app/src/pages/api/core/evaluation/dataset/data/fileId.ts +++ b/projects/app/src/pages/api/core/evaluation/dataset/data/fileId.ts @@ -3,14 +3,17 @@ import { NextAPI } from '@/service/middleware/entry'; import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun'; -import { 
MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; -import { MongoEvalDatasetData } from '@fastgpt/service/core/evaluation/evalDatasetDataSchema'; -import { EvalDatasetDataCreateFromEnum } from '@fastgpt/global/core/evaluation/constants'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema'; +import { MongoEvalDatasetData } from '@fastgpt/service/core/evaluation/dataset/evalDatasetDataSchema'; +import { + EvalDatasetDataCreateFromEnum, + EvalDatasetDataKeyEnum +} from '@fastgpt/global/core/evaluation/constants'; import { readFileContentFromMongo } from '@fastgpt/service/common/file/gridfs/controller'; import { authCollectionFile } from '@fastgpt/service/support/permission/auth/file'; import { BucketNameEnum } from '@fastgpt/global/common/file/constants'; import type { importEvalDatasetFromFileBody } from '@fastgpt/global/core/evaluation/api'; -import { addEvalDatasetDataQualityJob } from '@fastgpt/service/core/evaluation/dataQualityMq'; +import { addEvalDatasetDataQualityJob } from '@fastgpt/service/core/evaluation/dataset/dataQualityMq'; export type EvalDatasetImportFromFileQuery = {}; export type EvalDatasetImportFromFileBody = importEvalDatasetFromFileBody; @@ -22,6 +25,14 @@ const OPTIONAL_CSV_COLUMNS = ['actual_output', 'context', 'retrieval_context', ' const CSV_COLUMNS = [...REQUIRED_CSV_COLUMNS, ...OPTIONAL_CSV_COLUMNS] as const; +const ENUM_TO_CSV_MAPPING = { + [EvalDatasetDataKeyEnum.UserInput]: 'user_input', + [EvalDatasetDataKeyEnum.ExpectedOutput]: 'expected_output', + [EvalDatasetDataKeyEnum.ActualOutput]: 'actual_output', + [EvalDatasetDataKeyEnum.Context]: 'context', + [EvalDatasetDataKeyEnum.RetrievalContext]: 'retrieval_context' +} as const; + interface CSVRow { user_input: string; expected_output: string; @@ -62,6 +73,14 @@ function parseCSVLine(line: string): string[] { return result; } +function normalizeHeaderName(header: string): 
string { + const enumValue = header as keyof typeof ENUM_TO_CSV_MAPPING; + if (ENUM_TO_CSV_MAPPING[enumValue]) { + return ENUM_TO_CSV_MAPPING[enumValue]; + } + return header; +} + function parseCSVContent(csvContent: string): CSVRow[] { const lines = csvContent.split('\n').filter((line) => line.trim()); @@ -71,7 +90,8 @@ function parseCSVContent(csvContent: string): CSVRow[] { // Parse header const headerLine = lines[0]; - const headers = parseCSVLine(headerLine).map((h) => h.replace(/^"|"$/g, '')); + const rawHeaders = parseCSVLine(headerLine).map((h) => h.replace(/^"|"$/g, '')); + const headers = rawHeaders.map(normalizeHeaderName); // Validate CSV structure const missingColumns = REQUIRED_CSV_COLUMNS.filter((col) => !headers.includes(col)); @@ -128,18 +148,13 @@ function parseCSVContent(csvContent: string): CSVRow[] { async function handler( req: ApiRequestProps ): Promise { - const { - fileId, - collectionId: datasetCollectionId, - enableQualityEvaluation, - qualityEvaluationModel - } = req.body; + const { fileId, collectionId, enableQualityEvaluation, qualityEvaluationModel } = req.body; if (!fileId || typeof fileId !== 'string') { return 'fileId is required and must be a string'; } - if (!datasetCollectionId || typeof datasetCollectionId !== 'string') { + if (!collectionId || typeof collectionId !== 'string') { return 'datasetCollectionId is required and must be a string'; } @@ -175,7 +190,7 @@ async function handler( } // Verify dataset collection exists and belongs to team - const datasetCollection = await MongoEvalDatasetCollection.findById(datasetCollectionId); + const datasetCollection = await MongoEvalDatasetCollection.findById(collectionId); if (!datasetCollection) { return 'Evaluation dataset collection not found'; } @@ -258,12 +273,12 @@ async function handler( return { teamId, tmbId, - datasetId: datasetCollectionId, - user_input: row.user_input, - expected_output: row.expected_output, - actual_output: row.actual_output || '', - context: 
contextArray, - retrieval_context: retrievalContextArray, + datasetId: collectionId, + [EvalDatasetDataKeyEnum.UserInput]: row.user_input, + [EvalDatasetDataKeyEnum.ExpectedOutput]: row.expected_output, + [EvalDatasetDataKeyEnum.ActualOutput]: row.actual_output || '', + [EvalDatasetDataKeyEnum.Context]: contextArray, + [EvalDatasetDataKeyEnum.RetrievalContext]: retrievalContextArray, metadata: metadataObj, createFrom: EvalDatasetDataCreateFromEnum.fileImport }; diff --git a/projects/app/src/pages/api/core/evaluation/dataset/data/list.ts b/projects/app/src/pages/api/core/evaluation/dataset/data/list.ts index 5dcc72bbfae9..1911dd605431 100644 --- a/projects/app/src/pages/api/core/evaluation/dataset/data/list.ts +++ b/projects/app/src/pages/api/core/evaluation/dataset/data/list.ts @@ -2,8 +2,8 @@ import type { ApiRequestProps } from '@fastgpt/service/type/next'; import { NextAPI } from '@/service/middleware/entry'; import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; import { ReadPermissionVal } from '@fastgpt/global/support/permission/constant'; -import { MongoEvalDatasetData } from '@fastgpt/service/core/evaluation/evalDatasetDataSchema'; -import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; +import { MongoEvalDatasetData } from '@fastgpt/service/core/evaluation/dataset/evalDatasetDataSchema'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema'; import { parsePaginationRequest } from '@fastgpt/service/common/api/pagination'; import { Types } from '@fastgpt/service/common/mongo'; import type { @@ -11,6 +11,7 @@ import type { listEvalDatasetDataResponse } from '@fastgpt/global/core/evaluation/api'; import { replaceRegChars } from '@fastgpt/global/common/string/tools'; +import { EvalDatasetDataKeyEnum } from '@fastgpt/global/core/evaluation/constants'; async function handler( req: ApiRequestProps @@ -53,9 +54,9 @@ async function 
handler( if (searchKey && typeof searchKey === 'string' && searchKey.trim().length > 0) { const searchRegex = new RegExp(`${replaceRegChars(searchKey.trim())}`, 'i'); match.$or = [ - { user_input: { $regex: searchRegex } }, - { expected_output: { $regex: searchRegex } }, - { actual_output: { $regex: searchRegex } } + { [EvalDatasetDataKeyEnum.UserInput]: { $regex: searchRegex } }, + { [EvalDatasetDataKeyEnum.ExpectedOutput]: { $regex: searchRegex } }, + { [EvalDatasetDataKeyEnum.ActualOutput]: { $regex: searchRegex } } ]; } @@ -80,11 +81,11 @@ async function handler( total, list: dataList.map((item) => ({ _id: String(item._id), - user_input: item.user_input, - actual_output: item.actual_output || '', - expected_output: item.expected_output, - context: item.context || [], - retrieval_context: item.retrieval_context || [], + [EvalDatasetDataKeyEnum.UserInput]: item.userInput, + [EvalDatasetDataKeyEnum.ActualOutput]: item.actualOutput || '', + [EvalDatasetDataKeyEnum.ExpectedOutput]: item.expectedOutput, + [EvalDatasetDataKeyEnum.Context]: item.context || [], + [EvalDatasetDataKeyEnum.RetrievalContext]: item.retrievalContext || [], metadata: item.metadata || {}, createFrom: item.createFrom, createTime: item.createTime, @@ -115,11 +116,11 @@ const buildPipeline = (match: Record, offset: number, pageSize: num { $project: { _id: 1, - user_input: 1, - actual_output: 1, - expected_output: 1, - context: 1, - retrieval_context: 1, + [EvalDatasetDataKeyEnum.UserInput]: 1, + [EvalDatasetDataKeyEnum.ActualOutput]: 1, + [EvalDatasetDataKeyEnum.ExpectedOutput]: 1, + [EvalDatasetDataKeyEnum.Context]: 1, + [EvalDatasetDataKeyEnum.RetrievalContext]: 1, metadata: 1, createFrom: 1, createTime: 1, diff --git a/projects/app/src/pages/api/core/evaluation/dataset/data/qualityAssessment.ts b/projects/app/src/pages/api/core/evaluation/dataset/data/qualityAssessment.ts index c82e285e244f..ba44c4b600b6 100644 --- a/projects/app/src/pages/api/core/evaluation/dataset/data/qualityAssessment.ts 
+++ b/projects/app/src/pages/api/core/evaluation/dataset/data/qualityAssessment.ts @@ -2,14 +2,15 @@ import type { ApiRequestProps } from '@fastgpt/service/type/next'; import { NextAPI } from '@/service/middleware/entry'; import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; -import { MongoEvalDatasetData } from '@fastgpt/service/core/evaluation/evalDatasetDataSchema'; -import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; +import { MongoEvalDatasetData } from '@fastgpt/service/core/evaluation/dataset/evalDatasetDataSchema'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema'; import { addEvalDatasetDataQualityJob, removeEvalDatasetDataQualityJob, checkEvalDatasetDataQualityJobActive -} from '@fastgpt/service/core/evaluation/dataQualityMq'; +} from '@fastgpt/service/core/evaluation/dataset/dataQualityMq'; import type { qualityAssessmentBody } from '@fastgpt/global/core/evaluation/api'; +import { EvalDatasetDataQualityStatusEnum } from '@fastgpt/global/core/evaluation/constants'; export type QualityAssessmentQuery = {}; export type QualityAssessmentBody = qualityAssessmentBody; @@ -62,7 +63,7 @@ async function handler( await MongoEvalDatasetData.findByIdAndUpdate(dataId, { $set: { - 'metadata.qualityStatus': 'queuing', + 'metadata.qualityStatus': EvalDatasetDataQualityStatusEnum.queuing, 'metadata.qualityModel': evalModel, 'metadata.qualityQueueTime': new Date() } diff --git a/projects/app/src/pages/api/core/evaluation/dataset/data/smartGenerate.ts b/projects/app/src/pages/api/core/evaluation/dataset/data/smartGenerate.ts index 24a35a8c0519..4702ad34c39e 100644 --- a/projects/app/src/pages/api/core/evaluation/dataset/data/smartGenerate.ts +++ b/projects/app/src/pages/api/core/evaluation/dataset/data/smartGenerate.ts @@ -2,11 +2,11 @@ import type { 
ApiRequestProps } from '@fastgpt/service/type/next'; import { NextAPI } from '@/service/middleware/entry'; import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; -import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema'; import { MongoDatasetCollection } from '@fastgpt/service/core/dataset/collection/schema'; import { MongoDatasetData } from '@fastgpt/service/core/dataset/data/schema'; import type { smartGenerateEvalDatasetBody } from '@fastgpt/global/core/evaluation/api'; -import { addEvalDatasetSmartGenerateJob } from '@fastgpt/service/core/evaluation/smartGenerateMq'; +import { addEvalDatasetSmartGenerateJob } from '@fastgpt/service/core/evaluation/dataset/smartGenerateMq'; export type SmartGenerateEvalDatasetQuery = {}; export type SmartGenerateEvalDatasetBody = smartGenerateEvalDatasetBody; @@ -91,10 +91,6 @@ async function handler( evalDatasetCollectionId: collectionId }); - await MongoEvalDatasetCollection.findByIdAndUpdate(collectionId, { - $inc: { dataCountByGen: finalCount } - }); - // TODO: Add audit log for smart generation operation // TODO: Add tracking metrics for smart generation diff --git a/projects/app/src/pages/api/core/evaluation/dataset/data/update.ts b/projects/app/src/pages/api/core/evaluation/dataset/data/update.ts index 89043618c0e8..85e79f6a7597 100644 --- a/projects/app/src/pages/api/core/evaluation/dataset/data/update.ts +++ b/projects/app/src/pages/api/core/evaluation/dataset/data/update.ts @@ -3,14 +3,15 @@ import { NextAPI } from '@/service/middleware/entry'; import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; import { mongoSessionRun } from 
'@fastgpt/service/common/mongo/sessionRun'; -import { MongoEvalDatasetData } from '@fastgpt/service/core/evaluation/evalDatasetDataSchema'; -import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; +import { MongoEvalDatasetData } from '@fastgpt/service/core/evaluation/dataset/evalDatasetDataSchema'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema'; import type { updateEvalDatasetDataBody } from '@fastgpt/global/core/evaluation/api'; import { removeEvalDatasetDataQualityJob, addEvalDatasetDataQualityJob -} from '@fastgpt/service/core/evaluation/dataQualityMq'; +} from '@fastgpt/service/core/evaluation/dataset/dataQualityMq'; import { addLog } from '@fastgpt/service/common/system/log'; +import { EvalDatasetDataKeyEnum } from '@fastgpt/global/core/evaluation/constants'; export type EvalDatasetDataUpdateQuery = {}; export type EvalDatasetDataUpdateBody = updateEvalDatasetDataBody; @@ -21,11 +22,11 @@ async function handler( ): Promise { const { dataId, - user_input, - actual_output, - expected_output, + userInput, + actualOutput, + expectedOutput, context, - retrieval_context, + retrievalContext, enableQualityEvaluation, qualityEvaluationModel } = req.body; @@ -34,20 +35,16 @@ async function handler( return Promise.reject('dataId is required and must be a string'); } - if (!user_input || typeof user_input !== 'string' || user_input.trim().length === 0) { - return Promise.reject('user_input is required and must be a non-empty string'); + if (!userInput || typeof userInput !== 'string' || userInput.trim().length === 0) { + return Promise.reject('userInput is required and must be a non-empty string'); } - if ( - !expected_output || - typeof expected_output !== 'string' || - expected_output.trim().length === 0 - ) { - return Promise.reject('expected_output is required and must be a non-empty string'); + if (!expectedOutput || typeof expectedOutput !== 
'string' || expectedOutput.trim().length === 0) { + return Promise.reject('expectedOutput is required and must be a non-empty string'); } - if (actual_output !== undefined && typeof actual_output !== 'string') { - return Promise.reject('actual_output must be a string if provided'); + if (actualOutput !== undefined && typeof actualOutput !== 'string') { + return Promise.reject('actualOutput must be a string if provided'); } if ( @@ -58,11 +55,11 @@ async function handler( } if ( - retrieval_context !== undefined && - (!Array.isArray(retrieval_context) || - !retrieval_context.every((item) => typeof item === 'string')) + retrievalContext !== undefined && + (!Array.isArray(retrievalContext) || + !retrievalContext.every((item) => typeof item === 'string')) ) { - return Promise.reject('retrieval_context must be an array of strings if provided'); + return Promise.reject('retrievalContext must be an array of strings if provided'); } if (typeof enableQualityEvaluation !== 'boolean') { @@ -104,11 +101,11 @@ async function handler( await MongoEvalDatasetData.updateOne( { _id: dataId }, { - user_input: user_input.trim(), - actual_output: actual_output?.trim() || '', - expected_output: expected_output.trim(), - context: context || [], - retrieval_context: retrieval_context || [], + [EvalDatasetDataKeyEnum.UserInput]: userInput.trim(), + [EvalDatasetDataKeyEnum.ActualOutput]: actualOutput?.trim() || '', + [EvalDatasetDataKeyEnum.ExpectedOutput]: expectedOutput.trim(), + [EvalDatasetDataKeyEnum.Context]: context || [], + [EvalDatasetDataKeyEnum.RetrievalContext]: retrievalContext || [], updateTime: new Date() }, { session } diff --git a/test/cases/pages/api/core/evaluation/dataset/collection/create.test.ts b/test/cases/pages/api/core/evaluation/dataset/collection/create.test.ts index c6927eee5c29..67721ba600ca 100644 --- a/test/cases/pages/api/core/evaluation/dataset/collection/create.test.ts +++ b/test/cases/pages/api/core/evaluation/dataset/collection/create.test.ts @@ -2,12 
+2,12 @@ import { describe, expect, it, vi, beforeEach } from 'vitest'; import { handler_test } from '@/pages/api/core/evaluation/dataset/collection/create'; import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun'; -import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema'; import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; vi.mock('@fastgpt/service/support/permission/user/auth'); vi.mock('@fastgpt/service/common/mongo/sessionRun'); -vi.mock('@fastgpt/service/core/evaluation/evalDatasetCollectionSchema', () => ({ +vi.mock('@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema', () => ({ MongoEvalDatasetCollection: { findOne: vi.fn(), create: vi.fn() @@ -46,10 +46,9 @@ describe('EvalDatasetCollection Create API', () => { body: { description: 'Test description' } }; - await expect(handler_test(req as any)).rejects.toEqual({ - statusCode: 400, - message: 'Name is required and must be a non-empty string' - }); + await expect(handler_test(req as any)).rejects.toEqual( + 'Name is required and must be a non-empty string' + ); }); it('should reject when name is empty string', async () => { @@ -57,10 +56,9 @@ describe('EvalDatasetCollection Create API', () => { body: { name: '', description: 'Test description' } }; - await expect(handler_test(req as any)).rejects.toEqual({ - statusCode: 400, - message: 'Name is required and must be a non-empty string' - }); + await expect(handler_test(req as any)).rejects.toEqual( + 'Name is required and must be a non-empty string' + ); }); it('should reject when name is only whitespace', async () => { @@ -68,10 +66,9 @@ describe('EvalDatasetCollection Create API', () => { body: { name: ' ', description: 'Test description' } }; - await 
expect(handler_test(req as any)).rejects.toEqual({ - statusCode: 400, - message: 'Name is required and must be a non-empty string' - }); + await expect(handler_test(req as any)).rejects.toEqual( + 'Name is required and must be a non-empty string' + ); }); it('should reject when name is not a string', async () => { @@ -79,10 +76,9 @@ describe('EvalDatasetCollection Create API', () => { body: { name: 123, description: 'Test description' } }; - await expect(handler_test(req as any)).rejects.toEqual({ - statusCode: 400, - message: 'Name is required and must be a non-empty string' - }); + await expect(handler_test(req as any)).rejects.toEqual( + 'Name is required and must be a non-empty string' + ); }); it('should reject when name exceeds 100 characters', async () => { @@ -91,10 +87,9 @@ describe('EvalDatasetCollection Create API', () => { body: { name: longName, description: 'Test description' } }; - await expect(handler_test(req as any)).rejects.toEqual({ - statusCode: 400, - message: 'Name must be less than 100 characters' - }); + await expect(handler_test(req as any)).rejects.toEqual( + 'Name must be less than 100 characters' + ); }); it('should reject when description is not a string', async () => { @@ -102,22 +97,18 @@ describe('EvalDatasetCollection Create API', () => { body: { name: 'Test Dataset', description: 123 } }; - await expect(handler_test(req as any)).rejects.toEqual({ - statusCode: 400, - message: 'Description must be a string' - }); + await expect(handler_test(req as any)).rejects.toEqual('Description must be a string'); }); - it('should reject when description exceeds 500 characters', async () => { - const longDescription = 'a'.repeat(501); + it('should reject when description exceeds 100 characters', async () => { + const longDescription = 'a'.repeat(101); const req = { body: { name: 'Test Dataset', description: longDescription } }; - await expect(handler_test(req as any)).rejects.toEqual({ - statusCode: 400, - message: 'Description must be less 
than 500 characters' - }); + await expect(handler_test(req as any)).rejects.toEqual( + 'Description must be less than 100 characters' + ); }); it('should accept valid name without description', async () => { @@ -179,10 +170,9 @@ describe('EvalDatasetCollection Create API', () => { body: { name: 'Test Dataset', description: 'Test description' } }; - await expect(handler_test(req as any)).rejects.toEqual({ - statusCode: 409, - message: 'A dataset with this name already exists' - }); + await expect(handler_test(req as any)).rejects.toEqual( + 'A dataset with this name already exists' + ); expect(mockMongoEvalDatasetCollection.findOne).toHaveBeenCalledWith({ teamId: validTeamId, @@ -320,8 +310,8 @@ describe('EvalDatasetCollection Create API', () => { expect(result).toBe(mockDatasetId); }); - it('should handle exactly 500 character description', async () => { - const exactDescription = 'a'.repeat(500); + it('should handle exactly 100 character description', async () => { + const exactDescription = 'a'.repeat(100); const req = { body: { name: 'Test Dataset', description: exactDescription } }; diff --git a/test/cases/pages/api/core/evaluation/dataset/collection/list.test.ts b/test/cases/pages/api/core/evaluation/dataset/collection/list.test.ts index bdf8aedda51b..0bce6d8b5a06 100644 --- a/test/cases/pages/api/core/evaluation/dataset/collection/list.test.ts +++ b/test/cases/pages/api/core/evaluation/dataset/collection/list.test.ts @@ -1,12 +1,12 @@ import { describe, expect, it, vi, beforeEach } from 'vitest'; import { handler_test } from '@/pages/api/core/evaluation/dataset/collection/list'; import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; -import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema'; import { ReadPermissionVal } from '@fastgpt/global/support/permission/constant'; 
import { Types } from '@fastgpt/service/common/mongo'; vi.mock('@fastgpt/service/support/permission/user/auth'); -vi.mock('@fastgpt/service/core/evaluation/evalDatasetCollectionSchema', () => ({ +vi.mock('@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema', () => ({ MongoEvalDatasetCollection: { aggregate: vi.fn(), countDocuments: vi.fn() @@ -26,7 +26,6 @@ describe('EvalDatasetCollection List API', () => { description: 'First dataset', createTime: new Date('2024-01-01'), updateTime: new Date('2024-01-02'), - dataCountByGen: 5, teamMember: { avatar: 'avatar1.jpg', name: 'User One' @@ -38,7 +37,6 @@ describe('EvalDatasetCollection List API', () => { description: 'Second dataset', createTime: new Date('2024-01-03'), updateTime: new Date('2024-01-04'), - dataCountByGen: 10, teamMember: { avatar: 'avatar2.jpg', name: 'User Two' @@ -260,7 +258,6 @@ describe('EvalDatasetCollection List API', () => { description: 1, createTime: 1, updateTime: 1, - dataCountByGen: 1, teamMember: { avatar: 1, name: 1 @@ -304,20 +301,20 @@ describe('EvalDatasetCollection List API', () => { { _id: '65f5b5b5b5b5b5b5b5b5b5b1', name: 'Dataset 1', + status: 'ready', description: 'First dataset', createTime: expect.any(Date), updateTime: expect.any(Date), - dataCountByGen: 5, creatorAvatar: 'avatar1.jpg', creatorName: 'User One' }, { _id: '65f5b5b5b5b5b5b5b5b5b5b2', name: 'Dataset 2', + status: 'ready', description: 'Second dataset', createTime: expect.any(Date), updateTime: expect.any(Date), - dataCountByGen: 10, creatorAvatar: 'avatar2.jpg', creatorName: 'User Two' } @@ -333,7 +330,6 @@ describe('EvalDatasetCollection List API', () => { description: 'First dataset', createTime: new Date('2024-01-01'), updateTime: new Date('2024-01-02'), - dataCountByGen: 5, teamMember: null } ]; @@ -350,10 +346,10 @@ describe('EvalDatasetCollection List API', () => { expect(result.list[0]).toEqual({ _id: '65f5b5b5b5b5b5b5b5b5b5b1', name: 'Dataset 1', + status: 'ready', description: 'First 
dataset', createTime: expect.any(Date), updateTime: expect.any(Date), - dataCountByGen: 5, creatorAvatar: undefined, creatorName: undefined }); @@ -366,7 +362,6 @@ describe('EvalDatasetCollection List API', () => { name: 'Dataset 1', createTime: new Date('2024-01-01'), updateTime: new Date('2024-01-02'), - dataCountByGen: 5, teamMember: { avatar: 'avatar1.jpg', name: 'User One' @@ -386,33 +381,6 @@ describe('EvalDatasetCollection List API', () => { expect(result.list[0].description).toBe(''); }); - it('should handle missing dataCountByGen', async () => { - const collectionsWithoutDataCount = [ - { - _id: '65f5b5b5b5b5b5b5b5b5b5b1', - name: 'Dataset 1', - description: 'First dataset', - createTime: new Date('2024-01-01'), - updateTime: new Date('2024-01-02'), - teamMember: { - avatar: 'avatar1.jpg', - name: 'User One' - } - } - ]; - - mockMongoEvalDatasetCollection.aggregate.mockResolvedValue(collectionsWithoutDataCount); - mockMongoEvalDatasetCollection.countDocuments.mockResolvedValue(1); - - const req = { - body: { pageNum: 1, pageSize: 10 } - }; - - const result = await handler_test(req as any); - - expect(result.list[0].dataCountByGen).toBe(0); - }); - it('should convert ObjectId to string', async () => { const req = { body: { pageNum: 1, pageSize: 10 } diff --git a/test/cases/pages/api/core/evaluation/dataset/collection/update.test.ts b/test/cases/pages/api/core/evaluation/dataset/collection/update.test.ts index ef3ceb1c3c3b..c73cd37e4f45 100644 --- a/test/cases/pages/api/core/evaluation/dataset/collection/update.test.ts +++ b/test/cases/pages/api/core/evaluation/dataset/collection/update.test.ts @@ -2,12 +2,12 @@ import { describe, expect, it, vi, beforeEach } from 'vitest'; import { handler_test } from '@/pages/api/core/evaluation/dataset/collection/update'; import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun'; -import { MongoEvalDatasetCollection } from 
'@fastgpt/service/core/evaluation/evalDatasetCollectionSchema'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema'; import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; vi.mock('@fastgpt/service/support/permission/user/auth'); vi.mock('@fastgpt/service/common/mongo/sessionRun'); -vi.mock('@fastgpt/service/core/evaluation/evalDatasetCollectionSchema', () => ({ +vi.mock('@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema', () => ({ MongoEvalDatasetCollection: { findOne: vi.fn(), updateOne: vi.fn() @@ -55,10 +55,9 @@ describe('EvalDatasetCollection Update API', () => { body: { name: 'Updated Name', description: 'Updated description' } }; - await expect(handler_test(req as any)).rejects.toEqual({ - statusCode: 400, - message: 'Collection ID is required and must be a non-empty string' - }); + await expect(handler_test(req as any)).rejects.toEqual( + 'Collection ID is required and must be a non-empty string' + ); }); it('should reject when collectionId is empty string', async () => { @@ -66,10 +65,9 @@ describe('EvalDatasetCollection Update API', () => { body: { collectionId: '', name: 'Updated Name', description: 'Updated description' } }; - await expect(handler_test(req as any)).rejects.toEqual({ - statusCode: 400, - message: 'Collection ID is required and must be a non-empty string' - }); + await expect(handler_test(req as any)).rejects.toEqual( + 'Collection ID is required and must be a non-empty string' + ); }); it('should reject when collectionId is only whitespace', async () => { @@ -77,10 +75,9 @@ describe('EvalDatasetCollection Update API', () => { body: { collectionId: ' ', name: 'Updated Name', description: 'Updated description' } }; - await expect(handler_test(req as any)).rejects.toEqual({ - statusCode: 400, - message: 'Collection ID is required and must be a non-empty string' - }); + await expect(handler_test(req as any)).rejects.toEqual( + 
'Collection ID is required and must be a non-empty string' + ); }); it('should reject when collectionId is not a string', async () => { @@ -88,10 +85,9 @@ describe('EvalDatasetCollection Update API', () => { body: { collectionId: 123, name: 'Updated Name', description: 'Updated description' } }; - await expect(handler_test(req as any)).rejects.toEqual({ - statusCode: 400, - message: 'Collection ID is required and must be a non-empty string' - }); + await expect(handler_test(req as any)).rejects.toEqual( + 'Collection ID is required and must be a non-empty string' + ); }); it('should reject when name is missing', async () => { @@ -99,10 +95,9 @@ describe('EvalDatasetCollection Update API', () => { body: { collectionId: mockCollectionId, description: 'Updated description' } }; - await expect(handler_test(req as any)).rejects.toEqual({ - statusCode: 400, - message: 'Name is required and must be a non-empty string' - }); + await expect(handler_test(req as any)).rejects.toEqual( + 'Name is required and must be a non-empty string' + ); }); it('should reject when name is empty string', async () => { @@ -110,10 +105,9 @@ describe('EvalDatasetCollection Update API', () => { body: { collectionId: mockCollectionId, name: '', description: 'Updated description' } }; - await expect(handler_test(req as any)).rejects.toEqual({ - statusCode: 400, - message: 'Name is required and must be a non-empty string' - }); + await expect(handler_test(req as any)).rejects.toEqual( + 'Name is required and must be a non-empty string' + ); }); it('should reject when name is only whitespace', async () => { @@ -121,10 +115,9 @@ describe('EvalDatasetCollection Update API', () => { body: { collectionId: mockCollectionId, name: ' ', description: 'Updated description' } }; - await expect(handler_test(req as any)).rejects.toEqual({ - statusCode: 400, - message: 'Name is required and must be a non-empty string' - }); + await expect(handler_test(req as any)).rejects.toEqual( + 'Name is required and must 
be a non-empty string' + ); }); it('should reject when name is not a string', async () => { @@ -132,10 +125,9 @@ describe('EvalDatasetCollection Update API', () => { body: { collectionId: mockCollectionId, name: 123, description: 'Updated description' } }; - await expect(handler_test(req as any)).rejects.toEqual({ - statusCode: 400, - message: 'Name is required and must be a non-empty string' - }); + await expect(handler_test(req as any)).rejects.toEqual( + 'Name is required and must be a non-empty string' + ); }); it('should reject when name exceeds 100 characters', async () => { @@ -144,10 +136,9 @@ describe('EvalDatasetCollection Update API', () => { body: { collectionId: mockCollectionId, name: longName, description: 'Updated description' } }; - await expect(handler_test(req as any)).rejects.toEqual({ - statusCode: 400, - message: 'Name must be less than 100 characters' - }); + await expect(handler_test(req as any)).rejects.toEqual( + 'Name must be less than 100 characters' + ); }); it('should reject when description is not a string', async () => { @@ -155,22 +146,18 @@ describe('EvalDatasetCollection Update API', () => { body: { collectionId: mockCollectionId, name: 'Updated Name', description: 123 } }; - await expect(handler_test(req as any)).rejects.toEqual({ - statusCode: 400, - message: 'Description must be a string' - }); + await expect(handler_test(req as any)).rejects.toEqual('Description must be a string'); }); - it('should reject when description exceeds 500 characters', async () => { - const longDescription = 'a'.repeat(501); + it('should reject when description exceeds 100 characters', async () => { + const longDescription = 'a'.repeat(101); const req = { body: { collectionId: mockCollectionId, name: 'Updated Name', description: longDescription } }; - await expect(handler_test(req as any)).rejects.toEqual({ - statusCode: 400, - message: 'Description must be less than 500 characters' - }); + await expect(handler_test(req as any)).rejects.toEqual( + 
'Description must be less than 100 characters' + ); }); it('should accept valid parameters', async () => { @@ -255,10 +242,7 @@ describe('EvalDatasetCollection Update API', () => { } }; - await expect(handler_test(req as any)).rejects.toEqual({ - statusCode: 404, - message: 'Dataset collection not found' - }); + await expect(handler_test(req as any)).rejects.toEqual('Dataset collection not found'); expect(mockMongoEvalDatasetCollection.findOne).toHaveBeenCalledWith({ _id: mockCollectionId, @@ -279,10 +263,7 @@ describe('EvalDatasetCollection Update API', () => { } }; - await expect(handler_test(req as any)).rejects.toEqual({ - statusCode: 404, - message: 'Dataset collection not found' - }); + await expect(handler_test(req as any)).rejects.toEqual('Dataset collection not found'); }); it('should proceed when collection exists and belongs to team', async () => { @@ -321,10 +302,9 @@ describe('EvalDatasetCollection Update API', () => { } }; - await expect(handler_test(req as any)).rejects.toEqual({ - statusCode: 500, - message: 'A dataset with this name already exists' - }); + await expect(handler_test(req as any)).rejects.toEqual( + 'A dataset with this name already exists' + ); expect(mockMongoEvalDatasetCollection.findOne).toHaveBeenCalledWith({ teamId: validTeamId, @@ -500,10 +480,7 @@ describe('EvalDatasetCollection Update API', () => { } }; - await expect(handler_test(req as any)).rejects.toEqual({ - statusCode: 500, - message: 'Failed to update dataset collection' - }); + await expect(handler_test(req as any)).rejects.toEqual('Failed to update dataset collection'); }); }); @@ -522,8 +499,8 @@ describe('EvalDatasetCollection Update API', () => { expect(result).toBe('success'); }); - it('should handle exactly 500 character description', async () => { - const exactDescription = 'a'.repeat(500); + it('should handle exactly 100 character description', async () => { + const exactDescription = 'a'.repeat(100); const req = { body: { collectionId: mockCollectionId, diff 
--git a/test/cases/pages/api/core/evaluation/dataset/data/create.test.ts b/test/cases/pages/api/core/evaluation/dataset/data/create.test.ts new file mode 100644 index 000000000000..0ae134a57f78 --- /dev/null +++ b/test/cases/pages/api/core/evaluation/dataset/data/create.test.ts @@ -0,0 +1,686 @@ +import { describe, expect, it, vi, beforeEach } from 'vitest'; +import { handler_test } from '@/pages/api/core/evaluation/dataset/data/create'; +import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; +import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun'; +import { MongoEvalDatasetData } from '@fastgpt/service/core/evaluation/dataset/evalDatasetDataSchema'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema'; +import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; +import { + EvalDatasetDataCreateFromEnum, + EvalDatasetDataKeyEnum +} from '@fastgpt/global/core/evaluation/constants'; + +vi.mock('@fastgpt/service/support/permission/user/auth'); +vi.mock('@fastgpt/service/common/mongo/sessionRun'); +vi.mock('@fastgpt/service/core/evaluation/dataset/evalDatasetDataSchema', () => ({ + MongoEvalDatasetData: { + create: vi.fn() + } +})); +vi.mock('@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema', () => ({ + MongoEvalDatasetCollection: { + findOne: vi.fn() + } +})); + +const mockAuthUserPer = vi.mocked(authUserPer); +const mockMongoSessionRun = vi.mocked(mongoSessionRun); +const mockMongoEvalDatasetData = vi.mocked(MongoEvalDatasetData); +const mockMongoEvalDatasetCollection = vi.mocked(MongoEvalDatasetCollection); + +describe('EvalDatasetData Create API', () => { + const validTeamId = 'team123'; + const validTmbId = 'tmb123'; + const validCollectionId = '65f5b5b5b5b5b5b5b5b5b5b5'; + const mockDataId = '65f5b5b5b5b5b5b5b5b5b5b6'; + + beforeEach(() => { + vi.clearAllMocks(); + + mockAuthUserPer.mockResolvedValue({ + teamId: 
validTeamId, + tmbId: validTmbId + }); + + mockMongoEvalDatasetCollection.findOne.mockResolvedValue({ + _id: validCollectionId, + teamId: validTeamId + } as any); + + mockMongoSessionRun.mockImplementation(async (callback) => { + return callback({} as any); + }); + + mockMongoEvalDatasetData.create.mockResolvedValue([{ _id: mockDataId }] as any); + }); + + describe('Parameter Validation', () => { + it('should reject when collectionId is missing', async () => { + const req = { + body: { + userInput: 'Test input', + expectedOutput: 'Test output' + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'collectionId is required and must be a string' + ); + }); + + it('should reject when collectionId is not a string', async () => { + const req = { + body: { + collectionId: 123, + userInput: 'Test input', + expectedOutput: 'Test output' + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'collectionId is required and must be a string' + ); + }); + + it('should reject when userInput is missing', async () => { + const req = { + body: { + collectionId: validCollectionId, + expectedOutput: 'Test output' + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'userInput is required and must be a non-empty string' + ); + }); + + it('should reject when userInput is empty string', async () => { + const req = { + body: { + collectionId: validCollectionId, + userInput: '', + expectedOutput: 'Test output' + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'userInput is required and must be a non-empty string' + ); + }); + + it('should reject when userInput is only whitespace', async () => { + const req = { + body: { + collectionId: validCollectionId, + userInput: ' ', + expectedOutput: 'Test output' + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'userInput is required and must be a non-empty string' + ); + }); + + it('should reject when userInput is not a string', async () => { + const req 
= { + body: { + collectionId: validCollectionId, + userInput: 123, + expectedOutput: 'Test output' + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'userInput is required and must be a non-empty string' + ); + }); + + it('should reject when expectedOutput is missing', async () => { + const req = { + body: { + collectionId: validCollectionId, + userInput: 'Test input' + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'expectedOutput is required and must be a non-empty string' + ); + }); + + it('should reject when expectedOutput is empty string', async () => { + const req = { + body: { + collectionId: validCollectionId, + userInput: 'Test input', + expectedOutput: '' + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'expectedOutput is required and must be a non-empty string' + ); + }); + + it('should reject when expectedOutput is only whitespace', async () => { + const req = { + body: { + collectionId: validCollectionId, + userInput: 'Test input', + expectedOutput: ' ' + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'expectedOutput is required and must be a non-empty string' + ); + }); + + it('should reject when expectedOutput is not a string', async () => { + const req = { + body: { + collectionId: validCollectionId, + userInput: 'Test input', + expectedOutput: 123 + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'expectedOutput is required and must be a non-empty string' + ); + }); + + it('should reject when actualOutput is not a string', async () => { + const req = { + body: { + collectionId: validCollectionId, + userInput: 'Test input', + expectedOutput: 'Test output', + actualOutput: 123 + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'actualOutput must be a string if provided' + ); + }); + + it('should reject when context is not an array', async () => { + const req = { + body: { + collectionId: validCollectionId, + userInput: 'Test 
input', + expectedOutput: 'Test output', + context: 'not an array' + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'context must be an array of strings if provided' + ); + }); + + it('should reject when context contains non-string items', async () => { + const req = { + body: { + collectionId: validCollectionId, + userInput: 'Test input', + expectedOutput: 'Test output', + context: ['valid', 123, 'also valid'] + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'context must be an array of strings if provided' + ); + }); + + it('should reject when retrievalContext is not an array', async () => { + const req = { + body: { + collectionId: validCollectionId, + userInput: 'Test input', + expectedOutput: 'Test output', + retrievalContext: 'not an array' + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'retrievalContext must be an array of strings if provided' + ); + }); + + it('should reject when retrievalContext contains non-string items', async () => { + const req = { + body: { + collectionId: validCollectionId, + userInput: 'Test input', + expectedOutput: 'Test output', + retrievalContext: ['valid', 123, 'also valid'] + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'retrievalContext must be an array of strings if provided' + ); + }); + }); + + describe('Authentication and Authorization', () => { + it('should call authUserPer with correct parameters', async () => { + const req = { + body: { + collectionId: validCollectionId, + userInput: 'Test input', + expectedOutput: 'Test output' + } + }; + + await handler_test(req as any); + + expect(mockAuthUserPer).toHaveBeenCalledWith({ + req, + authToken: true, + authApiKey: true, + per: WritePermissionVal + }); + }); + + it('should propagate authentication errors', async () => { + const authError = new Error('Authentication failed'); + mockAuthUserPer.mockRejectedValue(authError); + + const req = { + body: { + collectionId: 
validCollectionId, + userInput: 'Test input', + expectedOutput: 'Test output' + } + }; + + await expect(handler_test(req as any)).rejects.toBe(authError); + }); + }); + + describe('Collection Validation', () => { + it('should verify collection exists and belongs to team', async () => { + const req = { + body: { + collectionId: validCollectionId, + userInput: 'Test input', + expectedOutput: 'Test output' + } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetCollection.findOne).toHaveBeenCalledWith({ + _id: validCollectionId, + teamId: validTeamId + }); + }); + + it('should reject when collection does not exist', async () => { + mockMongoEvalDatasetCollection.findOne.mockResolvedValue(null); + + const req = { + body: { + collectionId: validCollectionId, + userInput: 'Test input', + expectedOutput: 'Test output' + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'Dataset collection not found or access denied' + ); + }); + + it('should reject when collection belongs to different team', async () => { + mockMongoEvalDatasetCollection.findOne.mockResolvedValue(null); + + const req = { + body: { + collectionId: validCollectionId, + userInput: 'Test input', + expectedOutput: 'Test output' + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'Dataset collection not found or access denied' + ); + }); + }); + + describe('Data Creation', () => { + it('should create data with required fields only', async () => { + const req = { + body: { + collectionId: validCollectionId, + userInput: 'Test input', + expectedOutput: 'Test output' + } + }; + + const result = await handler_test(req as any); + + expect(mockMongoSessionRun).toHaveBeenCalledWith(expect.any(Function)); + expect(mockMongoEvalDatasetData.create).toHaveBeenCalledWith( + [ + { + teamId: validTeamId, + tmbId: validTmbId, + datasetId: validCollectionId, + [EvalDatasetDataKeyEnum.UserInput]: 'Test input', + [EvalDatasetDataKeyEnum.ActualOutput]: '', + 
[EvalDatasetDataKeyEnum.ExpectedOutput]: 'Test output', + [EvalDatasetDataKeyEnum.Context]: [], + [EvalDatasetDataKeyEnum.RetrievalContext]: [], + createFrom: EvalDatasetDataCreateFromEnum.manual + } + ], + { session: {}, ordered: true } + ); + expect(result).toBe(mockDataId); + }); + + it('should create data with all optional fields', async () => { + const req = { + body: { + collectionId: validCollectionId, + userInput: 'Test input', + actualOutput: 'Actual output', + expectedOutput: 'Test output', + context: ['Context 1', 'Context 2'], + retrievalContext: ['Retrieval 1', 'Retrieval 2'] + } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.create).toHaveBeenCalledWith( + [ + { + teamId: validTeamId, + tmbId: validTmbId, + datasetId: validCollectionId, + [EvalDatasetDataKeyEnum.UserInput]: 'Test input', + [EvalDatasetDataKeyEnum.ActualOutput]: 'Actual output', + [EvalDatasetDataKeyEnum.ExpectedOutput]: 'Test output', + [EvalDatasetDataKeyEnum.Context]: ['Context 1', 'Context 2'], + [EvalDatasetDataKeyEnum.RetrievalContext]: ['Retrieval 1', 'Retrieval 2'], + createFrom: EvalDatasetDataCreateFromEnum.manual + } + ], + { session: {}, ordered: true } + ); + }); + + it('should trim whitespace from userInput and expectedOutput', async () => { + const req = { + body: { + collectionId: validCollectionId, + userInput: ' Test input ', + expectedOutput: ' Test output ' + } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.create).toHaveBeenCalledWith( + [ + { + teamId: validTeamId, + tmbId: validTmbId, + datasetId: validCollectionId, + [EvalDatasetDataKeyEnum.UserInput]: 'Test input', + [EvalDatasetDataKeyEnum.ActualOutput]: '', + [EvalDatasetDataKeyEnum.ExpectedOutput]: 'Test output', + [EvalDatasetDataKeyEnum.Context]: [], + [EvalDatasetDataKeyEnum.RetrievalContext]: [], + createFrom: EvalDatasetDataCreateFromEnum.manual + } + ], + { session: {}, ordered: true } + ); + }); + + it('should trim whitespace from 
actualOutput', async () => { + const req = { + body: { + collectionId: validCollectionId, + userInput: 'Test input', + actualOutput: ' Actual output ', + expectedOutput: 'Test output' + } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.create).toHaveBeenCalledWith( + [ + { + teamId: validTeamId, + tmbId: validTmbId, + datasetId: validCollectionId, + [EvalDatasetDataKeyEnum.UserInput]: 'Test input', + [EvalDatasetDataKeyEnum.ActualOutput]: 'Actual output', + [EvalDatasetDataKeyEnum.ExpectedOutput]: 'Test output', + [EvalDatasetDataKeyEnum.Context]: [], + [EvalDatasetDataKeyEnum.RetrievalContext]: [], + createFrom: EvalDatasetDataCreateFromEnum.manual + } + ], + { session: {}, ordered: true } + ); + }); + + it('should handle empty actualOutput', async () => { + const req = { + body: { + collectionId: validCollectionId, + userInput: 'Test input', + actualOutput: '', + expectedOutput: 'Test output' + } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.create).toHaveBeenCalledWith( + [ + { + teamId: validTeamId, + tmbId: validTmbId, + datasetId: validCollectionId, + [EvalDatasetDataKeyEnum.UserInput]: 'Test input', + [EvalDatasetDataKeyEnum.ActualOutput]: '', + [EvalDatasetDataKeyEnum.ExpectedOutput]: 'Test output', + [EvalDatasetDataKeyEnum.Context]: [], + [EvalDatasetDataKeyEnum.RetrievalContext]: [], + createFrom: EvalDatasetDataCreateFromEnum.manual + } + ], + { session: {}, ordered: true } + ); + }); + + it('should handle empty context array', async () => { + const req = { + body: { + collectionId: validCollectionId, + userInput: 'Test input', + expectedOutput: 'Test output', + context: [] + } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.create).toHaveBeenCalledWith( + [ + { + teamId: validTeamId, + tmbId: validTmbId, + datasetId: validCollectionId, + [EvalDatasetDataKeyEnum.UserInput]: 'Test input', + [EvalDatasetDataKeyEnum.ActualOutput]: '', + 
[EvalDatasetDataKeyEnum.ExpectedOutput]: 'Test output', + [EvalDatasetDataKeyEnum.Context]: [], + [EvalDatasetDataKeyEnum.RetrievalContext]: [], + createFrom: EvalDatasetDataCreateFromEnum.manual + } + ], + { session: {}, ordered: true } + ); + }); + + it('should handle empty retrievalContext array', async () => { + const req = { + body: { + collectionId: validCollectionId, + userInput: 'Test input', + expectedOutput: 'Test output', + retrievalContext: [] + } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.create).toHaveBeenCalledWith( + [ + { + teamId: validTeamId, + tmbId: validTmbId, + datasetId: validCollectionId, + [EvalDatasetDataKeyEnum.UserInput]: 'Test input', + [EvalDatasetDataKeyEnum.ActualOutput]: '', + [EvalDatasetDataKeyEnum.ExpectedOutput]: 'Test output', + [EvalDatasetDataKeyEnum.Context]: [], + [EvalDatasetDataKeyEnum.RetrievalContext]: [], + createFrom: EvalDatasetDataCreateFromEnum.manual + } + ], + { session: {}, ordered: true } + ); + }); + + it('should return data ID as string', async () => { + const req = { + body: { + collectionId: validCollectionId, + userInput: 'Test input', + expectedOutput: 'Test output' + } + }; + + const result = await handler_test(req as any); + expect(result).toBe(mockDataId); + expect(typeof result).toBe('string'); + }); + + it('should propagate database creation errors', async () => { + const dbError = new Error('Database connection failed'); + mockMongoSessionRun.mockRejectedValue(dbError); + + const req = { + body: { + collectionId: validCollectionId, + userInput: 'Test input', + expectedOutput: 'Test output' + } + }; + + await expect(handler_test(req as any)).rejects.toBe(dbError); + }); + }); + + describe('Edge Cases', () => { + it('should handle very long userInput', async () => { + const longInput = 'a'.repeat(10000); + const req = { + body: { + collectionId: validCollectionId, + userInput: longInput, + expectedOutput: 'Test output' + } + }; + + const result = await 
handler_test(req as any); + expect(result).toBe(mockDataId); + }); + + it('should handle very long expectedOutput', async () => { + const longOutput = 'a'.repeat(10000); + const req = { + body: { + collectionId: validCollectionId, + userInput: 'Test input', + expectedOutput: longOutput + } + }; + + const result = await handler_test(req as any); + expect(result).toBe(mockDataId); + }); + + it('should handle special characters in inputs', async () => { + const req = { + body: { + collectionId: validCollectionId, + userInput: 'Test input with 特殊字符 and émojis 🚀', + expectedOutput: 'Test output with 特殊字符 and émojis 🎯' + } + }; + + const result = await handler_test(req as any); + expect(result).toBe(mockDataId); + }); + + it('should handle newlines and tabs in inputs', async () => { + const req = { + body: { + collectionId: validCollectionId, + userInput: 'Test input\nwith\tnewlines\tand\ttabs', + expectedOutput: 'Test output\nwith\tnewlines\tand\ttabs' + } + }; + + const result = await handler_test(req as any); + expect(result).toBe(mockDataId); + }); + + it('should handle large context arrays', async () => { + const largeContext = Array.from({ length: 100 }, (_, i) => `Context item ${i}`); + const req = { + body: { + collectionId: validCollectionId, + userInput: 'Test input', + expectedOutput: 'Test output', + context: largeContext + } + }; + + const result = await handler_test(req as any); + expect(result).toBe(mockDataId); + }); + + it('should handle large retrievalContext arrays', async () => { + const largeRetrievalContext = Array.from({ length: 100 }, (_, i) => `Retrieval item ${i}`); + const req = { + body: { + collectionId: validCollectionId, + userInput: 'Test input', + expectedOutput: 'Test output', + retrievalContext: largeRetrievalContext + } + }; + + const result = await handler_test(req as any); + expect(result).toBe(mockDataId); + }); + }); +}); diff --git a/test/cases/pages/api/core/evaluation/dataset/data/delete.test.ts 
b/test/cases/pages/api/core/evaluation/dataset/data/delete.test.ts new file mode 100644 index 000000000000..2cb8622fd60a --- /dev/null +++ b/test/cases/pages/api/core/evaluation/dataset/data/delete.test.ts @@ -0,0 +1,566 @@ +import { describe, expect, it, vi, beforeEach } from 'vitest'; +import { handler_test } from '@/pages/api/core/evaluation/dataset/data/delete'; +import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; +import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun'; +import { MongoEvalDatasetData } from '@fastgpt/service/core/evaluation/dataset/evalDatasetDataSchema'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema'; +import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; +import { + removeEvalDatasetDataQualityJob, + checkEvalDatasetDataQualityJobActive +} from '@fastgpt/service/core/evaluation/dataset/dataQualityMq'; +import { addLog } from '@fastgpt/service/common/system/log'; + +vi.mock('@fastgpt/service/support/permission/user/auth'); +vi.mock('@fastgpt/service/common/mongo/sessionRun'); +vi.mock('@fastgpt/service/core/evaluation/dataset/evalDatasetDataSchema', () => ({ + MongoEvalDatasetData: { + findById: vi.fn(), + deleteOne: vi.fn() + } +})); +vi.mock('@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema', () => ({ + MongoEvalDatasetCollection: { + findOne: vi.fn() + } +})); +vi.mock('@fastgpt/service/core/evaluation/dataset/dataQualityMq'); +vi.mock('@fastgpt/service/common/system/log'); + +const mockAuthUserPer = vi.mocked(authUserPer); +const mockMongoSessionRun = vi.mocked(mongoSessionRun); +const mockMongoEvalDatasetData = vi.mocked(MongoEvalDatasetData); +const mockMongoEvalDatasetCollection = vi.mocked(MongoEvalDatasetCollection); +const mockRemoveEvalDatasetDataQualityJob = vi.mocked(removeEvalDatasetDataQualityJob); +const mockCheckEvalDatasetDataQualityJobActive = 
vi.mocked(checkEvalDatasetDataQualityJobActive); +const mockAddLog = vi.mocked(addLog); + +describe('EvalDatasetData Delete API', () => { + const validTeamId = 'team123'; + const validTmbId = 'tmb123'; + const validDataId = '65f5b5b5b5b5b5b5b5b5b5b5'; + const validCollectionId = '65f5b5b5b5b5b5b5b5b5b5b6'; + + const mockSession = { id: 'session-123' }; + const mockExistingData = { + _id: validDataId, + datasetId: validCollectionId, + userInput: 'Test input', + expectedOutput: 'Test output' + }; + + const mockCollection = { + _id: validCollectionId, + teamId: validTeamId, + name: 'Test Collection' + }; + + beforeEach(() => { + vi.clearAllMocks(); + + mockAuthUserPer.mockResolvedValue({ + teamId: validTeamId, + tmbId: validTmbId + }); + + mockMongoSessionRun.mockImplementation(async (callback) => { + return callback(mockSession as any); + }); + + mockMongoEvalDatasetData.findById.mockReturnValue({ + session: vi.fn().mockResolvedValue(mockExistingData) + } as any); + + mockMongoEvalDatasetCollection.findOne.mockReturnValue({ + session: vi.fn().mockResolvedValue(mockCollection) + } as any); + + mockCheckEvalDatasetDataQualityJobActive.mockResolvedValue(false); + mockMongoEvalDatasetData.deleteOne.mockResolvedValue({ deletedCount: 1 } as any); + + mockAddLog.info = vi.fn(); + mockAddLog.error = vi.fn(); + }); + + describe('Parameter Validation', () => { + it('should reject when dataId is missing', async () => { + const req = { + query: {} + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'dataId is required and must be a string' + ); + }); + + it('should reject when dataId is null', async () => { + const req = { + query: { dataId: null } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'dataId is required and must be a string' + ); + }); + + it('should reject when dataId is undefined', async () => { + const req = { + query: { dataId: undefined } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'dataId is required and 
must be a string' + ); + }); + + it('should reject when dataId is not a string', async () => { + const req = { + query: { dataId: 123 } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'dataId is required and must be a string' + ); + }); + + it('should reject when dataId is empty string', async () => { + const req = { + query: { dataId: '' } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'dataId is required and must be a string' + ); + }); + }); + + describe('Authentication and Authorization', () => { + it('should call authUserPer with correct parameters', async () => { + const req = { + query: { dataId: validDataId } + }; + + await handler_test(req as any); + + expect(mockAuthUserPer).toHaveBeenCalledWith({ + req, + authToken: true, + authApiKey: true, + per: WritePermissionVal + }); + }); + + it('should propagate authentication errors', async () => { + const authError = new Error('Authentication failed'); + mockAuthUserPer.mockRejectedValue(authError); + + const req = { + query: { dataId: validDataId } + }; + + await expect(handler_test(req as any)).rejects.toBe(authError); + }); + }); + + describe('Data Validation', () => { + it('should verify data exists', async () => { + const req = { + query: { dataId: validDataId } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.findById).toHaveBeenCalledWith(validDataId); + }); + + it('should reject when data does not exist', async () => { + mockMongoEvalDatasetData.findById.mockReturnValue({ + session: vi.fn().mockResolvedValue(null) + } as any); + + const req = { + query: { dataId: validDataId } + }; + + await expect(handler_test(req as any)).rejects.toEqual('Dataset data not found'); + }); + + it('should verify collection exists and belongs to team', async () => { + const req = { + query: { dataId: validDataId } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetCollection.findOne).toHaveBeenCalledWith({ + _id: validCollectionId, + 
teamId: validTeamId + }); + }); + + it('should reject when collection does not exist', async () => { + mockMongoEvalDatasetCollection.findOne.mockReturnValue({ + session: vi.fn().mockResolvedValue(null) + } as any); + + const req = { + query: { dataId: validDataId } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'Access denied or dataset collection not found' + ); + }); + + it('should reject when collection belongs to different team', async () => { + mockMongoEvalDatasetCollection.findOne.mockReturnValue({ + session: vi.fn().mockResolvedValue(null) + } as any); + + const req = { + query: { dataId: validDataId } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'Access denied or dataset collection not found' + ); + }); + }); + + describe('Quality Job Management', () => { + it('should check for active quality evaluation jobs', async () => { + const req = { + query: { dataId: validDataId } + }; + + await handler_test(req as any); + + expect(mockCheckEvalDatasetDataQualityJobActive).toHaveBeenCalledWith(validDataId); + }); + + it('should remove active quality job before deletion', async () => { + mockCheckEvalDatasetDataQualityJobActive.mockResolvedValue(true); + + const req = { + query: { dataId: validDataId } + }; + + await handler_test(req as any); + + expect(mockRemoveEvalDatasetDataQualityJob).toHaveBeenCalledWith(validDataId); + expect(mockAddLog.info).toHaveBeenCalledWith( + 'Removing active quality evaluation job before deletion', + { + dataId: validDataId, + teamId: validTeamId + } + ); + expect(mockAddLog.info).toHaveBeenCalledWith( + 'Quality evaluation job removed successfully before deletion', + { + dataId: validDataId, + teamId: validTeamId + } + ); + }); + + it('should continue deletion if quality job removal fails', async () => { + mockCheckEvalDatasetDataQualityJobActive.mockResolvedValue(true); + const jobError = new Error('Failed to remove job'); + mockRemoveEvalDatasetDataQualityJob.mockRejectedValue(jobError); + 
+ const req = { + query: { dataId: validDataId } + }; + + const result = await handler_test(req as any); + + expect(mockAddLog.error).toHaveBeenCalledWith( + 'Failed to remove quality evaluation job before deletion', + { + dataId: validDataId, + teamId: validTeamId, + error: jobError + } + ); + expect(mockMongoEvalDatasetData.deleteOne).toHaveBeenCalled(); + expect(result).toBe('success'); + }); + + it('should not attempt to remove quality job when none is active', async () => { + mockCheckEvalDatasetDataQualityJobActive.mockResolvedValue(false); + + const req = { + query: { dataId: validDataId } + }; + + await handler_test(req as any); + + expect(mockRemoveEvalDatasetDataQualityJob).not.toHaveBeenCalled(); + expect(mockAddLog.info).not.toHaveBeenCalledWith( + 'Removing active quality evaluation job before deletion', + expect.any(Object) + ); + }); + }); + + describe('Data Deletion', () => { + it('should delete data with correct parameters', async () => { + const req = { + query: { dataId: validDataId } + }; + + const result = await handler_test(req as any); + + expect(mockMongoEvalDatasetData.deleteOne).toHaveBeenCalledWith( + { _id: validDataId }, + { session: mockSession } + ); + expect(result).toBe('success'); + }); + + it('should log successful deletion', async () => { + const req = { + query: { dataId: validDataId } + }; + + await handler_test(req as any); + + expect(mockAddLog.info).toHaveBeenCalledWith('Evaluation dataset data deleted successfully', { + dataId: validDataId, + datasetId: validCollectionId, + teamId: validTeamId + }); + }); + + it('should use MongoDB session for all operations', async () => { + const req = { + query: { dataId: validDataId } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.findById().session).toHaveBeenCalledWith(mockSession); + expect(mockMongoEvalDatasetCollection.findOne().session).toHaveBeenCalledWith(mockSession); + expect(mockMongoEvalDatasetData.deleteOne).toHaveBeenCalledWith( + { _id: 
validDataId }, + { session: mockSession } + ); + }); + + it('should return success message', async () => { + const req = { + query: { dataId: validDataId } + }; + + const result = await handler_test(req as any); + expect(result).toBe('success'); + expect(typeof result).toBe('string'); + }); + }); + + describe('Session Management', () => { + it('should wrap operations in MongoDB session', async () => { + const req = { + query: { dataId: validDataId } + }; + + await handler_test(req as any); + + expect(mockMongoSessionRun).toHaveBeenCalledWith(expect.any(Function)); + }); + + it('should propagate session errors', async () => { + const sessionError = new Error('Session failed'); + mockMongoSessionRun.mockRejectedValue(sessionError); + + const req = { + query: { dataId: validDataId } + }; + + await expect(handler_test(req as any)).rejects.toBe(sessionError); + }); + }); + + describe('Error Handling', () => { + it('should propagate database findById errors', async () => { + const dbError = new Error('Database connection failed'); + mockMongoEvalDatasetData.findById.mockReturnValue({ + session: vi.fn().mockRejectedValue(dbError) + } as any); + + const req = { + query: { dataId: validDataId } + }; + + await expect(handler_test(req as any)).rejects.toBe(dbError); + }); + + it('should propagate collection findOne errors', async () => { + const dbError = new Error('Database connection failed'); + mockMongoEvalDatasetCollection.findOne.mockReturnValue({ + session: vi.fn().mockRejectedValue(dbError) + } as any); + + const req = { + query: { dataId: validDataId } + }; + + await expect(handler_test(req as any)).rejects.toBe(dbError); + }); + + it('should propagate deleteOne errors', async () => { + const dbError = new Error('Database deletion failed'); + mockMongoEvalDatasetData.deleteOne.mockRejectedValue(dbError); + + const req = { + query: { dataId: validDataId } + }; + + await expect(handler_test(req as any)).rejects.toBe(dbError); + }); + + it('should propagate quality job 
check errors', async () => { + const jobError = new Error('Quality job check failed'); + mockCheckEvalDatasetDataQualityJobActive.mockRejectedValue(jobError); + + const req = { + query: { dataId: validDataId } + }; + + await expect(handler_test(req as any)).rejects.toBe(jobError); + }); + }); + + describe('Edge Cases', () => { + it('should handle valid ObjectId format for dataId', async () => { + const validObjectId = '507f1f77bcf86cd799439011'; + const req = { + query: { dataId: validObjectId } + }; + + const result = await handler_test(req as any); + expect(result).toBe('success'); + }); + + it('should handle data with minimal fields', async () => { + const minimalData = { + _id: validDataId, + datasetId: validCollectionId + }; + + mockMongoEvalDatasetData.findById.mockReturnValue({ + session: vi.fn().mockResolvedValue(minimalData) + } as any); + + const req = { + query: { dataId: validDataId } + }; + + const result = await handler_test(req as any); + expect(result).toBe('success'); + }); + + it('should handle collection with minimal fields', async () => { + const minimalCollection = { + _id: validCollectionId, + teamId: validTeamId + }; + + mockMongoEvalDatasetCollection.findOne.mockReturnValue({ + session: vi.fn().mockResolvedValue(minimalCollection) + } as any); + + const req = { + query: { dataId: validDataId } + }; + + const result = await handler_test(req as any); + expect(result).toBe('success'); + }); + + it('should handle whitespace in dataId', async () => { + const req = { + query: { dataId: ' ' } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'dataId is required and must be a string' + ); + }); + + it('should handle array dataId', async () => { + const req = { + query: { dataId: [validDataId] } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'dataId is required and must be a string' + ); + }); + + it('should handle object dataId', async () => { + const req = { + query: { dataId: { id: validDataId } } + }; + + 
await expect(handler_test(req as any)).rejects.toEqual( + 'dataId is required and must be a string' + ); + }); + }); + + describe('Integration Scenarios', () => { + it('should handle complete deletion flow with active quality job', async () => { + mockCheckEvalDatasetDataQualityJobActive.mockResolvedValue(true); + + const req = { + query: { dataId: validDataId } + }; + + const result = await handler_test(req as any); + + // Verify complete flow + expect(mockAuthUserPer).toHaveBeenCalled(); + expect(mockMongoEvalDatasetData.findById).toHaveBeenCalled(); + expect(mockMongoEvalDatasetCollection.findOne).toHaveBeenCalled(); + expect(mockCheckEvalDatasetDataQualityJobActive).toHaveBeenCalled(); + expect(mockRemoveEvalDatasetDataQualityJob).toHaveBeenCalled(); + expect(mockMongoEvalDatasetData.deleteOne).toHaveBeenCalled(); + expect(result).toBe('success'); + }); + + it('should handle complete deletion flow without active quality job', async () => { + mockCheckEvalDatasetDataQualityJobActive.mockResolvedValue(false); + + const req = { + query: { dataId: validDataId } + }; + + const result = await handler_test(req as any); + + // Verify complete flow + expect(mockAuthUserPer).toHaveBeenCalled(); + expect(mockMongoEvalDatasetData.findById).toHaveBeenCalled(); + expect(mockMongoEvalDatasetCollection.findOne).toHaveBeenCalled(); + expect(mockCheckEvalDatasetDataQualityJobActive).toHaveBeenCalled(); + expect(mockRemoveEvalDatasetDataQualityJob).not.toHaveBeenCalled(); + expect(mockMongoEvalDatasetData.deleteOne).toHaveBeenCalled(); + expect(result).toBe('success'); + }); + + it('should maintain transaction integrity on session failure', async () => { + const sessionError = new Error('Session rollback'); + mockMongoEvalDatasetData.deleteOne.mockRejectedValue(sessionError); + + const req = { + query: { dataId: validDataId } + }; + + await expect(handler_test(req as any)).rejects.toBe(sessionError); + + // Verify session was used for all operations + 
expect(mockMongoSessionRun).toHaveBeenCalled(); + }); + }); +}); diff --git a/test/cases/pages/api/core/evaluation/dataset/data/fileId.test.ts b/test/cases/pages/api/core/evaluation/dataset/data/fileId.test.ts new file mode 100644 index 000000000000..fd164b0b1be5 --- /dev/null +++ b/test/cases/pages/api/core/evaluation/dataset/data/fileId.test.ts @@ -0,0 +1,1141 @@ +import { describe, expect, it, vi, beforeEach } from 'vitest'; +import { handler_test } from '@/pages/api/core/evaluation/dataset/data/fileId'; +import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; +import { authCollectionFile } from '@fastgpt/service/support/permission/auth/file'; +import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun'; +import { MongoEvalDatasetData } from '@fastgpt/service/core/evaluation/dataset/evalDatasetDataSchema'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema'; +import { readFileContentFromMongo } from '@fastgpt/service/common/file/gridfs/controller'; +import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; +import { BucketNameEnum } from '@fastgpt/global/common/file/constants'; +import { + EvalDatasetDataCreateFromEnum, + EvalDatasetDataKeyEnum +} from '@fastgpt/global/core/evaluation/constants'; +import { addEvalDatasetDataQualityJob } from '@fastgpt/service/core/evaluation/dataset/dataQualityMq'; + +vi.mock('@fastgpt/service/support/permission/user/auth'); +vi.mock('@fastgpt/service/support/permission/auth/file'); +vi.mock('@fastgpt/service/common/mongo/sessionRun'); +vi.mock('@fastgpt/service/core/evaluation/dataset/evalDatasetDataSchema', () => ({ + MongoEvalDatasetData: { + insertMany: vi.fn() + } +})); +vi.mock('@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema', () => ({ + MongoEvalDatasetCollection: { + findById: vi.fn() + } +})); +vi.mock('@fastgpt/service/common/file/gridfs/controller', () => ({ + 
readFileContentFromMongo: vi.fn() +})); +vi.mock('@fastgpt/service/core/evaluation/dataset/dataQualityMq', () => ({ + addEvalDatasetDataQualityJob: vi.fn() +})); + +const mockAuthUserPer = vi.mocked(authUserPer); +const mockAuthCollectionFile = vi.mocked(authCollectionFile); +const mockMongoSessionRun = vi.mocked(mongoSessionRun); +const mockMongoEvalDatasetData = vi.mocked(MongoEvalDatasetData); +const mockMongoEvalDatasetCollection = vi.mocked(MongoEvalDatasetCollection); +const mockReadFileContentFromMongo = vi.mocked(readFileContentFromMongo); +const mockAddEvalDatasetDataQualityJob = vi.mocked(addEvalDatasetDataQualityJob); + +describe('EvalDatasetData FileId Import API', () => { + const validTeamId = 'team123'; + const validTmbId = 'tmb123'; + const validFileId = 'file123'; + const validCollectionId = '65f5b5b5b5b5b5b5b5b5b5b5'; + const mockInsertedRecords = [ + { _id: '65f5b5b5b5b5b5b5b5b5b5b6' }, + { _id: '65f5b5b5b5b5b5b5b5b5b5b7' } + ]; + + const validCSVContent = `user_input,expected_output,actual_output,context,retrieval_context,metadata
"What is AI?","Artificial Intelligence","AI is...","[""tech"",""science""]","[""AI overview""]","{""category"":""tech""}"
"Define ML","Machine Learning","","","","{}"`; + + beforeEach(() => { + vi.clearAllMocks(); + + mockAuthUserPer.mockResolvedValue({ + teamId: validTeamId, + tmbId: validTmbId + }); + + mockAuthCollectionFile.mockResolvedValue({ + teamId: validTeamId, + tmbId: validTmbId, + file: { + _id: validFileId, + filename: 'test.csv', + metadata: { + teamId: validTeamId, + tmbId: validTmbId + } + } + } as any); + + mockMongoEvalDatasetCollection.findById.mockResolvedValue({ + _id: validCollectionId, + teamId: validTeamId + } as any); + + // Set a default CSV payload; individual tests override this mock when they need different content + mockReadFileContentFromMongo.mockResolvedValue({ + rawText: validCSVContent + }); + + mockMongoSessionRun.mockImplementation(async (callback) => { + return callback({} as any); + }); + + 
mockMongoEvalDatasetData.insertMany.mockResolvedValue(mockInsertedRecords as any); + mockAddEvalDatasetDataQualityJob.mockResolvedValue({} as any); + }); + + describe('Parameter Validation', () => { + it('should reject when fileId is missing', async () => { + const req = { + body: { + collectionId: validCollectionId, + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('fileId is required and must be a string'); + }); + + it('should reject when fileId is not a string', async () => { + const req = { + body: { + fileId: 123, + collectionId: validCollectionId, + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('fileId is required and must be a string'); + }); + + it('should reject when collectionId is missing', async () => { + const req = { + body: { + fileId: validFileId, + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('datasetCollectionId is required and must be a string'); + }); + + it('should reject when collectionId is not a string', async () => { + const req = { + body: { + fileId: validFileId, + collectionId: 123, + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('datasetCollectionId is required and must be a string'); + }); + + it('should reject when enableQualityEvaluation is missing', async () => { + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('enableQualityEvaluation is required and must be a boolean'); + }); + + it('should reject when enableQualityEvaluation is not a boolean', async () => { + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: 'true' + } + }; + + const result = await handler_test(req as any); + 
expect(result).toBe('enableQualityEvaluation is required and must be a boolean'); + }); + + it('should reject when enableQualityEvaluation is true but qualityEvaluationModel is missing', async () => { + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: true + } + }; + + const result = await handler_test(req as any); + expect(result).toBe( + 'qualityEvaluationModel is required when enableQualityEvaluation is true' + ); + }); + + it('should reject when enableQualityEvaluation is true but qualityEvaluationModel is not a string', async () => { + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: true, + qualityEvaluationModel: 123 + } + }; + + const result = await handler_test(req as any); + expect(result).toBe( + 'qualityEvaluationModel is required when enableQualityEvaluation is true' + ); + }); + }); + + describe('Authentication and Authorization', () => { + it('should call authUserPer with correct parameters', async () => { + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: false + } + }; + + await handler_test(req as any); + + expect(mockAuthUserPer).toHaveBeenCalledWith({ + req, + authToken: true, + authApiKey: true, + per: WritePermissionVal + }); + }); + + it('should call authCollectionFile with correct parameters', async () => { + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: false + } + }; + + await handler_test(req as any); + + expect(mockAuthCollectionFile).toHaveBeenCalledWith({ + req, + authToken: true, + authApiKey: true, + fileId: validFileId, + per: WritePermissionVal + }); + }); + + it('should propagate authentication errors', async () => { + const authError = new Error('Authentication failed'); + mockAuthUserPer.mockRejectedValue(authError); + + const req = { + body: { + fileId: validFileId, + collectionId: 
validCollectionId, + enableQualityEvaluation: false + } + }; + + await expect(handler_test(req as any)).rejects.toBe(authError); + }); + + it('should propagate file authentication errors', async () => { + const fileAuthError = new Error('File authentication failed'); + mockAuthCollectionFile.mockRejectedValue(fileAuthError); + + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: false + } + }; + + await expect(handler_test(req as any)).rejects.toBe(fileAuthError); + }); + }); + + describe('File Validation', () => { + it('should reject non-CSV files', async () => { + mockAuthCollectionFile.mockResolvedValue({ + teamId: validTeamId, + tmbId: validTmbId, + file: { + _id: validFileId, + filename: 'test.txt', + metadata: {} + } + } as any); + + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('File must be a CSV file'); + }); + + it('should handle files with uppercase CSV extension', async () => { + mockAuthCollectionFile.mockResolvedValue({ + teamId: validTeamId, + tmbId: validTmbId, + file: { + _id: validFileId, + filename: 'test.CSV', + metadata: {} + } + } as any); + + const uppercaseCSV = `user_input,expected_output +"What is AI?","Artificial Intelligence"`; + + mockReadFileContentFromMongo.mockResolvedValue({ + rawText: uppercaseCSV + }); + + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('success'); + }); + + it('should handle files without filename', async () => { + mockAuthCollectionFile.mockResolvedValue({ + teamId: validTeamId, + tmbId: validTmbId, + file: { + _id: validFileId, + filename: undefined, + metadata: {} + } + } as any); + + const req = { + body: { + fileId: validFileId, + collectionId: 
validCollectionId, + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('File must be a CSV file'); + }); + }); + + describe('Dataset Collection Validation', () => { + it('should reject when dataset collection does not exist', async () => { + mockMongoEvalDatasetCollection.findById.mockResolvedValue(null); + + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('Evaluation dataset collection not found'); + }); + + it('should reject when dataset collection belongs to different team', async () => { + mockMongoEvalDatasetCollection.findById.mockResolvedValue({ + _id: validCollectionId, + teamId: 'different-team' + } as any); + + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('No permission to access this dataset collection'); + }); + }); + + describe('CSV Parsing', () => { + it('should parse valid CSV with all columns', async () => { + // Override the mock to return the full CSV with all columns + mockReadFileContentFromMongo.mockResolvedValue({ + rawText: validCSVContent + }); + + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + + expect(mockReadFileContentFromMongo).toHaveBeenCalledWith({ + teamId: validTeamId, + tmbId: validTmbId, + bucketName: BucketNameEnum.dataset, + fileId: validFileId, + getFormatText: false + }); + + expect(mockMongoEvalDatasetData.insertMany).toHaveBeenCalledWith( + expect.arrayContaining([ + expect.objectContaining({ + teamId: validTeamId, + tmbId: validTmbId, + datasetId: validCollectionId, + [EvalDatasetDataKeyEnum.UserInput]: 'What is AI?', + 
[EvalDatasetDataKeyEnum.ExpectedOutput]: 'Artificial Intelligence', + [EvalDatasetDataKeyEnum.ActualOutput]: 'AI is...', + [EvalDatasetDataKeyEnum.Context]: ['tech', 'science'], + [EvalDatasetDataKeyEnum.RetrievalContext]: ['AI overview'], + metadata: { category: 'tech' }, + createFrom: EvalDatasetDataCreateFromEnum.fileImport + }), + expect.objectContaining({ + teamId: validTeamId, + tmbId: validTmbId, + datasetId: validCollectionId, + [EvalDatasetDataKeyEnum.UserInput]: 'Define ML', + [EvalDatasetDataKeyEnum.ExpectedOutput]: 'Machine Learning', + [EvalDatasetDataKeyEnum.ActualOutput]: '', + [EvalDatasetDataKeyEnum.Context]: [], + [EvalDatasetDataKeyEnum.RetrievalContext]: [], + metadata: {}, + createFrom: EvalDatasetDataCreateFromEnum.fileImport + }) + ]), + { + session: {}, + ordered: false + } + ); + + expect(result).toBe('success'); + }); + + it('should parse CSV with only required columns', async () => { + const minimalCSV = `user_input,expected_output +"What is AI?","Artificial Intelligence" +"Define ML","Machine Learning"`; + + mockReadFileContentFromMongo.mockResolvedValue({ + rawText: minimalCSV + }); + + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + + expect(mockMongoEvalDatasetData.insertMany).toHaveBeenCalledWith( + expect.arrayContaining([ + expect.objectContaining({ + [EvalDatasetDataKeyEnum.UserInput]: 'What is AI?', + [EvalDatasetDataKeyEnum.ExpectedOutput]: 'Artificial Intelligence', + [EvalDatasetDataKeyEnum.ActualOutput]: '', + [EvalDatasetDataKeyEnum.Context]: [], + [EvalDatasetDataKeyEnum.RetrievalContext]: [], + metadata: {} + }) + ]), + expect.any(Object) + ); + + expect(result).toBe('success'); + }); + + it('should reject CSV missing required columns', async () => { + const invalidCSV = `question,answer +"What is AI?","Artificial Intelligence"`; + + mockReadFileContentFromMongo.mockResolvedValue({ + rawText: 
invalidCSV + }); + + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + expect(result).toMatch(/CSV parsing error: CSV file is missing required columns/); + }); + + it('should reject empty CSV file', async () => { + mockReadFileContentFromMongo.mockResolvedValue({ + rawText: '' + }); + + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + expect(result).toMatch(/CSV parsing error: CSV file is empty/); + }); + + it('should reject CSV with no data rows', async () => { + const headerOnlyCSV = 'user_input,expected_output'; + + mockReadFileContentFromMongo.mockResolvedValue({ + rawText: headerOnlyCSV + }); + + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('CSV file contains no data rows'); + }); + + it('should reject CSV with too many rows', async () => { + const largeCSVHeader = 'user_input,expected_output\n'; + const largeCSVRows = Array.from( + { length: 10001 }, + (_, i) => `"Question ${i}","Answer ${i}"` + ).join('\n'); + const largeCSV = largeCSVHeader + largeCSVRows; + + mockReadFileContentFromMongo.mockResolvedValue({ + rawText: largeCSV + }); + + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('CSV file cannot contain more than 10,000 rows'); + }); + + it('should handle CSV with inconsistent column count', async () => { + const inconsistentCSV = `user_input,expected_output +"What is AI?","Artificial Intelligence" +"Define ML","Machine Learning","Extra column"`; + + mockReadFileContentFromMongo.mockResolvedValue({ + 
rawText: inconsistentCSV + }); + + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + expect(result).toMatch(/CSV parsing error: Row 3: Expected 2 columns, got 3/); + }); + + it('should handle CSV with quoted fields containing commas', async () => { + const quotedCSV = `user_input,expected_output +"What is AI, really?","Artificial Intelligence, a branch of computer science"`; + + mockReadFileContentFromMongo.mockResolvedValue({ + rawText: quotedCSV + }); + + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + + expect(mockMongoEvalDatasetData.insertMany).toHaveBeenCalledWith( + expect.arrayContaining([ + expect.objectContaining({ + [EvalDatasetDataKeyEnum.UserInput]: 'What is AI, really?', + [EvalDatasetDataKeyEnum.ExpectedOutput]: + 'Artificial Intelligence, a branch of computer science' + }) + ]), + expect.any(Object) + ); + + expect(result).toBe('success'); + }); + + it('should handle CSV with escaped quotes', async () => { + const escapedQuotesCSV = `user_input,expected_output +"What is ""AI""?","It's ""Artificial Intelligence""."`; + + mockReadFileContentFromMongo.mockResolvedValue({ + rawText: escapedQuotesCSV + }); + + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + + expect(mockMongoEvalDatasetData.insertMany).toHaveBeenCalledWith( + expect.arrayContaining([ + expect.objectContaining({ + [EvalDatasetDataKeyEnum.UserInput]: 'What is "AI"?', + [EvalDatasetDataKeyEnum.ExpectedOutput]: 'It\'s "Artificial Intelligence".' 
+ }) + ]), + expect.any(Object) + ); + + expect(result).toBe('success'); + }); + + it('should handle CSV with enum column names', async () => { + const enumCSV = `userInput,expectedOutput +"What is AI?","Artificial Intelligence"`; + + mockReadFileContentFromMongo.mockResolvedValue({ + rawText: enumCSV + }); + + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + + expect(mockMongoEvalDatasetData.insertMany).toHaveBeenCalledWith( + expect.arrayContaining([ + expect.objectContaining({ + [EvalDatasetDataKeyEnum.UserInput]: 'What is AI?', + [EvalDatasetDataKeyEnum.ExpectedOutput]: 'Artificial Intelligence' + }) + ]), + expect.any(Object) + ); + + expect(result).toBe('success'); + }); + }); + + describe('Context and Metadata Parsing', () => { + it('should parse JSON context arrays', async () => { + const contextCSV = `user_input,expected_output,context +"What is AI?","Artificial Intelligence","[""tech"", ""science""]"`; + + mockReadFileContentFromMongo.mockResolvedValue({ + rawText: contextCSV + }); + + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: false + } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.insertMany).toHaveBeenCalledWith( + expect.arrayContaining([ + expect.objectContaining({ + [EvalDatasetDataKeyEnum.Context]: ['tech', 'science'] + }) + ]), + expect.any(Object) + ); + }); + + it('should parse single string context', async () => { + const contextCSV = `user_input,expected_output,context +"What is AI?","Artificial Intelligence","technology"`; + + mockReadFileContentFromMongo.mockResolvedValue({ + rawText: contextCSV + }); + + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: false + } + }; + + await handler_test(req as any); + + 
expect(mockMongoEvalDatasetData.insertMany).toHaveBeenCalledWith( + expect.arrayContaining([ + expect.objectContaining({ + [EvalDatasetDataKeyEnum.Context]: ['technology'] + }) + ]), + expect.any(Object) + ); + }); + + it('should handle invalid JSON context gracefully', async () => { + const contextCSV = `user_input,expected_output,context +"What is AI?","Artificial Intelligence","invalid json"`; + + mockReadFileContentFromMongo.mockResolvedValue({ + rawText: contextCSV + }); + + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: false + } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.insertMany).toHaveBeenCalledWith( + expect.arrayContaining([ + expect.objectContaining({ + [EvalDatasetDataKeyEnum.Context]: ['invalid json'] + }) + ]), + expect.any(Object) + ); + }); + + it('should parse metadata objects', async () => { + const metadataCSV = `user_input,expected_output,metadata +"What is AI?","Artificial Intelligence","{""category"": ""tech"", ""priority"": 1}"`; + + mockReadFileContentFromMongo.mockResolvedValue({ + rawText: metadataCSV + }); + + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: false + } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.insertMany).toHaveBeenCalledWith( + expect.arrayContaining([ + expect.objectContaining({ + metadata: { category: 'tech', priority: 1 } + }) + ]), + expect.any(Object) + ); + }); + + it('should handle invalid JSON metadata gracefully', async () => { + const metadataCSV = `user_input,expected_output,metadata +"What is AI?","Artificial Intelligence","invalid json"`; + + mockReadFileContentFromMongo.mockResolvedValue({ + rawText: metadataCSV + }); + + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: false + } + }; + + await handler_test(req as any); + + 
expect(mockMongoEvalDatasetData.insertMany).toHaveBeenCalledWith( + expect.arrayContaining([ + expect.objectContaining({ + metadata: {} + }) + ]), + expect.any(Object) + ); + }); + + it('should filter out non-string items from context arrays', async () => { + const contextCSV = `user_input,expected_output,context +"What is AI?","Artificial Intelligence","[""tech"", 123, ""science"", null, ""AI""]"`; + + mockReadFileContentFromMongo.mockResolvedValue({ + rawText: contextCSV + }); + + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: false + } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.insertMany).toHaveBeenCalledWith( + expect.arrayContaining([ + expect.objectContaining({ + [EvalDatasetDataKeyEnum.Context]: ['tech', 'science', 'AI'] + }) + ]), + expect.any(Object) + ); + }); + }); + + describe('Quality Evaluation', () => { + it('should not trigger quality evaluation when disabled', async () => { + const simpleCSV = `user_input,expected_output +"What is AI?","Artificial Intelligence"`; + + mockReadFileContentFromMongo.mockResolvedValue({ + rawText: simpleCSV + }); + + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: false + } + }; + + await handler_test(req as any); + + expect(mockAddEvalDatasetDataQualityJob).not.toHaveBeenCalled(); + }); + + it('should trigger quality evaluation when enabled', async () => { + const qualityEvaluationModel = 'gpt-4'; + // Override the mock to return the full CSV with all columns + mockReadFileContentFromMongo.mockResolvedValue({ + rawText: validCSVContent + }); + + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: true, + qualityEvaluationModel + } + }; + + await handler_test(req as any); + + expect(mockAddEvalDatasetDataQualityJob).toHaveBeenCalledTimes(2); + expect(mockAddEvalDatasetDataQualityJob).toHaveBeenCalledWith({ + 
dataId: '65f5b5b5b5b5b5b5b5b5b5b6', + evalModel: qualityEvaluationModel + }); + expect(mockAddEvalDatasetDataQualityJob).toHaveBeenCalledWith({ + dataId: '65f5b5b5b5b5b5b5b5b5b5b7', + evalModel: qualityEvaluationModel + }); + }); + + it('should handle quality evaluation job failures gracefully', async () => { + const qualityEvaluationModel = 'gpt-4'; + mockAddEvalDatasetDataQualityJob.mockRejectedValueOnce(new Error('Queue error')); + + const simpleCSV = `user_input,expected_output +"What is AI?","Artificial Intelligence"`; + + mockReadFileContentFromMongo.mockResolvedValue({ + rawText: simpleCSV + }); + + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: true, + qualityEvaluationModel + } + }; + + const result = await handler_test(req as any); + + // Should still succeed even if some quality evaluation jobs fail + expect(result).toBe('success'); + }); + }); + + describe('Database Operations', () => { + it('should use session for database operations', async () => { + const simpleCSV = `user_input,expected_output +"What is AI?","Artificial Intelligence"`; + + mockReadFileContentFromMongo.mockResolvedValue({ + rawText: simpleCSV + }); + + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: false + } + }; + + await handler_test(req as any); + + expect(mockMongoSessionRun).toHaveBeenCalledWith(expect.any(Function)); + expect(mockMongoEvalDatasetData.insertMany).toHaveBeenCalledWith(expect.any(Array), { + session: {}, + ordered: false + }); + }); + + it('should handle database insertion errors', async () => { + const dbError = new Error('Database error'); + mockMongoSessionRun.mockRejectedValue(dbError); + + const simpleCSV = `user_input,expected_output +"What is AI?","Artificial Intelligence"`; + + mockReadFileContentFromMongo.mockResolvedValue({ + rawText: simpleCSV + }); + + const req = { + body: { + fileId: validFileId, + collectionId: 
validCollectionId, + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('CSV parsing error: Database error'); + }); + + it('should propagate file reading errors', async () => { + const fileError = new Error('File read error'); + mockReadFileContentFromMongo.mockRejectedValue(fileError); + + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('CSV parsing error: File read error'); + }); + }); + + describe('Edge Cases', () => { + it('should handle CSV with empty lines', async () => { + const csvWithEmptyLines = `user_input,expected_output + +"What is AI?","Artificial Intelligence" + +"Define ML","Machine Learning" +`; + + mockReadFileContentFromMongo.mockResolvedValue({ + rawText: csvWithEmptyLines + }); + + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + + expect(mockMongoEvalDatasetData.insertMany).toHaveBeenCalledWith( + expect.arrayContaining([ + expect.objectContaining({ + [EvalDatasetDataKeyEnum.UserInput]: 'What is AI?' + }), + expect.objectContaining({ + [EvalDatasetDataKeyEnum.UserInput]: 'Define ML' + }) + ]), + expect.any(Object) + ); + + expect(result).toBe('success'); + }); + + it('should handle special characters in CSV content', async () => { + const specialCharCSV = `user_input,expected_output +"What is AI? 🤖","人工智能 (AI) is..." 
+"Définir ML","Machine Learning avec émojis 📊"`; + + mockReadFileContentFromMongo.mockResolvedValue({ + rawText: specialCharCSV + }); + + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + + expect(mockMongoEvalDatasetData.insertMany).toHaveBeenCalledWith( + expect.arrayContaining([ + expect.objectContaining({ + [EvalDatasetDataKeyEnum.UserInput]: 'What is AI? 🤖', + [EvalDatasetDataKeyEnum.ExpectedOutput]: '人工智能 (AI) is...' + }) + ]), + expect.any(Object) + ); + + expect(result).toBe('success'); + }); + + it('should handle CSV with special characters in fields', async () => { + const specialCharCSV = `user_input,expected_output +"What is AI? (Define it)","Artificial Intelligence: a field of computer science" +"ML vs DL?","Machine Learning differs from Deep Learning"`; + + mockReadFileContentFromMongo.mockResolvedValue({ + rawText: specialCharCSV + }); + + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + + expect(mockMongoEvalDatasetData.insertMany).toHaveBeenCalledWith( + expect.arrayContaining([ + expect.objectContaining({ + [EvalDatasetDataKeyEnum.UserInput]: 'What is AI? 
(Define it)', + [EvalDatasetDataKeyEnum.ExpectedOutput]: + 'Artificial Intelligence: a field of computer science' + }), + expect.objectContaining({ + [EvalDatasetDataKeyEnum.UserInput]: 'ML vs DL?', + [EvalDatasetDataKeyEnum.ExpectedOutput]: 'Machine Learning differs from Deep Learning' + }) + ]), + expect.any(Object) + ); + + expect(result).toBe('success'); + }); + }); + + describe('Return Values', () => { + it('should return success string on successful import', async () => { + const simpleCSV = `user_input,expected_output +"What is AI?","Artificial Intelligence"`; + + mockReadFileContentFromMongo.mockResolvedValue({ + rawText: simpleCSV + }); + + const req = { + body: { + fileId: validFileId, + collectionId: validCollectionId, + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('success'); + expect(typeof result).toBe('string'); + }); + + it('should return error messages as strings', async () => { + const req = { + body: { + fileId: 123, + collectionId: validCollectionId, + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + expect(typeof result).toBe('string'); + expect(result).toBe('fileId is required and must be a string'); + }); + }); +}); diff --git a/test/cases/pages/api/core/evaluation/dataset/data/list.test.ts b/test/cases/pages/api/core/evaluation/dataset/data/list.test.ts new file mode 100644 index 000000000000..6a5cd7b750f6 --- /dev/null +++ b/test/cases/pages/api/core/evaluation/dataset/data/list.test.ts @@ -0,0 +1,815 @@ +import { describe, expect, it, vi, beforeEach } from 'vitest'; +import { handler_test } from '@/pages/api/core/evaluation/dataset/data/list'; +import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; +import { MongoEvalDatasetData } from '@fastgpt/service/core/evaluation/dataset/evalDatasetDataSchema'; +import { MongoEvalDatasetCollection } from 
'@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema'; +import { ReadPermissionVal } from '@fastgpt/global/support/permission/constant'; +import { Types } from '@fastgpt/service/common/mongo'; +import { EvalDatasetDataKeyEnum } from '@fastgpt/global/core/evaluation/constants'; + +vi.mock('@fastgpt/service/support/permission/user/auth'); +vi.mock('@fastgpt/service/core/evaluation/dataset/evalDatasetDataSchema', () => ({ + MongoEvalDatasetData: { + aggregate: vi.fn(), + countDocuments: vi.fn() + } +})); +vi.mock('@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema', () => ({ + MongoEvalDatasetCollection: { + findOne: vi.fn() + } +})); + +const mockAuthUserPer = vi.mocked(authUserPer); +const mockMongoEvalDatasetData = vi.mocked(MongoEvalDatasetData); +const mockMongoEvalDatasetCollection = vi.mocked(MongoEvalDatasetCollection); + +describe('EvalDatasetData List API', () => { + const validTeamId = '65f5b5b5b5b5b5b5b5b5b5b0'; + const validTmbId = '65f5b5b5b5b5b5b5b5b5b5b9'; + const validCollectionId = '65f5b5b5b5b5b5b5b5b5b5b1'; + + const mockDataItems = [ + { + _id: '65f5b5b5b5b5b5b5b5b5b5b2', + userInput: 'What is AI?', + actualOutput: 'AI stands for Artificial Intelligence', + expectedOutput: 'Artificial Intelligence is a field of computer science', + context: ['Machine learning context'], + retrievalContext: ['AI knowledge base'], + metadata: { quality: 'good' }, + createFrom: 'manual', + createTime: new Date('2024-01-01'), + updateTime: new Date('2024-01-02') + }, + { + _id: '65f5b5b5b5b5b5b5b5b5b5b3', + userInput: 'How does ML work?', + actualOutput: '', + expectedOutput: 'Machine Learning works by training algorithms', + context: [], + retrievalContext: [], + metadata: {}, + createFrom: 'auto', + createTime: new Date('2024-01-03'), + updateTime: new Date('2024-01-04') + } + ]; + + beforeEach(() => { + vi.clearAllMocks(); + + mockAuthUserPer.mockResolvedValue({ + teamId: validTeamId, + tmbId: validTmbId + }); + + 
mockMongoEvalDatasetCollection.findOne.mockResolvedValue({ + _id: validCollectionId, + teamId: validTeamId + } as any); + + mockMongoEvalDatasetData.aggregate.mockResolvedValue(mockDataItems); + mockMongoEvalDatasetData.countDocuments.mockResolvedValue(2); + }); + + describe('Parameter Validation', () => { + it('should reject when collectionId is missing', async () => { + const req = { + body: { pageNum: 1, pageSize: 10 } + }; + + await expect(handler_test(req as any)).rejects.toThrow('Collection ID is required'); + }); + + it('should reject when collectionId is empty string', async () => { + const req = { + body: { collectionId: '', pageNum: 1, pageSize: 10 } + }; + + await expect(handler_test(req as any)).rejects.toThrow('Collection ID is required'); + }); + + it('should reject when collectionId is null', async () => { + const req = { + body: { collectionId: null, pageNum: 1, pageSize: 10 } + }; + + await expect(handler_test(req as any)).rejects.toThrow('Collection ID is required'); + }); + + it('should reject when collectionId is undefined', async () => { + const req = { + body: { collectionId: undefined, pageNum: 1, pageSize: 10 } + }; + + await expect(handler_test(req as any)).rejects.toThrow('Collection ID is required'); + }); + }); + + describe('Authentication and Authorization', () => { + it('should call authUserPer with correct parameters', async () => { + const req = { + body: { collectionId: validCollectionId, pageNum: 1, pageSize: 10 } + }; + + await handler_test(req as any); + + expect(mockAuthUserPer).toHaveBeenCalledWith({ + req, + authToken: true, + authApiKey: true, + per: ReadPermissionVal + }); + }); + + it('should propagate authentication errors', async () => { + const authError = new Error('Authentication failed'); + mockAuthUserPer.mockRejectedValue(authError); + + const req = { + body: { collectionId: validCollectionId, pageNum: 1, pageSize: 10 } + }; + + await expect(handler_test(req as any)).rejects.toBe(authError); + }); + }); + + 
describe('Collection Validation', () => { + it('should verify collection exists and belongs to team', async () => { + const req = { + body: { collectionId: validCollectionId, pageNum: 1, pageSize: 10 } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetCollection.findOne).toHaveBeenCalledWith({ + _id: new Types.ObjectId(validCollectionId), + teamId: new Types.ObjectId(validTeamId) + }); + }); + + it('should reject when collection does not exist', async () => { + mockMongoEvalDatasetCollection.findOne.mockResolvedValue(null); + + const req = { + body: { collectionId: validCollectionId, pageNum: 1, pageSize: 10 } + }; + + await expect(handler_test(req as any)).rejects.toThrow( + 'Collection not found or access denied' + ); + }); + + it('should reject when collection belongs to different team', async () => { + mockMongoEvalDatasetCollection.findOne.mockResolvedValue(null); + + const req = { + body: { collectionId: validCollectionId, pageNum: 1, pageSize: 10 } + }; + + await expect(handler_test(req as any)).rejects.toThrow( + 'Collection not found or access denied' + ); + }); + }); + + describe('Pagination', () => { + it('should handle default pagination parameters', async () => { + const req = { + body: { collectionId: validCollectionId, pageSize: 20 } + }; + + const result = await handler_test(req as any); + + expect(mockMongoEvalDatasetData.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([ + { $match: { datasetId: new Types.ObjectId(validCollectionId) } }, + { $sort: { createTime: -1 } }, + { $skip: 0 }, + { $limit: 20 } + ]) + ); + expect(result.total).toBe(2); + expect(result.list).toHaveLength(2); + }); + + it('should handle custom pagination parameters', async () => { + const req = { + body: { collectionId: validCollectionId, pageNum: 2, pageSize: 5 } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([ + { $match: { datasetId: new 
Types.ObjectId(validCollectionId) } }, + { $sort: { createTime: -1 } }, + { $skip: 5 }, + { $limit: 5 } + ]) + ); + }); + + it('should handle page number 1', async () => { + const req = { + body: { collectionId: validCollectionId, pageNum: 1, pageSize: 10 } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([{ $skip: 0 }, { $limit: 10 }]) + ); + }); + + it('should handle high page numbers', async () => { + const req = { + body: { collectionId: validCollectionId, pageNum: 10, pageSize: 10 } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([{ $skip: 90 }, { $limit: 10 }]) + ); + }); + + it('should handle large page sizes', async () => { + const req = { + body: { collectionId: validCollectionId, pageNum: 1, pageSize: 100 } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([{ $skip: 0 }, { $limit: 100 }]) + ); + }); + }); + + describe('Search Functionality', () => { + it('should handle empty search key', async () => { + const req = { + body: { collectionId: validCollectionId, searchKey: '', pageNum: 1, pageSize: 10 } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([{ $match: { datasetId: new Types.ObjectId(validCollectionId) } }]) + ); + }); + + it('should handle whitespace-only search key', async () => { + const req = { + body: { collectionId: validCollectionId, searchKey: ' ', pageNum: 1, pageSize: 10 } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([{ $match: { datasetId: new Types.ObjectId(validCollectionId) } }]) + ); + }); + + it('should handle valid search key with OR conditions', async () => { + const req = { + body: { collectionId: 
validCollectionId, searchKey: 'AI', pageNum: 1, pageSize: 10 } + }; + + await handler_test(req as any); + + const expectedMatch = { + datasetId: new Types.ObjectId(validCollectionId), + $or: [ + { [EvalDatasetDataKeyEnum.UserInput]: { $regex: new RegExp('AI', 'i') } }, + { [EvalDatasetDataKeyEnum.ExpectedOutput]: { $regex: new RegExp('AI', 'i') } }, + { [EvalDatasetDataKeyEnum.ActualOutput]: { $regex: new RegExp('AI', 'i') } } + ] + }; + + expect(mockMongoEvalDatasetData.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([{ $match: expectedMatch }]) + ); + + expect(mockMongoEvalDatasetData.countDocuments).toHaveBeenCalledWith(expectedMatch); + }); + + it('should trim search key before processing', async () => { + const req = { + body: { collectionId: validCollectionId, searchKey: ' ML ', pageNum: 1, pageSize: 10 } + }; + + await handler_test(req as any); + + const expectedMatch = { + datasetId: new Types.ObjectId(validCollectionId), + $or: [ + { [EvalDatasetDataKeyEnum.UserInput]: { $regex: new RegExp('ML', 'i') } }, + { [EvalDatasetDataKeyEnum.ExpectedOutput]: { $regex: new RegExp('ML', 'i') } }, + { [EvalDatasetDataKeyEnum.ActualOutput]: { $regex: new RegExp('ML', 'i') } } + ] + }; + + expect(mockMongoEvalDatasetData.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([{ $match: expectedMatch }]) + ); + }); + + it('should escape special regex characters in search key', async () => { + const req = { + body: { collectionId: validCollectionId, searchKey: 'What[?]', pageNum: 1, pageSize: 10 } + }; + + await handler_test(req as any); + + const expectedMatch = { + datasetId: new Types.ObjectId(validCollectionId), + $or: [ + { [EvalDatasetDataKeyEnum.UserInput]: { $regex: new RegExp('What\\[\\?\\]', 'i') } }, + { [EvalDatasetDataKeyEnum.ExpectedOutput]: { $regex: new RegExp('What\\[\\?\\]', 'i') } }, + { [EvalDatasetDataKeyEnum.ActualOutput]: { $regex: new RegExp('What\\[\\?\\]', 'i') } } + ] + }; + + 
expect(mockMongoEvalDatasetData.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([{ $match: expectedMatch }]) + ); + }); + + it('should handle non-string search key', async () => { + const req = { + body: { collectionId: validCollectionId, searchKey: 123, pageNum: 1, pageSize: 10 } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([{ $match: { datasetId: new Types.ObjectId(validCollectionId) } }]) + ); + }); + }); + + describe('MongoDB Pipeline', () => { + it('should build correct aggregation pipeline without search', async () => { + const req = { + body: { collectionId: validCollectionId, pageNum: 1, pageSize: 10 } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.aggregate).toHaveBeenCalledWith([ + { $match: { datasetId: new Types.ObjectId(validCollectionId) } }, + { $sort: { createTime: -1 } }, + { $skip: 0 }, + { $limit: 10 }, + { + $project: { + _id: 1, + [EvalDatasetDataKeyEnum.UserInput]: 1, + [EvalDatasetDataKeyEnum.ActualOutput]: 1, + [EvalDatasetDataKeyEnum.ExpectedOutput]: 1, + [EvalDatasetDataKeyEnum.Context]: 1, + [EvalDatasetDataKeyEnum.RetrievalContext]: 1, + metadata: 1, + createFrom: 1, + createTime: 1, + updateTime: 1 + } + } + ]); + }); + + it('should include search filter in pipeline when searchKey provided', async () => { + const req = { + body: { collectionId: validCollectionId, searchKey: 'test', pageNum: 1, pageSize: 10 } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([ + { + $match: { + datasetId: new Types.ObjectId(validCollectionId), + $or: [ + { [EvalDatasetDataKeyEnum.UserInput]: { $regex: new RegExp('test', 'i') } }, + { [EvalDatasetDataKeyEnum.ExpectedOutput]: { $regex: new RegExp('test', 'i') } }, + { [EvalDatasetDataKeyEnum.ActualOutput]: { $regex: new RegExp('test', 'i') } } + ] + } + } + ]) + ); + }); + + it('should sort by 
createTime descending', async () => { + const req = { + body: { collectionId: validCollectionId, pageNum: 1, pageSize: 10 } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([{ $sort: { createTime: -1 } }]) + ); + }); + }); + + describe('Response Format', () => { + it('should return correct response structure', async () => { + const req = { + body: { collectionId: validCollectionId, pageNum: 1, pageSize: 10 } + }; + + const result = await handler_test(req as any); + + expect(result).toEqual({ + total: 2, + list: [ + { + _id: '65f5b5b5b5b5b5b5b5b5b5b2', + [EvalDatasetDataKeyEnum.UserInput]: 'What is AI?', + [EvalDatasetDataKeyEnum.ActualOutput]: 'AI stands for Artificial Intelligence', + [EvalDatasetDataKeyEnum.ExpectedOutput]: + 'Artificial Intelligence is a field of computer science', + [EvalDatasetDataKeyEnum.Context]: ['Machine learning context'], + [EvalDatasetDataKeyEnum.RetrievalContext]: ['AI knowledge base'], + metadata: { quality: 'good' }, + createFrom: 'manual', + createTime: expect.any(Date), + updateTime: expect.any(Date) + }, + { + _id: '65f5b5b5b5b5b5b5b5b5b5b3', + [EvalDatasetDataKeyEnum.UserInput]: 'How does ML work?', + [EvalDatasetDataKeyEnum.ActualOutput]: '', + [EvalDatasetDataKeyEnum.ExpectedOutput]: + 'Machine Learning works by training algorithms', + [EvalDatasetDataKeyEnum.Context]: [], + [EvalDatasetDataKeyEnum.RetrievalContext]: [], + metadata: {}, + createFrom: 'auto', + createTime: expect.any(Date), + updateTime: expect.any(Date) + } + ] + }); + }); + + it('should handle missing actualOutput with empty string', async () => { + const dataWithMissingActualOutput = [ + { + _id: '65f5b5b5b5b5b5b5b5b5b5b2', + userInput: 'What is AI?', + expectedOutput: 'Artificial Intelligence', + context: [], + retrievalContext: [], + metadata: {}, + createFrom: 'manual', + createTime: new Date('2024-01-01'), + updateTime: new Date('2024-01-02') + } + ]; + + 
mockMongoEvalDatasetData.aggregate.mockResolvedValue(dataWithMissingActualOutput); + mockMongoEvalDatasetData.countDocuments.mockResolvedValue(1); + + const req = { + body: { collectionId: validCollectionId, pageNum: 1, pageSize: 10 } + }; + + const result = await handler_test(req as any); + + expect(result.list[0][EvalDatasetDataKeyEnum.ActualOutput]).toBe(''); + }); + + it('should handle missing context with empty array', async () => { + const dataWithMissingContext = [ + { + _id: '65f5b5b5b5b5b5b5b5b5b5b2', + userInput: 'What is AI?', + actualOutput: 'AI stands for Artificial Intelligence', + expectedOutput: 'Artificial Intelligence', + retrievalContext: [], + metadata: {}, + createFrom: 'manual', + createTime: new Date('2024-01-01'), + updateTime: new Date('2024-01-02') + } + ]; + + mockMongoEvalDatasetData.aggregate.mockResolvedValue(dataWithMissingContext); + mockMongoEvalDatasetData.countDocuments.mockResolvedValue(1); + + const req = { + body: { collectionId: validCollectionId, pageNum: 1, pageSize: 10 } + }; + + const result = await handler_test(req as any); + + expect(result.list[0][EvalDatasetDataKeyEnum.Context]).toEqual([]); + }); + + it('should handle missing retrievalContext with empty array', async () => { + const dataWithMissingRetrievalContext = [ + { + _id: '65f5b5b5b5b5b5b5b5b5b5b2', + userInput: 'What is AI?', + actualOutput: 'AI stands for Artificial Intelligence', + expectedOutput: 'Artificial Intelligence', + context: [], + metadata: {}, + createFrom: 'manual', + createTime: new Date('2024-01-01'), + updateTime: new Date('2024-01-02') + } + ]; + + mockMongoEvalDatasetData.aggregate.mockResolvedValue(dataWithMissingRetrievalContext); + mockMongoEvalDatasetData.countDocuments.mockResolvedValue(1); + + const req = { + body: { collectionId: validCollectionId, pageNum: 1, pageSize: 10 } + }; + + const result = await handler_test(req as any); + + expect(result.list[0][EvalDatasetDataKeyEnum.RetrievalContext]).toEqual([]); + }); + + it('should 
handle missing metadata with empty object', async () => { + const dataWithMissingMetadata = [ + { + _id: '65f5b5b5b5b5b5b5b5b5b5b2', + userInput: 'What is AI?', + actualOutput: 'AI stands for Artificial Intelligence', + expectedOutput: 'Artificial Intelligence', + context: [], + retrievalContext: [], + createFrom: 'manual', + createTime: new Date('2024-01-01'), + updateTime: new Date('2024-01-02') + } + ]; + + mockMongoEvalDatasetData.aggregate.mockResolvedValue(dataWithMissingMetadata); + mockMongoEvalDatasetData.countDocuments.mockResolvedValue(1); + + const req = { + body: { collectionId: validCollectionId, pageNum: 1, pageSize: 10 } + }; + + const result = await handler_test(req as any); + + expect(result.list[0].metadata).toEqual({}); + }); + + it('should convert ObjectId to string', async () => { + const req = { + body: { collectionId: validCollectionId, pageNum: 1, pageSize: 10 } + }; + + const result = await handler_test(req as any); + + result.list.forEach((item) => { + expect(typeof item._id).toBe('string'); + }); + }); + }); + + describe('Empty Results', () => { + it('should handle empty data list', async () => { + mockMongoEvalDatasetData.aggregate.mockResolvedValue([]); + mockMongoEvalDatasetData.countDocuments.mockResolvedValue(0); + + const req = { + body: { collectionId: validCollectionId, pageNum: 1, pageSize: 10 } + }; + + const result = await handler_test(req as any); + + expect(result).toEqual({ + total: 0, + list: [] + }); + }); + + it('should handle zero total count with search', async () => { + mockMongoEvalDatasetData.aggregate.mockResolvedValue([]); + mockMongoEvalDatasetData.countDocuments.mockResolvedValue(0); + + const req = { + body: { + collectionId: validCollectionId, + searchKey: 'nonexistent', + pageNum: 1, + pageSize: 10 + } + }; + + const result = await handler_test(req as any); + + expect(result.total).toBe(0); + expect(result.list).toEqual([]); + }); + }); + + describe('Error Handling', () => { + it('should propagate database 
aggregate errors', async () => { + const dbError = new Error('Database connection failed'); + mockMongoEvalDatasetData.aggregate.mockRejectedValue(dbError); + + const req = { + body: { collectionId: validCollectionId, pageNum: 1, pageSize: 10 } + }; + + await expect(handler_test(req as any)).rejects.toBe(dbError); + }); + + it('should propagate database count errors', async () => { + const dbError = new Error('Database connection failed'); + mockMongoEvalDatasetData.countDocuments.mockRejectedValue(dbError); + + const req = { + body: { collectionId: validCollectionId, pageNum: 1, pageSize: 10 } + }; + + await expect(handler_test(req as any)).rejects.toBe(dbError); + }); + + it('should handle Promise.all rejection', async () => { + const dbError = new Error('Database connection failed'); + mockMongoEvalDatasetData.aggregate.mockResolvedValue(mockDataItems); + mockMongoEvalDatasetData.countDocuments.mockRejectedValue(dbError); + + const req = { + body: { collectionId: validCollectionId, pageNum: 1, pageSize: 10 } + }; + + await expect(handler_test(req as any)).rejects.toBe(dbError); + }); + + it('should handle collection findOne errors', async () => { + const dbError = new Error('Collection query failed'); + mockMongoEvalDatasetCollection.findOne.mockRejectedValue(dbError); + + const req = { + body: { collectionId: validCollectionId, pageNum: 1, pageSize: 10 } + }; + + await expect(handler_test(req as any)).rejects.toBe(dbError); + }); + }); + + describe('Collection Isolation', () => { + it('should filter results by collection ID', async () => { + const req = { + body: { collectionId: validCollectionId, pageNum: 1, pageSize: 10 } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([{ $match: { datasetId: new Types.ObjectId(validCollectionId) } }]) + ); + + expect(mockMongoEvalDatasetData.countDocuments).toHaveBeenCalledWith({ + datasetId: new Types.ObjectId(validCollectionId) + }); + 
}); + + it('should include collection ID in search filter', async () => { + const req = { + body: { collectionId: validCollectionId, searchKey: 'test', pageNum: 1, pageSize: 10 } + }; + + await handler_test(req as any); + + const expectedMatch = { + datasetId: new Types.ObjectId(validCollectionId), + $or: [ + { [EvalDatasetDataKeyEnum.UserInput]: { $regex: new RegExp('test', 'i') } }, + { [EvalDatasetDataKeyEnum.ExpectedOutput]: { $regex: new RegExp('test', 'i') } }, + { [EvalDatasetDataKeyEnum.ActualOutput]: { $regex: new RegExp('test', 'i') } } + ] + }; + + expect(mockMongoEvalDatasetData.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([{ $match: expectedMatch }]) + ); + + expect(mockMongoEvalDatasetData.countDocuments).toHaveBeenCalledWith(expectedMatch); + }); + }); + + describe('Edge Cases', () => { + it('should handle very large page size', async () => { + const req = { + body: { collectionId: validCollectionId, pageNum: 1, pageSize: 1000 } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([{ $limit: 1000 }]) + ); + }); + + it('should handle high page number', async () => { + const req = { + body: { collectionId: validCollectionId, pageNum: 100, pageSize: 10 } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([{ $skip: 990 }]) + ); + }); + + it('should handle complex search patterns with special characters', async () => { + const complexSearchKey = 'What[?]*+^$.|(){}\\'; + const req = { + body: { + collectionId: validCollectionId, + searchKey: complexSearchKey, + pageNum: 1, + pageSize: 10 + } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([ + { + $match: { + datasetId: new Types.ObjectId(validCollectionId), + $or: expect.arrayContaining([ + { [EvalDatasetDataKeyEnum.UserInput]: { $regex: 
expect.any(RegExp) } }, + { [EvalDatasetDataKeyEnum.ExpectedOutput]: { $regex: expect.any(RegExp) } }, + { [EvalDatasetDataKeyEnum.ActualOutput]: { $regex: expect.any(RegExp) } } + ]) + } + } + ]) + ); + }); + + it('should handle unicode characters in search', async () => { + const unicodeSearchKey = '人工智能 🤖 émojis'; + const req = { + body: { + collectionId: validCollectionId, + searchKey: unicodeSearchKey, + pageNum: 1, + pageSize: 10 + } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.aggregate).toHaveBeenCalledWith( + expect.arrayContaining([ + { + $match: { + datasetId: new Types.ObjectId(validCollectionId), + $or: expect.arrayContaining([ + { [EvalDatasetDataKeyEnum.UserInput]: { $regex: expect.any(RegExp) } } + ]) + } + } + ]) + ); + }); + + it('should handle very long search keys', async () => { + const longSearchKey = 'a'.repeat(1000); + const req = { + body: { + collectionId: validCollectionId, + searchKey: longSearchKey, + pageNum: 1, + pageSize: 10 + } + }; + + const result = await handler_test(req as any); + expect(result).toBeDefined(); + }); + + it('should handle invalid ObjectId format for collectionId', async () => { + const invalidCollectionId = 'invalid-object-id'; + const req = { + body: { collectionId: invalidCollectionId, pageNum: 1, pageSize: 10 } + }; + + // MongoDB ObjectId constructor will throw BSONError for invalid format + await expect(handler_test(req as any)).rejects.toThrow( + 'input must be a 24 character hex string' + ); + }); + }); +}); diff --git a/test/cases/pages/api/core/evaluation/dataset/data/qualityAssessment.test.ts b/test/cases/pages/api/core/evaluation/dataset/data/qualityAssessment.test.ts new file mode 100644 index 000000000000..f5aa16eee987 --- /dev/null +++ b/test/cases/pages/api/core/evaluation/dataset/data/qualityAssessment.test.ts @@ -0,0 +1,604 @@ +import { describe, expect, it, vi, beforeEach } from 'vitest'; +import { handler_test } from 
'@/pages/api/core/evaluation/dataset/data/qualityAssessment'; +import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; +import { MongoEvalDatasetData } from '@fastgpt/service/core/evaluation/dataset/evalDatasetDataSchema'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema'; +import { + addEvalDatasetDataQualityJob, + removeEvalDatasetDataQualityJob, + checkEvalDatasetDataQualityJobActive +} from '@fastgpt/service/core/evaluation/dataset/dataQualityMq'; +import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; +import { EvalDatasetDataQualityStatusEnum } from '@fastgpt/global/core/evaluation/constants'; + +vi.mock('@fastgpt/service/support/permission/user/auth'); +vi.mock('@fastgpt/service/core/evaluation/dataset/evalDatasetDataSchema', () => ({ + MongoEvalDatasetData: { + findById: vi.fn(), + findByIdAndUpdate: vi.fn() + } +})); +vi.mock('@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema', () => ({ + MongoEvalDatasetCollection: { + findOne: vi.fn() + } +})); +vi.mock('@fastgpt/service/core/evaluation/dataset/dataQualityMq', () => ({ + addEvalDatasetDataQualityJob: vi.fn(), + removeEvalDatasetDataQualityJob: vi.fn(), + checkEvalDatasetDataQualityJobActive: vi.fn() +})); + +const mockAuthUserPer = vi.mocked(authUserPer); +const mockMongoEvalDatasetData = vi.mocked(MongoEvalDatasetData); +const mockMongoEvalDatasetCollection = vi.mocked(MongoEvalDatasetCollection); +const mockAddEvalDatasetDataQualityJob = vi.mocked(addEvalDatasetDataQualityJob); +const mockRemoveEvalDatasetDataQualityJob = vi.mocked(removeEvalDatasetDataQualityJob); +const mockCheckEvalDatasetDataQualityJobActive = vi.mocked(checkEvalDatasetDataQualityJobActive); + +describe('QualityAssessment API', () => { + const validTeamId = 'team123'; + const validTmbId = 'tmb123'; + const validDataId = '65f5b5b5b5b5b5b5b5b5b5b5'; + const validCollectionId = '65f5b5b5b5b5b5b5b5b5b5b6'; 
+ const validEvalModel = 'gpt-4'; + + beforeEach(() => { + vi.clearAllMocks(); + + mockAuthUserPer.mockResolvedValue({ + teamId: validTeamId, + tmbId: validTmbId + }); + + mockMongoEvalDatasetData.findById.mockResolvedValue({ + _id: validDataId, + datasetId: validCollectionId + } as any); + + mockMongoEvalDatasetCollection.findOne.mockResolvedValue({ + _id: validCollectionId, + teamId: validTeamId + } as any); + + mockCheckEvalDatasetDataQualityJobActive.mockResolvedValue(false); + mockAddEvalDatasetDataQualityJob.mockResolvedValue({} as any); + mockMongoEvalDatasetData.findByIdAndUpdate.mockResolvedValue({} as any); + }); + + describe('Parameter Validation', () => { + it('should return error when dataId is missing', async () => { + const req = { + body: { + evalModel: validEvalModel + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('dataId is required and must be a string'); + }); + + it('should return error when dataId is not a string', async () => { + const req = { + body: { + dataId: 123, + evalModel: validEvalModel + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('dataId is required and must be a string'); + }); + + it('should return error when evalModel is missing', async () => { + const req = { + body: { + dataId: validDataId + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('evalModel is required and must be a string'); + }); + + it('should return error when evalModel is not a string', async () => { + const req = { + body: { + dataId: validDataId, + evalModel: 123 + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('evalModel is required and must be a string'); + }); + + it('should return error when both dataId and evalModel are missing', async () => { + const req = { + body: {} + }; + + const result = await handler_test(req as any); + expect(result).toBe('dataId is required and must be a string'); + }); + }); + + 
describe('Authentication and Authorization', () => { + it('should call authUserPer with correct parameters', async () => { + const req = { + body: { + dataId: validDataId, + evalModel: validEvalModel + } + }; + + await handler_test(req as any); + + expect(mockAuthUserPer).toHaveBeenCalledWith({ + req, + authToken: true, + authApiKey: true, + per: WritePermissionVal + }); + }); + + it('should propagate authentication errors', async () => { + const authError = new Error('Authentication failed'); + mockAuthUserPer.mockRejectedValue(authError); + + const req = { + body: { + dataId: validDataId, + evalModel: validEvalModel + } + }; + + await expect(handler_test(req as any)).rejects.toBe(authError); + }); + }); + + describe('Data Validation', () => { + it('should verify dataset data exists', async () => { + const req = { + body: { + dataId: validDataId, + evalModel: validEvalModel + } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.findById).toHaveBeenCalledWith(validDataId); + }); + + it('should return error when dataset data not found', async () => { + mockMongoEvalDatasetData.findById.mockResolvedValue(null); + + const req = { + body: { + dataId: validDataId, + evalModel: validEvalModel + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('Dataset data not found'); + }); + + it('should verify collection exists and belongs to team', async () => { + const req = { + body: { + dataId: validDataId, + evalModel: validEvalModel + } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetCollection.findOne).toHaveBeenCalledWith({ + _id: validCollectionId, + teamId: validTeamId + }); + }); + + it('should return error when collection not found', async () => { + mockMongoEvalDatasetCollection.findOne.mockResolvedValue(null); + + const req = { + body: { + dataId: validDataId, + evalModel: validEvalModel + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('Dataset collection not 
found or access denied'); + }); + + it('should return error when collection belongs to different team', async () => { + mockMongoEvalDatasetCollection.findOne.mockResolvedValue(null); + + const req = { + body: { + dataId: validDataId, + evalModel: validEvalModel + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('Dataset collection not found or access denied'); + }); + }); + + describe('Quality Assessment Job Management', () => { + it('should check for active job before creating new one', async () => { + const req = { + body: { + dataId: validDataId, + evalModel: validEvalModel + } + }; + + await handler_test(req as any); + + expect(mockCheckEvalDatasetDataQualityJobActive).toHaveBeenCalledWith(validDataId); + }); + + it('should remove active job if one exists', async () => { + mockCheckEvalDatasetDataQualityJobActive.mockResolvedValue(true); + + const req = { + body: { + dataId: validDataId, + evalModel: validEvalModel + } + }; + + await handler_test(req as any); + + expect(mockRemoveEvalDatasetDataQualityJob).toHaveBeenCalledWith(validDataId); + }); + + it('should not remove job if none exists', async () => { + mockCheckEvalDatasetDataQualityJobActive.mockResolvedValue(false); + + const req = { + body: { + dataId: validDataId, + evalModel: validEvalModel + } + }; + + await handler_test(req as any); + + expect(mockRemoveEvalDatasetDataQualityJob).not.toHaveBeenCalled(); + }); + + it('should add new quality assessment job', async () => { + const req = { + body: { + dataId: validDataId, + evalModel: validEvalModel + } + }; + + await handler_test(req as any); + + expect(mockAddEvalDatasetDataQualityJob).toHaveBeenCalledWith({ + dataId: validDataId, + evalModel: validEvalModel + }); + }); + + it('should update dataset data with quality metadata', async () => { + const req = { + body: { + dataId: validDataId, + evalModel: validEvalModel + } + }; + + await handler_test(req as any); + + 
expect(mockMongoEvalDatasetData.findByIdAndUpdate).toHaveBeenCalledWith(validDataId, { + $set: { + 'metadata.qualityStatus': EvalDatasetDataQualityStatusEnum.queuing, + 'metadata.qualityModel': validEvalModel, + 'metadata.qualityQueueTime': expect.any(Date) + } + }); + }); + + it('should return success when job is queued successfully', async () => { + const req = { + body: { + dataId: validDataId, + evalModel: validEvalModel + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('success'); + }); + }); + + describe('Error Handling', () => { + it('should handle job removal errors and return error message', async () => { + const jobError = new Error('Failed to remove job'); + mockCheckEvalDatasetDataQualityJobActive.mockResolvedValue(true); + mockRemoveEvalDatasetDataQualityJob.mockRejectedValue(jobError); + + const req = { + body: { + dataId: validDataId, + evalModel: validEvalModel + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('Failed to remove job'); + }); + + it('should handle job addition errors and return error message', async () => { + const jobError = new Error('Failed to add job'); + mockAddEvalDatasetDataQualityJob.mockRejectedValue(jobError); + + const req = { + body: { + dataId: validDataId, + evalModel: validEvalModel + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('Failed to add job'); + }); + + it('should handle database update errors and return error message', async () => { + const dbError = new Error('Database update failed'); + mockMongoEvalDatasetData.findByIdAndUpdate.mockRejectedValue(dbError); + + const req = { + body: { + dataId: validDataId, + evalModel: validEvalModel + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('Database update failed'); + }); + + it('should handle non-Error objects and return generic message', async () => { + mockAddEvalDatasetDataQualityJob.mockRejectedValue('String error'); + + const req = 
{ + body: { + dataId: validDataId, + evalModel: validEvalModel + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('Failed to queue quality assessment job'); + }); + + it('should handle check job active errors and return error message', async () => { + const checkError = new Error('Failed to check job status'); + mockCheckEvalDatasetDataQualityJobActive.mockRejectedValue(checkError); + + const req = { + body: { + dataId: validDataId, + evalModel: validEvalModel + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('Failed to check job status'); + }); + }); + + describe('Edge Cases', () => { + it('should handle empty string dataId', async () => { + const req = { + body: { + dataId: '', + evalModel: validEvalModel + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('dataId is required and must be a string'); + }); + + it('should handle empty string evalModel', async () => { + const req = { + body: { + dataId: validDataId, + evalModel: '' + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('evalModel is required and must be a string'); + }); + + it('should handle null dataId', async () => { + const req = { + body: { + dataId: null, + evalModel: validEvalModel + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('dataId is required and must be a string'); + }); + + it('should handle undefined evalModel', async () => { + const req = { + body: { + dataId: validDataId, + evalModel: undefined + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('evalModel is required and must be a string'); + }); + + it('should handle very long dataId', async () => { + const longDataId = 'a'.repeat(1000); + mockMongoEvalDatasetData.findById.mockResolvedValue({ + _id: longDataId, + datasetId: validCollectionId + } as any); + + const req = { + body: { + dataId: longDataId, + evalModel: validEvalModel + } + }; + + const 
result = await handler_test(req as any); + expect(result).toBe('success'); + }); + + it('should handle very long evalModel', async () => { + const longEvalModel = 'gpt-4-' + 'a'.repeat(1000); + + const req = { + body: { + dataId: validDataId, + evalModel: longEvalModel + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('success'); + }); + + it('should handle special characters in evalModel', async () => { + const specialEvalModel = 'gpt-4-特殊字符-🚀'; + + const req = { + body: { + dataId: validDataId, + evalModel: specialEvalModel + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('success'); + + expect(mockAddEvalDatasetDataQualityJob).toHaveBeenCalledWith({ + dataId: validDataId, + evalModel: specialEvalModel + }); + }); + }); + + describe('Integration Workflow', () => { + it('should execute complete workflow when job exists', async () => { + // Reset all mocks and set up specific behavior for this test + vi.clearAllMocks(); + + // Set up all necessary mocks for this test + mockAuthUserPer.mockResolvedValue({ + teamId: validTeamId, + tmbId: validTmbId + }); + + mockMongoEvalDatasetData.findById.mockResolvedValue({ + _id: validDataId, + datasetId: validCollectionId + } as any); + + mockMongoEvalDatasetCollection.findOne.mockResolvedValue({ + _id: validCollectionId, + teamId: validTeamId + } as any); + + mockCheckEvalDatasetDataQualityJobActive.mockResolvedValue(true); + mockRemoveEvalDatasetDataQualityJob.mockResolvedValue(undefined); + mockAddEvalDatasetDataQualityJob.mockResolvedValue({} as any); + mockMongoEvalDatasetData.findByIdAndUpdate.mockResolvedValue({} as any); + + const req = { + body: { + dataId: validDataId, + evalModel: validEvalModel + } + }; + + const result = await handler_test(req as any); + + expect(mockAuthUserPer).toHaveBeenCalledWith({ + req, + authToken: true, + authApiKey: true, + per: WritePermissionVal + }); + 
expect(mockMongoEvalDatasetData.findById).toHaveBeenCalledWith(validDataId); + expect(mockMongoEvalDatasetCollection.findOne).toHaveBeenCalledWith({ + _id: validCollectionId, + teamId: validTeamId + }); + expect(mockCheckEvalDatasetDataQualityJobActive).toHaveBeenCalledWith(validDataId); + expect(mockRemoveEvalDatasetDataQualityJob).toHaveBeenCalledWith(validDataId); + expect(mockAddEvalDatasetDataQualityJob).toHaveBeenCalledWith({ + dataId: validDataId, + evalModel: validEvalModel + }); + expect(mockMongoEvalDatasetData.findByIdAndUpdate).toHaveBeenCalledWith(validDataId, { + $set: { + 'metadata.qualityStatus': EvalDatasetDataQualityStatusEnum.queuing, + 'metadata.qualityModel': validEvalModel, + 'metadata.qualityQueueTime': expect.any(Date) + } + }); + expect(result).toBe('success'); + }); + + it('should execute complete workflow when no job exists', async () => { + mockCheckEvalDatasetDataQualityJobActive.mockResolvedValue(false); + + const req = { + body: { + dataId: validDataId, + evalModel: validEvalModel + } + }; + + const result = await handler_test(req as any); + + expect(mockAuthUserPer).toHaveBeenCalledWith({ + req, + authToken: true, + authApiKey: true, + per: WritePermissionVal + }); + expect(mockMongoEvalDatasetData.findById).toHaveBeenCalledWith(validDataId); + expect(mockMongoEvalDatasetCollection.findOne).toHaveBeenCalledWith({ + _id: validCollectionId, + teamId: validTeamId + }); + expect(mockCheckEvalDatasetDataQualityJobActive).toHaveBeenCalledWith(validDataId); + expect(mockRemoveEvalDatasetDataQualityJob).not.toHaveBeenCalled(); + expect(mockAddEvalDatasetDataQualityJob).toHaveBeenCalledWith({ + dataId: validDataId, + evalModel: validEvalModel + }); + expect(mockMongoEvalDatasetData.findByIdAndUpdate).toHaveBeenCalledWith(validDataId, { + $set: { + 'metadata.qualityStatus': EvalDatasetDataQualityStatusEnum.queuing, + 'metadata.qualityModel': validEvalModel, + 'metadata.qualityQueueTime': expect.any(Date) + } + }); + 
expect(result).toBe('success'); + }); + }); +}); diff --git a/test/cases/pages/api/core/evaluation/dataset/data/update.test.ts b/test/cases/pages/api/core/evaluation/dataset/data/update.test.ts new file mode 100644 index 000000000000..2c268786a198 --- /dev/null +++ b/test/cases/pages/api/core/evaluation/dataset/data/update.test.ts @@ -0,0 +1,961 @@ +import { describe, expect, it, vi, beforeEach } from 'vitest'; +import { handler_test } from '@/pages/api/core/evaluation/dataset/data/update'; +import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; +import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun'; +import { MongoEvalDatasetData } from '@fastgpt/service/core/evaluation/dataset/evalDatasetDataSchema'; +import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema'; +import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; +import { EvalDatasetDataKeyEnum } from '@fastgpt/global/core/evaluation/constants'; +import { + removeEvalDatasetDataQualityJob, + addEvalDatasetDataQualityJob +} from '@fastgpt/service/core/evaluation/dataset/dataQualityMq'; +import { addLog } from '@fastgpt/service/common/system/log'; + +vi.mock('@fastgpt/service/support/permission/user/auth'); +vi.mock('@fastgpt/service/common/mongo/sessionRun'); +vi.mock('@fastgpt/service/core/evaluation/dataset/evalDatasetDataSchema', () => ({ + MongoEvalDatasetData: { + findById: vi.fn(), + updateOne: vi.fn() + } +})); +vi.mock('@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema', () => ({ + MongoEvalDatasetCollection: { + findOne: vi.fn() + } +})); +vi.mock('@fastgpt/service/core/evaluation/dataset/dataQualityMq', () => ({ + removeEvalDatasetDataQualityJob: vi.fn(), + addEvalDatasetDataQualityJob: vi.fn() +})); +vi.mock('@fastgpt/service/common/system/log', () => ({ + addLog: { + info: vi.fn(), + error: vi.fn() + } +})); + +const mockAuthUserPer = vi.mocked(authUserPer); 
+const mockMongoSessionRun = vi.mocked(mongoSessionRun); +const mockMongoEvalDatasetData = vi.mocked(MongoEvalDatasetData); +const mockMongoEvalDatasetCollection = vi.mocked(MongoEvalDatasetCollection); +const mockRemoveEvalDatasetDataQualityJob = vi.mocked(removeEvalDatasetDataQualityJob); +const mockAddEvalDatasetDataQualityJob = vi.mocked(addEvalDatasetDataQualityJob); +const mockAddLog = vi.mocked(addLog); + +describe('EvalDatasetData Update API', () => { + const validTeamId = 'team123'; + const validTmbId = 'tmb123'; + const validDataId = '65f5b5b5b5b5b5b5b5b5b5b5'; + const validCollectionId = '65f5b5b5b5b5b5b5b5b5b5b6'; + + beforeEach(() => { + vi.clearAllMocks(); + + mockAuthUserPer.mockResolvedValue({ + teamId: validTeamId, + tmbId: validTmbId + }); + + const mockExistingData = { + _id: validDataId, + datasetId: validCollectionId, + [EvalDatasetDataKeyEnum.UserInput]: 'Old input', + [EvalDatasetDataKeyEnum.ExpectedOutput]: 'Old output' + }; + + mockMongoEvalDatasetData.findById.mockReturnValue({ + session: vi.fn().mockResolvedValue(mockExistingData) + } as any); + + mockMongoEvalDatasetCollection.findOne.mockReturnValue({ + session: vi.fn().mockResolvedValue({ + _id: validCollectionId, + teamId: validTeamId + }) + } as any); + + mockMongoEvalDatasetData.updateOne.mockResolvedValue({ acknowledged: true } as any); + + mockMongoSessionRun.mockImplementation(async (callback) => { + return callback({} as any); + }); + + mockRemoveEvalDatasetDataQualityJob.mockResolvedValue(true); + mockAddEvalDatasetDataQualityJob.mockResolvedValue({} as any); + }); + + describe('Parameter Validation', () => { + it('should reject when dataId is missing', async () => { + const req = { + body: { + userInput: 'Test input', + expectedOutput: 'Test output', + enableQualityEvaluation: false + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'dataId is required and must be a string' + ); + }); + + it('should reject when dataId is not a string', async () => { + const 
req = { + body: { + dataId: 123, + userInput: 'Test input', + expectedOutput: 'Test output', + enableQualityEvaluation: false + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'dataId is required and must be a string' + ); + }); + + it('should reject when userInput is missing', async () => { + const req = { + body: { + dataId: validDataId, + expectedOutput: 'Test output', + enableQualityEvaluation: false + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'userInput is required and must be a non-empty string' + ); + }); + + it('should reject when userInput is empty string', async () => { + const req = { + body: { + dataId: validDataId, + userInput: '', + expectedOutput: 'Test output', + enableQualityEvaluation: false + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'userInput is required and must be a non-empty string' + ); + }); + + it('should reject when userInput is only whitespace', async () => { + const req = { + body: { + dataId: validDataId, + userInput: ' ', + expectedOutput: 'Test output', + enableQualityEvaluation: false + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'userInput is required and must be a non-empty string' + ); + }); + + it('should reject when userInput is not a string', async () => { + const req = { + body: { + dataId: validDataId, + userInput: 123, + expectedOutput: 'Test output', + enableQualityEvaluation: false + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'userInput is required and must be a non-empty string' + ); + }); + + it('should reject when expectedOutput is missing', async () => { + const req = { + body: { + dataId: validDataId, + userInput: 'Test input', + enableQualityEvaluation: false + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'expectedOutput is required and must be a non-empty string' + ); + }); + + it('should reject when expectedOutput is empty string', async () => { + const req = { + 
body: { + dataId: validDataId, + userInput: 'Test input', + expectedOutput: '', + enableQualityEvaluation: false + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'expectedOutput is required and must be a non-empty string' + ); + }); + + it('should reject when expectedOutput is only whitespace', async () => { + const req = { + body: { + dataId: validDataId, + userInput: 'Test input', + expectedOutput: ' ', + enableQualityEvaluation: false + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'expectedOutput is required and must be a non-empty string' + ); + }); + + it('should reject when expectedOutput is not a string', async () => { + const req = { + body: { + dataId: validDataId, + userInput: 'Test input', + expectedOutput: 123, + enableQualityEvaluation: false + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'expectedOutput is required and must be a non-empty string' + ); + }); + + it('should reject when actualOutput is not a string', async () => { + const req = { + body: { + dataId: validDataId, + userInput: 'Test input', + expectedOutput: 'Test output', + actualOutput: 123, + enableQualityEvaluation: false + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'actualOutput must be a string if provided' + ); + }); + + it('should reject when context is not an array', async () => { + const req = { + body: { + dataId: validDataId, + userInput: 'Test input', + expectedOutput: 'Test output', + context: 'not an array', + enableQualityEvaluation: false + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'context must be an array of strings if provided' + ); + }); + + it('should reject when context contains non-string items', async () => { + const req = { + body: { + dataId: validDataId, + userInput: 'Test input', + expectedOutput: 'Test output', + context: ['valid', 123, 'also valid'], + enableQualityEvaluation: false + } + }; + + await expect(handler_test(req as 
any)).rejects.toEqual( + 'context must be an array of strings if provided' + ); + }); + + it('should reject when retrievalContext is not an array', async () => { + const req = { + body: { + dataId: validDataId, + userInput: 'Test input', + expectedOutput: 'Test output', + retrievalContext: 'not an array', + enableQualityEvaluation: false + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'retrievalContext must be an array of strings if provided' + ); + }); + + it('should reject when retrievalContext contains non-string items', async () => { + const req = { + body: { + dataId: validDataId, + userInput: 'Test input', + expectedOutput: 'Test output', + retrievalContext: ['valid', 123, 'also valid'], + enableQualityEvaluation: false + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'retrievalContext must be an array of strings if provided' + ); + }); + + it('should reject when enableQualityEvaluation is missing', async () => { + const req = { + body: { + dataId: validDataId, + userInput: 'Test input', + expectedOutput: 'Test output' + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'enableQualityEvaluation is required and must be a boolean' + ); + }); + + it('should reject when enableQualityEvaluation is not a boolean', async () => { + const req = { + body: { + dataId: validDataId, + userInput: 'Test input', + expectedOutput: 'Test output', + enableQualityEvaluation: 'true' + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'enableQualityEvaluation is required and must be a boolean' + ); + }); + + it('should reject when enableQualityEvaluation is true but qualityEvaluationModel is missing', async () => { + const req = { + body: { + dataId: validDataId, + userInput: 'Test input', + expectedOutput: 'Test output', + enableQualityEvaluation: true + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'qualityEvaluationModel is required when enableQualityEvaluation is true' + 
); + }); + + it('should reject when enableQualityEvaluation is true but qualityEvaluationModel is not a string', async () => { + const req = { + body: { + dataId: validDataId, + userInput: 'Test input', + expectedOutput: 'Test output', + enableQualityEvaluation: true, + qualityEvaluationModel: 123 + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'qualityEvaluationModel is required when enableQualityEvaluation is true' + ); + }); + }); + + describe('Authentication and Authorization', () => { + it('should call authUserPer with correct parameters', async () => { + const req = { + body: { + dataId: validDataId, + userInput: 'Test input', + expectedOutput: 'Test output', + enableQualityEvaluation: false + } + }; + + await handler_test(req as any); + + expect(mockAuthUserPer).toHaveBeenCalledWith({ + req, + authToken: true, + authApiKey: true, + per: WritePermissionVal + }); + }); + + it('should propagate authentication errors', async () => { + const authError = new Error('Authentication failed'); + mockAuthUserPer.mockRejectedValue(authError); + + const req = { + body: { + dataId: validDataId, + userInput: 'Test input', + expectedOutput: 'Test output', + enableQualityEvaluation: false + } + }; + + await expect(handler_test(req as any)).rejects.toBe(authError); + }); + }); + + describe('Data Validation', () => { + it('should reject when dataset data does not exist', async () => { + mockMongoEvalDatasetData.findById.mockReturnValue({ + session: vi.fn().mockResolvedValue(null) + } as any); + + const req = { + body: { + dataId: validDataId, + userInput: 'Test input', + expectedOutput: 'Test output', + enableQualityEvaluation: false + } + }; + + await expect(handler_test(req as any)).rejects.toEqual('Dataset data not found'); + }); + + it('should reject when collection does not exist', async () => { + mockMongoEvalDatasetCollection.findOne.mockReturnValue({ + session: vi.fn().mockResolvedValue(null) + } as any); + + const req = { + body: { + dataId: 
validDataId, + userInput: 'Test input', + expectedOutput: 'Test output', + enableQualityEvaluation: false + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'Access denied or dataset collection not found' + ); + }); + + it('should reject when collection belongs to different team', async () => { + mockMongoEvalDatasetCollection.findOne.mockReturnValue({ + session: vi.fn().mockResolvedValue(null) + } as any); + + const req = { + body: { + dataId: validDataId, + userInput: 'Test input', + expectedOutput: 'Test output', + enableQualityEvaluation: false + } + }; + + await expect(handler_test(req as any)).rejects.toEqual( + 'Access denied or dataset collection not found' + ); + }); + }); + + describe('Data Update', () => { + it('should update data with required fields only', async () => { + const req = { + body: { + dataId: validDataId, + userInput: 'Test input', + expectedOutput: 'Test output', + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + + expect(mockMongoSessionRun).toHaveBeenCalledWith(expect.any(Function)); + expect(mockMongoEvalDatasetData.updateOne).toHaveBeenCalledWith( + { _id: validDataId }, + { + [EvalDatasetDataKeyEnum.UserInput]: 'Test input', + [EvalDatasetDataKeyEnum.ActualOutput]: '', + [EvalDatasetDataKeyEnum.ExpectedOutput]: 'Test output', + [EvalDatasetDataKeyEnum.Context]: [], + [EvalDatasetDataKeyEnum.RetrievalContext]: [], + updateTime: expect.any(Date) + }, + { session: {} } + ); + expect(result).toBe('success'); + }); + + it('should update data with all optional fields', async () => { + const req = { + body: { + dataId: validDataId, + userInput: 'Test input', + actualOutput: 'Actual output', + expectedOutput: 'Test output', + context: ['Context 1', 'Context 2'], + retrievalContext: ['Retrieval 1', 'Retrieval 2'], + enableQualityEvaluation: false + } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.updateOne).toHaveBeenCalledWith( + { _id: validDataId 
}, + { + [EvalDatasetDataKeyEnum.UserInput]: 'Test input', + [EvalDatasetDataKeyEnum.ActualOutput]: 'Actual output', + [EvalDatasetDataKeyEnum.ExpectedOutput]: 'Test output', + [EvalDatasetDataKeyEnum.Context]: ['Context 1', 'Context 2'], + [EvalDatasetDataKeyEnum.RetrievalContext]: ['Retrieval 1', 'Retrieval 2'], + updateTime: expect.any(Date) + }, + { session: {} } + ); + }); + + it('should trim whitespace from userInput and expectedOutput', async () => { + const req = { + body: { + dataId: validDataId, + userInput: ' Test input ', + expectedOutput: ' Test output ', + enableQualityEvaluation: false + } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.updateOne).toHaveBeenCalledWith( + { _id: validDataId }, + { + [EvalDatasetDataKeyEnum.UserInput]: 'Test input', + [EvalDatasetDataKeyEnum.ActualOutput]: '', + [EvalDatasetDataKeyEnum.ExpectedOutput]: 'Test output', + [EvalDatasetDataKeyEnum.Context]: [], + [EvalDatasetDataKeyEnum.RetrievalContext]: [], + updateTime: expect.any(Date) + }, + { session: {} } + ); + }); + + it('should trim whitespace from actualOutput', async () => { + const req = { + body: { + dataId: validDataId, + userInput: 'Test input', + actualOutput: ' Actual output ', + expectedOutput: 'Test output', + enableQualityEvaluation: false + } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.updateOne).toHaveBeenCalledWith( + { _id: validDataId }, + { + [EvalDatasetDataKeyEnum.UserInput]: 'Test input', + [EvalDatasetDataKeyEnum.ActualOutput]: 'Actual output', + [EvalDatasetDataKeyEnum.ExpectedOutput]: 'Test output', + [EvalDatasetDataKeyEnum.Context]: [], + [EvalDatasetDataKeyEnum.RetrievalContext]: [], + updateTime: expect.any(Date) + }, + { session: {} } + ); + }); + + it('should handle empty actualOutput', async () => { + const req = { + body: { + dataId: validDataId, + userInput: 'Test input', + actualOutput: '', + expectedOutput: 'Test output', + enableQualityEvaluation: false + } + }; + + 
await handler_test(req as any); + + expect(mockMongoEvalDatasetData.updateOne).toHaveBeenCalledWith( + { _id: validDataId }, + { + [EvalDatasetDataKeyEnum.UserInput]: 'Test input', + [EvalDatasetDataKeyEnum.ActualOutput]: '', + [EvalDatasetDataKeyEnum.ExpectedOutput]: 'Test output', + [EvalDatasetDataKeyEnum.Context]: [], + [EvalDatasetDataKeyEnum.RetrievalContext]: [], + updateTime: expect.any(Date) + }, + { session: {} } + ); + }); + + it('should handle undefined actualOutput', async () => { + const req = { + body: { + dataId: validDataId, + userInput: 'Test input', + expectedOutput: 'Test output', + enableQualityEvaluation: false + } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.updateOne).toHaveBeenCalledWith( + { _id: validDataId }, + { + [EvalDatasetDataKeyEnum.UserInput]: 'Test input', + [EvalDatasetDataKeyEnum.ActualOutput]: '', + [EvalDatasetDataKeyEnum.ExpectedOutput]: 'Test output', + [EvalDatasetDataKeyEnum.Context]: [], + [EvalDatasetDataKeyEnum.RetrievalContext]: [], + updateTime: expect.any(Date) + }, + { session: {} } + ); + }); + + it('should handle empty context array', async () => { + const req = { + body: { + dataId: validDataId, + userInput: 'Test input', + expectedOutput: 'Test output', + context: [], + enableQualityEvaluation: false + } + }; + + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.updateOne).toHaveBeenCalledWith( + { _id: validDataId }, + { + [EvalDatasetDataKeyEnum.UserInput]: 'Test input', + [EvalDatasetDataKeyEnum.ActualOutput]: '', + [EvalDatasetDataKeyEnum.ExpectedOutput]: 'Test output', + [EvalDatasetDataKeyEnum.Context]: [], + [EvalDatasetDataKeyEnum.RetrievalContext]: [], + updateTime: expect.any(Date) + }, + { session: {} } + ); + }); + + it('should handle empty retrievalContext array', async () => { + const req = { + body: { + dataId: validDataId, + userInput: 'Test input', + expectedOutput: 'Test output', + retrievalContext: [], + enableQualityEvaluation: false + } + }; 
+ + await handler_test(req as any); + + expect(mockMongoEvalDatasetData.updateOne).toHaveBeenCalledWith( + { _id: validDataId }, + { + [EvalDatasetDataKeyEnum.UserInput]: 'Test input', + [EvalDatasetDataKeyEnum.ActualOutput]: '', + [EvalDatasetDataKeyEnum.ExpectedOutput]: 'Test output', + [EvalDatasetDataKeyEnum.Context]: [], + [EvalDatasetDataKeyEnum.RetrievalContext]: [], + updateTime: expect.any(Date) + }, + { session: {} } + ); + }); + + it('should propagate database update errors', async () => { + const dbError = new Error('Database update failed'); + mockMongoSessionRun.mockRejectedValue(dbError); + + const req = { + body: { + dataId: validDataId, + userInput: 'Test input', + expectedOutput: 'Test output', + enableQualityEvaluation: false + } + }; + + await expect(handler_test(req as any)).rejects.toBe(dbError); + }); + }); + + describe('Quality Evaluation', () => { + it('should not trigger quality evaluation when disabled', async () => { + const req = { + body: { + dataId: validDataId, + userInput: 'Test input', + expectedOutput: 'Test output', + enableQualityEvaluation: false + } + }; + + await handler_test(req as any); + + expect(mockRemoveEvalDatasetDataQualityJob).not.toHaveBeenCalled(); + expect(mockAddEvalDatasetDataQualityJob).not.toHaveBeenCalled(); + expect(mockAddLog.info).not.toHaveBeenCalled(); + }); + + it('should trigger quality evaluation when enabled', async () => { + const qualityEvaluationModel = 'gpt-4'; + const req = { + body: { + dataId: validDataId, + userInput: 'Test input', + expectedOutput: 'Test output', + enableQualityEvaluation: true, + qualityEvaluationModel + } + }; + + await handler_test(req as any); + + expect(mockRemoveEvalDatasetDataQualityJob).toHaveBeenCalledWith(validDataId); + expect(mockAddEvalDatasetDataQualityJob).toHaveBeenCalledWith({ + dataId: validDataId, + evalModel: qualityEvaluationModel + }); + expect(mockAddLog.info).toHaveBeenCalledWith( + 'Quality evaluation task enqueued successfully', + { + dataId: 
validDataId, + evalModel: qualityEvaluationModel, + teamId: validTeamId + } + ); + }); + + it('should handle quality evaluation job removal failure gracefully', async () => { + const qualityEvaluationModel = 'gpt-4'; + const jobError = new Error('Failed to remove job'); + mockRemoveEvalDatasetDataQualityJob.mockRejectedValue(jobError); + + const req = { + body: { + dataId: validDataId, + userInput: 'Test input', + expectedOutput: 'Test output', + enableQualityEvaluation: true, + qualityEvaluationModel + } + }; + + const result = await handler_test(req as any); + + expect(mockAddLog.error).toHaveBeenCalledWith('Failed to manage quality evaluation task', { + dataId: validDataId, + evalModel: qualityEvaluationModel, + teamId: validTeamId, + error: jobError + }); + expect(result).toBe('success'); + }); + + it('should handle quality evaluation job addition failure gracefully', async () => { + const qualityEvaluationModel = 'gpt-4'; + const jobError = new Error('Failed to add job'); + mockAddEvalDatasetDataQualityJob.mockRejectedValue(jobError); + + const req = { + body: { + dataId: validDataId, + userInput: 'Test input', + expectedOutput: 'Test output', + enableQualityEvaluation: true, + qualityEvaluationModel + } + }; + + const result = await handler_test(req as any); + + expect(mockAddLog.error).toHaveBeenCalledWith('Failed to manage quality evaluation task', { + dataId: validDataId, + evalModel: qualityEvaluationModel, + teamId: validTeamId, + error: jobError + }); + expect(result).toBe('success'); + }); + }); + + describe('Edge Cases', () => { + it('should handle very long userInput', async () => { + const longInput = 'a'.repeat(10000); + const req = { + body: { + dataId: validDataId, + userInput: longInput, + expectedOutput: 'Test output', + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('success'); + }); + + it('should handle very long expectedOutput', async () => { + const longOutput = 
'a'.repeat(10000); + const req = { + body: { + dataId: validDataId, + userInput: 'Test input', + expectedOutput: longOutput, + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('success'); + }); + + it('should handle special characters in inputs', async () => { + const req = { + body: { + dataId: validDataId, + userInput: 'Test input with 特殊字符 and émojis 🚀', + expectedOutput: 'Test output with 特殊字符 and émojis 🎯', + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('success'); + }); + + it('should handle newlines and tabs in inputs', async () => { + const req = { + body: { + dataId: validDataId, + userInput: 'Test input\nwith\tnewlines\tand\ttabs', + expectedOutput: 'Test output\nwith\tnewlines\tand\ttabs', + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('success'); + }); + + it('should handle large context arrays', async () => { + const largeContext = Array.from({ length: 100 }, (_, i) => `Context item ${i}`); + const req = { + body: { + dataId: validDataId, + userInput: 'Test input', + expectedOutput: 'Test output', + context: largeContext, + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('success'); + }); + + it('should handle large retrievalContext arrays', async () => { + const largeRetrievalContext = Array.from({ length: 100 }, (_, i) => `Retrieval item ${i}`); + const req = { + body: { + dataId: validDataId, + userInput: 'Test input', + expectedOutput: 'Test output', + retrievalContext: largeRetrievalContext, + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('success'); + }); + + it('should handle MongoDB ObjectId-like strings for dataId', async () => { + const objectIdLikeDataId = '507f1f77bcf86cd799439011'; + 
mockMongoEvalDatasetData.findById.mockReturnValue({ + session: vi.fn().mockResolvedValue({ + _id: objectIdLikeDataId, + datasetId: validCollectionId + }) + } as any); + + const req = { + body: { + dataId: objectIdLikeDataId, + userInput: 'Test input', + expectedOutput: 'Test output', + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('success'); + }); + + it('should handle quality evaluation with different models', async () => { + const testCases = ['gpt-4', 'claude-3', 'gemini-pro']; + + for (const model of testCases) { + const req = { + body: { + dataId: validDataId, + userInput: 'Test input', + expectedOutput: 'Test output', + enableQualityEvaluation: true, + qualityEvaluationModel: model + } + }; + + await handler_test(req as any); + + expect(mockAddEvalDatasetDataQualityJob).toHaveBeenCalledWith({ + dataId: validDataId, + evalModel: model + }); + } + }); + }); + + describe('Return Value', () => { + it('should return success string', async () => { + const req = { + body: { + dataId: validDataId, + userInput: 'Test input', + expectedOutput: 'Test output', + enableQualityEvaluation: false + } + }; + + const result = await handler_test(req as any); + expect(result).toBe('success'); + expect(typeof result).toBe('string'); + }); + }); +}); From 7022a6f24063aaf4325529aeb564b8274fb8b823 Mon Sep 17 00:00:00 2001 From: Jon Date: Mon, 1 Sep 2025 14:42:19 +0800 Subject: [PATCH 015/255] feat: Add support for evaluation files and permissions --- packages/global/common/file/constants.ts | 7 +- .../service/common/file/gridfs/controller.ts | 3 +- packages/service/common/file/gridfs/schema.ts | 6 + .../dataset/dataQualityProcessor.ts | 7 +- .../dataset/dataSynthesizeProcessor.ts | 1 - packages/service/core/evaluation/index.ts | 32 +---- .../support/permission/evaluation/auth.ts | 132 +++++++++++++++++- packages/web/i18n/en/file.json | 1 + packages/web/i18n/zh-CN/file.json | 1 + packages/web/i18n/zh-Hant/file.json | 
1 + .../app/src/pages/api/common/file/upload.ts | 20 ++- .../core/evaluation/dataset/data/fileId.ts | 14 +- .../evaluation/dataset/data/fileId.test.ts | 59 +++----- 13 files changed, 197 insertions(+), 87 deletions(-) diff --git a/packages/global/common/file/constants.ts b/packages/global/common/file/constants.ts index ac48e3a3e3ff..abd996f1d7ed 100644 --- a/packages/global/common/file/constants.ts +++ b/packages/global/common/file/constants.ts @@ -3,7 +3,8 @@ import { i18nT } from '../../../web/i18n/utils'; /* mongo fs bucket */ export enum BucketNameEnum { dataset = 'dataset', - chat = 'chat' + chat = 'chat', + evaluation = 'evaluation' } export const bucketNameMap = { [BucketNameEnum.dataset]: { @@ -13,6 +14,10 @@ export const bucketNameMap = { [BucketNameEnum.chat]: { label: i18nT('file:bucket_chat'), previewExpireMinutes: 7 * 24 * 60 // 7 days + }, + [BucketNameEnum.evaluation]: { + label: i18nT('file:eval_file'), + previewExpireMinutes: 30 // 30 minutes } }; diff --git a/packages/service/common/file/gridfs/controller.ts b/packages/service/common/file/gridfs/controller.ts index afa99571e7a1..2153e8252f58 100644 --- a/packages/service/common/file/gridfs/controller.ts +++ b/packages/service/common/file/gridfs/controller.ts @@ -3,7 +3,7 @@ import type { BucketNameEnum } from '@fastgpt/global/common/file/constants'; import fsp from 'fs/promises'; import fs from 'fs'; import { type DatasetFileSchema } from '@fastgpt/global/core/dataset/type'; -import { MongoChatFileSchema, MongoDatasetFileSchema } from './schema'; +import { MongoChatFileSchema, MongoDatasetFileSchema, MongoEvaluationFileSchema } from './schema'; import { detectFileEncoding, detectFileEncodingByPath } from '@fastgpt/global/common/file/tools'; import { CommonErrEnum } from '@fastgpt/global/common/error/code/common'; import { readRawContentByFileBuffer } from '../read/utils'; @@ -18,6 +18,7 @@ import { retryFn } from '@fastgpt/global/common/system/utils'; export function getGFSCollection(bucket: 
`${BucketNameEnum}`) { MongoDatasetFileSchema; MongoChatFileSchema; + MongoEvaluationFileSchema; return connectionMongo.connection.db!.collection(`${bucket}.files`); } diff --git a/packages/service/common/file/gridfs/schema.ts b/packages/service/common/file/gridfs/schema.ts index 61b98ea96caa..51e9b36dd4f0 100644 --- a/packages/service/common/file/gridfs/schema.ts +++ b/packages/service/common/file/gridfs/schema.ts @@ -6,11 +6,17 @@ const DatasetFileSchema = new Schema({ const ChatFileSchema = new Schema({ metadata: Object }); +const EvaluationFileSchema = new Schema({ + metadata: Object +}); DatasetFileSchema.index({ uploadDate: -1 }); ChatFileSchema.index({ uploadDate: -1 }); ChatFileSchema.index({ 'metadata.chatId': 1 }); +EvaluationFileSchema.index({ uploadDate: -1 }); + export const MongoDatasetFileSchema = getMongoModel('dataset.files', DatasetFileSchema); export const MongoChatFileSchema = getMongoModel('chat.files', ChatFileSchema); +export const MongoEvaluationFileSchema = getMongoModel('evaluation.files', EvaluationFileSchema); diff --git a/packages/service/core/evaluation/dataset/dataQualityProcessor.ts b/packages/service/core/evaluation/dataset/dataQualityProcessor.ts index 3e8984183a21..4d927633d307 100644 --- a/packages/service/core/evaluation/dataset/dataQualityProcessor.ts +++ b/packages/service/core/evaluation/dataset/dataQualityProcessor.ts @@ -1,7 +1,7 @@ import type { Job } from 'bullmq'; import { addLog } from '../../../common/system/log'; import { MongoEvalDatasetData } from './evalDatasetDataSchema'; -import type { EvalDatasetDataQualityData } from './dataQualityMq'; +import { getEvalDatasetDataQualityWorker, type EvalDatasetDataQualityData } from './dataQualityMq'; import { EvalDatasetDataKeyEnum, EvalDatasetDataQualityStatusEnum @@ -182,3 +182,8 @@ export const processEvalDatasetDataQuality = async (job: Job { + return getEvalDatasetDataQualityWorker(processEvalDatasetDataQuality); +}; diff --git 
a/packages/service/core/evaluation/dataset/dataSynthesizeProcessor.ts b/packages/service/core/evaluation/dataset/dataSynthesizeProcessor.ts index 57a63882642f..39aaaab2c1a3 100644 --- a/packages/service/core/evaluation/dataset/dataSynthesizeProcessor.ts +++ b/packages/service/core/evaluation/dataset/dataSynthesizeProcessor.ts @@ -1,5 +1,4 @@ import type { Job } from 'bullmq'; -import type { HydratedDocument } from 'mongoose'; import { addLog } from '../../../common/system/log'; import { MongoEvalDatasetCollection } from './evalDatasetCollectionSchema'; import { MongoEvalDatasetData } from './evalDatasetDataSchema'; diff --git a/packages/service/core/evaluation/index.ts b/packages/service/core/evaluation/index.ts index a9a97ad95584..3cc466101d10 100644 --- a/packages/service/core/evaluation/index.ts +++ b/packages/service/core/evaluation/index.ts @@ -24,10 +24,7 @@ import type { } from '@fastgpt/global/core/evaluation/type'; import type { Document } from 'mongoose'; import { TeamErrEnum } from '@fastgpt/global/common/error/code/team'; -import { - InformLevelEnum, - SendInformTemplateCodeEnum -} from '@fastgpt/global/support/user/inform/constants'; +import { InformLevelEnum } from '@fastgpt/global/support/user/inform/constants'; import type { AppChatConfigType, AppSchema } from '@fastgpt/global/core/app/type'; import type { StoreNodeItemType } from '@fastgpt/global/core/workflow/type/node'; import type { StoreEdgeItemType } from '@fastgpt/global/core/workflow/type/edge'; @@ -40,8 +37,9 @@ import { delay } from '@fastgpt/global/common/system/utils'; import { removeDatasetCiteText } from '../../core/ai/utils'; import { getUserChatInfoAndAuthTeamPoints } from '../../support/permission/auth/team'; import { getRunningUserInfoByTmbId } from '../../support/user/team/utils'; -import { getEvalDatasetDataQualityWorker } from './dataset/dataQualityMq'; -import { processEvalDatasetDataQuality } from './dataset/dataQualityProcessor'; +import { initEvalDatasetDataQualityWorker } 
from './dataset/dataQualityProcessor'; +import { initEvalDatasetSmartGenerateWorker } from './dataset/smartGenerateProcessor'; +import { initEvalDatasetDataSynthesizeWorker } from './dataset/dataSynthesizeProcessor'; type AppContextType = { appData: AppSchema; @@ -54,26 +52,10 @@ type AppContextType = { export const initEvaluationWorker = () => { addLog.info('Init Evaluation Worker...'); - getEvalDatasetDataQualityWorker(processEvalDatasetDataQuality); + initEvalDatasetDataQualityWorker(); + initEvalDatasetSmartGenerateWorker(); + initEvalDatasetDataSynthesizeWorker(); getEvaluationWorker(processor); - - import('./dataset/smartGenerateProcessor') - .then(({ initEvalDatasetSmartGenerateWorker }) => { - initEvalDatasetSmartGenerateWorker(); - addLog.info('Smart generate worker initialized'); - }) - .catch((error) => { - addLog.error('Failed to init smart generate worker', { error }); - }); - - import('./dataset/dataSynthesizeProcessor') - .then(({ initEvalDatasetDataSynthesizeWorker }) => { - initEvalDatasetDataSynthesizeWorker(); - addLog.info('Data synthesize worker initialized'); - }) - .catch((error) => { - addLog.error('Failed to init data synthesize worker', { error }); - }); }; const dealAiPointCheckError = async (evalId: string, error: any) => { diff --git a/packages/service/support/permission/evaluation/auth.ts b/packages/service/support/permission/evaluation/auth.ts index 4629d9ec31ca..fa63499fb574 100644 --- a/packages/service/support/permission/evaluation/auth.ts +++ b/packages/service/support/permission/evaluation/auth.ts @@ -2,11 +2,24 @@ import { parseHeaderCert } from '../controller'; import { authAppByTmbId } from '../app/auth'; import { ManagePermissionVal, - ReadPermissionVal + ReadPermissionVal, + OwnerPermissionVal, + ReadRoleVal } from '@fastgpt/global/support/permission/constant'; -import type { EvaluationSchemaType } from '@fastgpt/global/core/evaluation/type'; -import type { AuthModeType } from '../type'; +import type { + 
EvaluationSchemaType, + EvalDatasetCollectionSchemaType +} from '@fastgpt/global/core/evaluation/type'; +import type { AuthModeType, AuthResponseType } from '../type'; import { MongoEvaluation } from '../../../core/evaluation/evalSchema'; +import { MongoEvalDatasetCollection } from '../../../core/evaluation/dataset/evalDatasetCollectionSchema'; +import { getTmbInfoByTmbId } from '../../user/team/controller'; +import type { PermissionValueType } from '@fastgpt/global/support/permission/type'; +import { getFileById } from '../../../common/file/gridfs/controller'; +import { BucketNameEnum } from '@fastgpt/global/common/file/constants'; +import { CommonErrEnum } from '@fastgpt/global/common/error/code/common'; +import { Permission } from '@fastgpt/global/support/permission/controller'; +import type { DatasetFileSchema } from '@fastgpt/global/core/dataset/type'; export const authEval = async ({ evalId, @@ -62,3 +75,116 @@ export const authEval = async ({ evaluation }; }; + +export const authEvalDatasetCollectionByTmbId = async ({ + tmbId, + collectionId, + per, + isRoot = false +}: { + tmbId: string; + collectionId: string; + per: PermissionValueType; + isRoot?: boolean; +}): Promise<{ + collection: EvalDatasetCollectionSchemaType; +}> => { + const [{ teamId, permission: tmbPer }, collection] = await Promise.all([ + getTmbInfoByTmbId({ tmbId }), + MongoEvalDatasetCollection.findOne({ _id: collectionId }).lean() + ]); + // TODO: error code + if (!collection) { + return Promise.reject('Evaluation dataset collection not found'); + } + + if (String(collection.teamId) !== teamId) { + return Promise.reject('Unauthorized access to evaluation dataset collection'); + } + + // Check if user is owner or has permission + const isOwner = tmbPer.isOwner || String(collection.tmbId) === String(tmbId); + + if (!isRoot && !isOwner) { + return Promise.reject('Unauthorized access to evaluation dataset collection'); + } + + return { collection }; +}; + +export const 
authEvalDatasetCollection = async ({ + collectionId, + per, + ...props +}: AuthModeType & { + collectionId: string; + per: PermissionValueType; +}): Promise<{ + userId: string; + teamId: string; + tmbId: string; + collection: EvalDatasetCollectionSchemaType; + isRoot: boolean; +}> => { + const result = await parseHeaderCert(props); + const { tmbId } = result; + + if (!collectionId) { + return Promise.reject('Collection ID is required'); + } + + const { collection } = await authEvalDatasetCollectionByTmbId({ + tmbId, + collectionId, + per, + isRoot: result.isRoot + }); + + return { + userId: result.userId, + teamId: result.teamId, + tmbId: result.tmbId, + collection, + isRoot: result.isRoot + }; +}; + +export const authEvalDatasetCollectionFile = async ({ + fileId, + per = OwnerPermissionVal, + ...props +}: AuthModeType & { + fileId: string; +}): Promise< + AuthResponseType & { + file: DatasetFileSchema; + } +> => { + const authRes = await parseHeaderCert(props); + const { teamId, tmbId } = authRes; + + const file = await getFileById({ bucketName: BucketNameEnum.evaluation, fileId }); + + if (!file) { + return Promise.reject(CommonErrEnum.fileNotFound); + } + + if (file.metadata?.teamId !== teamId) { + return Promise.reject(CommonErrEnum.unAuthFile); + } + + const permission = new Permission({ + role: ReadRoleVal, + isOwner: file.metadata?.uid === tmbId || file.metadata?.tmbId === tmbId + }); + + if (!permission.checkPer(per)) { + return Promise.reject(CommonErrEnum.unAuthFile); + } + + return { + ...authRes, + permission, + file + }; +}; diff --git a/packages/web/i18n/en/file.json b/packages/web/i18n/en/file.json index d53d0a88c0a5..54cbb7d621a1 100644 --- a/packages/web/i18n/en/file.json +++ b/packages/web/i18n/en/file.json @@ -15,6 +15,7 @@ "Please wait for all files to upload": "Please wait for all files to be uploaded to complete", "bucket_chat": "Conversation Files", "bucket_file": "Dataset Documents", + "eval_file": "Evaluation Files", "bucket_image": 
"picture", "click_to_view_raw_source": "Click to View Original Source", "common.Some images failed to process": "Some images failed to process", diff --git a/packages/web/i18n/zh-CN/file.json b/packages/web/i18n/zh-CN/file.json index bfd7df19c518..f4699f6c7c83 100644 --- a/packages/web/i18n/zh-CN/file.json +++ b/packages/web/i18n/zh-CN/file.json @@ -15,6 +15,7 @@ "Please wait for all files to upload": "请等待所有文件上传完成", "bucket_chat": "对话文件", "bucket_file": "知识库文件", + "eval_file": "评测文件", "bucket_image": "图片", "click_to_view_raw_source": "点击查看来源", "common.Some images failed to process": "部分图片处理失败", diff --git a/packages/web/i18n/zh-Hant/file.json b/packages/web/i18n/zh-Hant/file.json index 8445cf42e5de..6c76eede922d 100644 --- a/packages/web/i18n/zh-Hant/file.json +++ b/packages/web/i18n/zh-Hant/file.json @@ -15,6 +15,7 @@ "Please wait for all files to upload": "請等待所有文件上傳完成", "bucket_chat": "對話檔案", "bucket_file": "知識庫檔案", + "eval_file": "評測檔案", "bucket_image": "圖片", "click_to_view_raw_source": "點選檢視原始來源", "common.Some images failed to process": "部分圖片處理失敗", diff --git a/projects/app/src/pages/api/common/file/upload.ts b/projects/app/src/pages/api/common/file/upload.ts index 63340f78488b..2b73889a06d5 100644 --- a/projects/app/src/pages/api/common/file/upload.ts +++ b/projects/app/src/pages/api/common/file/upload.ts @@ -11,6 +11,7 @@ import { authFrequencyLimit } from '@/service/common/frequencyLimit/api'; import { addSeconds } from 'date-fns'; import { authChatCrud } from '@/service/support/permission/auth/chat'; import { authDataset } from '@fastgpt/service/support/permission/dataset/auth'; +import { authEvalDatasetCollection } from '@fastgpt/service/support/permission/evaluation/auth'; import { type OutLinkChatAuthProps } from '@fastgpt/global/support/permission/chat'; import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; @@ -20,6 +21,9 @@ export type UploadChatFileProps = { export type UploadDatasetFileProps = { datasetId: string; }; 
+export type UploadEvaluationFileProps = { + collectionId: string; +}; const authUploadLimit = (tmbId: string) => { if (!global.feConfigs.uploadFileMaxAmount) return; @@ -39,7 +43,7 @@ async function handler(req: NextApiRequest, res: NextApiResponse) { maxSize: global.feConfigs?.uploadFileMaxSize }); const { file, bucketName, metadata, data } = await upload.getUploadFile< - UploadChatFileProps | UploadDatasetFileProps + UploadChatFileProps | UploadDatasetFileProps | UploadEvaluationFileProps >(req, res); filePaths.push(file.path); @@ -71,6 +75,20 @@ async function handler(req: NextApiRequest, res: NextApiResponse) { uid: authData.tmbId }; } + if (bucketName === 'evaluation') { + const evalData = data as UploadEvaluationFileProps; + const authData = await authEvalDatasetCollection({ + collectionId: evalData.collectionId, + per: WritePermissionVal, + req, + authToken: true, + authApiKey: true + }); + return { + teamId: authData.teamId, + uid: authData.tmbId + }; + } return Promise.reject('bucketName is empty'); })(); diff --git a/projects/app/src/pages/api/core/evaluation/dataset/data/fileId.ts b/projects/app/src/pages/api/core/evaluation/dataset/data/fileId.ts index 34e1b2a71df2..771a44080e0d 100644 --- a/projects/app/src/pages/api/core/evaluation/dataset/data/fileId.ts +++ b/projects/app/src/pages/api/core/evaluation/dataset/data/fileId.ts @@ -1,7 +1,6 @@ import type { ApiRequestProps } from '@fastgpt/service/type/next'; import { NextAPI } from '@/service/middleware/entry'; import { WritePermissionVal } from '@fastgpt/global/support/permission/constant'; -import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun'; import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema'; import { MongoEvalDatasetData } from '@fastgpt/service/core/evaluation/dataset/evalDatasetDataSchema'; @@ -10,10 +9,10 @@ import { 
EvalDatasetDataKeyEnum } from '@fastgpt/global/core/evaluation/constants'; import { readFileContentFromMongo } from '@fastgpt/service/common/file/gridfs/controller'; -import { authCollectionFile } from '@fastgpt/service/support/permission/auth/file'; import { BucketNameEnum } from '@fastgpt/global/common/file/constants'; import type { importEvalDatasetFromFileBody } from '@fastgpt/global/core/evaluation/api'; import { addEvalDatasetDataQualityJob } from '@fastgpt/service/core/evaluation/dataset/dataQualityMq'; +import { authEvalDatasetCollectionFile } from '@fastgpt/service/support/permission/evaluation/auth'; export type EvalDatasetImportFromFileQuery = {}; export type EvalDatasetImportFromFileBody = importEvalDatasetFromFileBody; @@ -169,14 +168,7 @@ async function handler( return 'qualityEvaluationModel is required when enableQualityEvaluation is true'; } - const { teamId, tmbId } = await authUserPer({ - req, - authToken: true, - authApiKey: true, - per: WritePermissionVal - }); - - const { file } = await authCollectionFile({ + const { file, teamId, tmbId } = await authEvalDatasetCollectionFile({ req, authToken: true, authApiKey: true, @@ -204,7 +196,7 @@ async function handler( const { rawText } = await readFileContentFromMongo({ teamId, tmbId, - bucketName: BucketNameEnum.dataset, + bucketName: BucketNameEnum.evaluation, fileId, getFormatText: false }); diff --git a/test/cases/pages/api/core/evaluation/dataset/data/fileId.test.ts b/test/cases/pages/api/core/evaluation/dataset/data/fileId.test.ts index fd164b0b1be5..ff0342a7e1d9 100644 --- a/test/cases/pages/api/core/evaluation/dataset/data/fileId.test.ts +++ b/test/cases/pages/api/core/evaluation/dataset/data/fileId.test.ts @@ -1,7 +1,6 @@ import { describe, expect, it, vi, beforeEach } from 'vitest'; import { handler_test } from '@/pages/api/core/evaluation/dataset/data/fileId'; -import { authUserPer } from '@fastgpt/service/support/permission/user/auth'; -import { authCollectionFile } from 
'@fastgpt/service/support/permission/auth/file'; +import { authEvalDatasetCollectionFile } from '@fastgpt/service/support/permission/evaluation/auth'; import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun'; import { MongoEvalDatasetData } from '@fastgpt/service/core/evaluation/dataset/evalDatasetDataSchema'; import { MongoEvalDatasetCollection } from '@fastgpt/service/core/evaluation/dataset/evalDatasetCollectionSchema'; @@ -14,8 +13,7 @@ import { } from '@fastgpt/global/core/evaluation/constants'; import { addEvalDatasetDataQualityJob } from '@fastgpt/service/core/evaluation/dataset/dataQualityMq'; -vi.mock('@fastgpt/service/support/permission/user/auth'); -vi.mock('@fastgpt/service/support/permission/auth/file'); +vi.mock('@fastgpt/service/support/permission/evaluation/auth'); vi.mock('@fastgpt/service/common/mongo/sessionRun'); vi.mock('@fastgpt/service/core/evaluation/dataset/evalDatasetDataSchema', () => ({ MongoEvalDatasetData: { @@ -34,8 +32,7 @@ vi.mock('@fastgpt/service/core/evaluation/dataset/dataQualityMq', () => ({ addEvalDatasetDataQualityJob: vi.fn() })); -const mockAuthUserPer = vi.mocked(authUserPer); -const mockAuthCollectionFile = vi.mocked(authCollectionFile); +const mockAuthEvalDatasetCollectionFile = vi.mocked(authEvalDatasetCollectionFile); const mockMongoSessionRun = vi.mocked(mongoSessionRun); const mockMongoEvalDatasetData = vi.mocked(MongoEvalDatasetData); const mockMongoEvalDatasetCollection = vi.mocked(MongoEvalDatasetCollection); @@ -59,12 +56,7 @@ describe('EvalDatasetData FileId Import API', () => { beforeEach(() => { vi.clearAllMocks(); - mockAuthUserPer.mockResolvedValue({ - teamId: validTeamId, - tmbId: validTmbId - }); - - mockAuthCollectionFile.mockResolvedValue({ + mockAuthEvalDatasetCollectionFile.mockResolvedValue({ teamId: validTeamId, tmbId: validTmbId, file: { @@ -204,26 +196,7 @@ describe('EvalDatasetData FileId Import API', () => { }); describe('Authentication and Authorization', () => { - it('should 
call authUserPer with correct parameters', async () => { - const req = { - body: { - fileId: validFileId, - collectionId: validCollectionId, - enableQualityEvaluation: false - } - }; - - await handler_test(req as any); - - expect(mockAuthUserPer).toHaveBeenCalledWith({ - req, - authToken: true, - authApiKey: true, - per: WritePermissionVal - }); - }); - - it('should call authCollectionFile with correct parameters', async () => { + it('should call authEvalCollectionFile with correct parameters', async () => { const req = { body: { fileId: validFileId, @@ -234,7 +207,7 @@ describe('EvalDatasetData FileId Import API', () => { await handler_test(req as any); - expect(mockAuthCollectionFile).toHaveBeenCalledWith({ + expect(mockAuthEvalDatasetCollectionFile).toHaveBeenCalledWith({ req, authToken: true, authApiKey: true, @@ -244,8 +217,8 @@ describe('EvalDatasetData FileId Import API', () => { }); it('should propagate authentication errors', async () => { - const authError = new Error('Authentication failed'); - mockAuthUserPer.mockRejectedValue(authError); + const authError = new Error('unAuthorization'); + mockAuthEvalDatasetCollectionFile.mockRejectedValue(authError); const req = { body: { @@ -255,12 +228,12 @@ describe('EvalDatasetData FileId Import API', () => { } }; - await expect(handler_test(req as any)).rejects.toBe(authError); + await expect(handler_test(req as any)).rejects.toThrow('unAuthorization'); }); it('should propagate file authentication errors', async () => { - const fileAuthError = new Error('File authentication failed'); - mockAuthCollectionFile.mockRejectedValue(fileAuthError); + const fileAuthError = new Error('unAuthorization'); + mockAuthEvalDatasetCollectionFile.mockRejectedValue(fileAuthError); const req = { body: { @@ -270,13 +243,13 @@ describe('EvalDatasetData FileId Import API', () => { } }; - await expect(handler_test(req as any)).rejects.toBe(fileAuthError); + await expect(handler_test(req as any)).rejects.toThrow('unAuthorization'); }); 
}); describe('File Validation', () => { it('should reject non-CSV files', async () => { - mockAuthCollectionFile.mockResolvedValue({ + mockAuthEvalDatasetCollectionFile.mockResolvedValue({ teamId: validTeamId, tmbId: validTmbId, file: { @@ -299,7 +272,7 @@ describe('EvalDatasetData FileId Import API', () => { }); it('should handle files with uppercase CSV extension', async () => { - mockAuthCollectionFile.mockResolvedValue({ + mockAuthEvalDatasetCollectionFile.mockResolvedValue({ teamId: validTeamId, tmbId: validTmbId, file: { @@ -329,7 +302,7 @@ describe('EvalDatasetData FileId Import API', () => { }); it('should handle files without filename', async () => { - mockAuthCollectionFile.mockResolvedValue({ + mockAuthEvalDatasetCollectionFile.mockResolvedValue({ teamId: validTeamId, tmbId: validTmbId, file: { @@ -407,7 +380,7 @@ describe('EvalDatasetData FileId Import API', () => { expect(mockReadFileContentFromMongo).toHaveBeenCalledWith({ teamId: validTeamId, tmbId: validTmbId, - bucketName: BucketNameEnum.dataset, + bucketName: BucketNameEnum.evaluation, fileId: validFileId, getFormatText: false }); From 357624d7442e6b2c488a5444ebfff4b01281089f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=83=91=E6=96=AF=E6=88=8814864?= <14864@sangfor.com> Date: Tue, 2 Sep 2025 14:54:36 +0800 Subject: [PATCH 016/255] feat: Add evaluation task, dataset and dimension management features - Add evaluation dataset management page and detail page - Add evaluation dimension management page - Refactor evaluation homepage to tab layout, supporting task/dataset/dimension switching - Add basic structure for evaluation task detail page - Update navigation bar route configuration to support new page routes --- packages/web/i18n/constants.ts | 4 +- .../web/i18n/en/dashboard_evaluation.json | 27 +- packages/web/i18n/en/evaluation_dataset.json | 22 + .../web/i18n/en/evaluation_dimension.json | 12 + packages/web/i18n/i18next.d.ts | 4 + .../web/i18n/zh-CN/dashboard_evaluation.json | 27 +- 
.../web/i18n/zh-CN/evaluation_dataset.json | 22 + .../web/i18n/zh-CN/evaluation_dimension.json | 12 + .../i18n/zh-Hant/dashboard_evaluation.json | 27 +- .../web/i18n/zh-Hant/evaluation_dataset.json | 22 + .../i18n/zh-Hant/evaluation_dimension.json | 12 + projects/app/src/components/Layout/navbar.tsx | 6 +- .../app/src/components/Layout/navbarPhone.tsx | 6 +- .../components/OldEvaluationTasks.tsx | 239 ++++++++ .../evaluation/components/create.tsx | 378 ++++++++++++ .../evaluation/dataset/detail/index.tsx | 0 .../dashboard/evaluation/dataset/index.tsx | 493 ++++++++++++++++ .../dashboard/evaluation/dimension/index.tsx | 207 +++++++ .../src/pages/dashboard/evaluation/index.tsx | 305 ++-------- .../evaluation/task/detail/index.tsx | 40 ++ .../pages/dashboard/evaluation/task/index.tsx | 541 ++++++++++++++++++ 21 files changed, 2132 insertions(+), 274 deletions(-) create mode 100644 packages/web/i18n/en/evaluation_dataset.json create mode 100644 packages/web/i18n/en/evaluation_dimension.json create mode 100644 packages/web/i18n/zh-CN/evaluation_dataset.json create mode 100644 packages/web/i18n/zh-CN/evaluation_dimension.json create mode 100644 packages/web/i18n/zh-Hant/evaluation_dataset.json create mode 100644 packages/web/i18n/zh-Hant/evaluation_dimension.json create mode 100644 projects/app/src/pages/dashboard/evaluation/components/OldEvaluationTasks.tsx create mode 100644 projects/app/src/pages/dashboard/evaluation/components/create.tsx create mode 100644 projects/app/src/pages/dashboard/evaluation/dataset/detail/index.tsx create mode 100644 projects/app/src/pages/dashboard/evaluation/dataset/index.tsx create mode 100644 projects/app/src/pages/dashboard/evaluation/dimension/index.tsx create mode 100644 projects/app/src/pages/dashboard/evaluation/task/detail/index.tsx create mode 100644 projects/app/src/pages/dashboard/evaluation/task/index.tsx diff --git a/packages/web/i18n/constants.ts b/packages/web/i18n/constants.ts index d568e47831ee..263a64946dd6 100644 --- 
a/packages/web/i18n/constants.ts +++ b/packages/web/i18n/constants.ts @@ -20,7 +20,9 @@ export const I18N_NAMESPACES = [ 'account_team', 'account_model', 'dashboard_mcp', - 'dashboard_evaluation' + 'dashboard_evaluation', + 'evaluation_dataset', + 'evaluation_dimension' ]; export const I18N_NAMESPACES_MAP = I18N_NAMESPACES.reduce( diff --git a/packages/web/i18n/en/dashboard_evaluation.json b/packages/web/i18n/en/dashboard_evaluation.json index 1809bcb4df82..a529fc21b8e9 100644 --- a/packages/web/i18n/en/dashboard_evaluation.json +++ b/packages/web/i18n/en/dashboard_evaluation.json @@ -46,5 +46,30 @@ "task_name": "Task Name", "team_has_running_evaluation": "The current team already has running application reviews. Please wait until it is completed before creating a new application review.", "template_csv_file_select_tip": "Only support {{fileType}} files that are strictly in accordance with template format", - "variables": "Global variables" + "variables": "Global variables", + "all_apps": "全部应用", + "search_evaluation_task": "搜索任务名或应用版本", + "create_new_task": "新建任务", + "task_name_column": "任务名", + "progress_column": "进度", + "evaluation_app_column": "评测应用", + "app_version_column": "应用版本", + "evaluation_result_column": "评测结果", + "start_finish_time_column": "开始时间/完成时间", + "executor_column": "执行人", + "waiting": "等待中", + "evaluating_status": "评测中", + "completed_status": "已完成", + "queuing_status": "排队中", + "running_status": "进行中", + "error_data_tooltip": "{{count}} 条数据执行异常,可点击查看详情", + "rename": "重命名", + "delete": "删除", + "confirm_delete_task": "确认删除该任务?", + "evaluation_tasks_tab": "评测任务", + "evaluation_tasks": "评测任务", + "evaluation_datasets_tab": "评测数据集", + "evaluation_dimensions_tab": "评测维度", + "create_new": "新建", + "retry_error_data": "重试异常数据" } diff --git a/packages/web/i18n/en/evaluation_dataset.json b/packages/web/i18n/en/evaluation_dataset.json new file mode 100644 index 000000000000..c1bf88a67062 --- /dev/null +++ b/packages/web/i18n/en/evaluation_dataset.json @@ 
-0,0 +1,22 @@ +{ + "dataset_name_placeholder": "名称", + "create_new_dataset": "新建数据集", + "smart_generation": "智能生成", + "file_import": "文件导入", + "rename": "重命名", + "delete": "删除", + "confirm_delete_dataset": "确认删除该数据集吗?", + "error_details": "异常详情", + "status_queuing": "排队中", + "status_parsing": "文件解析中", + "status_generating": "数据生成中", + "status_generate_error": "生成异常", + "status_ready": "已就绪", + "status_parse_error": "解析异常", + "click_to_view_details": "点击查看详情", + "table_header_name": "名称", + "table_header_data_count": "数据量", + "table_header_time": "创建/更新时间", + "table_header_status": "状态", + "table_header_creator": "创建人" +} \ No newline at end of file diff --git a/packages/web/i18n/en/evaluation_dimension.json b/packages/web/i18n/en/evaluation_dimension.json new file mode 100644 index 000000000000..01993b6cb7f4 --- /dev/null +++ b/packages/web/i18n/en/evaluation_dimension.json @@ -0,0 +1,12 @@ +{ + "create_dimension": "新建维度", + "search_dimension": "搜索评测维度", + "delete_failed": "删除失败", + "delete_success": "删除成功", + "builtin": "内置", + "confirm_delete_dimension": "确认删除该维度?", + "dimension_name": "维度名", + "description": "介绍", + "create_update_time": "创建/更新时间", + "creator": "创建人" +} \ No newline at end of file diff --git a/packages/web/i18n/i18next.d.ts b/packages/web/i18n/i18next.d.ts index b831611dabdd..8c55c1cd45ca 100644 --- a/packages/web/i18n/i18next.d.ts +++ b/packages/web/i18n/i18next.d.ts @@ -20,6 +20,8 @@ import type chat from './zh-CN/chat.json'; import type login from './zh-CN/login.json'; import type account_model from './zh-CN/account_model.json'; import type dashboard_mcp from './zh-CN/dashboard_mcp.json'; +import type evaluation_dimension from './zh-CN/evaluation_dimension.json'; +import type evaluation_dataset from './zh-CN/evaluation_dataset.json'; import type { I18N_NAMESPACES } from './constants'; export interface I18nNamespaces { @@ -45,6 +47,8 @@ export interface I18nNamespaces { account_model: typeof account_model; dashboard_mcp: typeof dashboard_mcp; 
dashboard_evaluation: typeof dashboard_evaluation; + evaluation_dataset: typeof evaluation_dataset; + evaluation_dimension: typeof evaluation_dimension; } export type I18nNsType = (keyof I18nNamespaces)[]; diff --git a/packages/web/i18n/zh-CN/dashboard_evaluation.json b/packages/web/i18n/zh-CN/dashboard_evaluation.json index abdc713033d8..fbf5123e4310 100644 --- a/packages/web/i18n/zh-CN/dashboard_evaluation.json +++ b/packages/web/i18n/zh-CN/dashboard_evaluation.json @@ -49,5 +49,30 @@ "task_name": "任务名称", "team_has_running_evaluation": "当前团队已有正在运行的应用评测,请等待完成后再创建新的应用评测", "template_csv_file_select_tip": "仅支持严格按照模板填写的 {{fileType}} 文件", - "variables": "全局变量" + "variables": "全局变量", + "all_apps": "全部应用", + "search_evaluation_task": "搜索任务名或应用版本", + "create_new_task": "新建任务", + "task_name_column": "任务名", + "progress_column": "进度", + "evaluation_app_column": "评测应用", + "app_version_column": "应用版本", + "evaluation_result_column": "评测结果", + "start_finish_time_column": "开始时间/完成时间", + "executor_column": "执行人", + "waiting": "等待中", + "evaluating_status": "评测中", + "completed_status": "已完成", + "queuing_status": "排队中", + "running_status": "进行中", + "error_data_tooltip": "{{count}} 条数据执行异常,可点击查看详情", + "rename": "重命名", + "delete": "删除", + "confirm_delete_task": "确认删除该任务?", + "evaluation_tasks_tab": "评测任务", + "evaluation_tasks": "评测任务", + "evaluation_datasets_tab": "评测数据集", + "evaluation_dimensions_tab": "评测维度", + "create_new": "新建", + "retry_error_data": "重试异常数据" } diff --git a/packages/web/i18n/zh-CN/evaluation_dataset.json b/packages/web/i18n/zh-CN/evaluation_dataset.json new file mode 100644 index 000000000000..c1bf88a67062 --- /dev/null +++ b/packages/web/i18n/zh-CN/evaluation_dataset.json @@ -0,0 +1,22 @@ +{ + "dataset_name_placeholder": "名称", + "create_new_dataset": "新建数据集", + "smart_generation": "智能生成", + "file_import": "文件导入", + "rename": "重命名", + "delete": "删除", + "confirm_delete_dataset": "确认删除该数据集吗?", + "error_details": "异常详情", + "status_queuing": "排队中", + "status_parsing": 
"文件解析中", + "status_generating": "数据生成中", + "status_generate_error": "生成异常", + "status_ready": "已就绪", + "status_parse_error": "解析异常", + "click_to_view_details": "点击查看详情", + "table_header_name": "名称", + "table_header_data_count": "数据量", + "table_header_time": "创建/更新时间", + "table_header_status": "状态", + "table_header_creator": "创建人" +} \ No newline at end of file diff --git a/packages/web/i18n/zh-CN/evaluation_dimension.json b/packages/web/i18n/zh-CN/evaluation_dimension.json new file mode 100644 index 000000000000..01993b6cb7f4 --- /dev/null +++ b/packages/web/i18n/zh-CN/evaluation_dimension.json @@ -0,0 +1,12 @@ +{ + "create_dimension": "新建维度", + "search_dimension": "搜索评测维度", + "delete_failed": "删除失败", + "delete_success": "删除成功", + "builtin": "内置", + "confirm_delete_dimension": "确认删除该维度?", + "dimension_name": "维度名", + "description": "介绍", + "create_update_time": "创建/更新时间", + "creator": "创建人" +} \ No newline at end of file diff --git a/packages/web/i18n/zh-Hant/dashboard_evaluation.json b/packages/web/i18n/zh-Hant/dashboard_evaluation.json index 508142b00833..77bad3bcf516 100644 --- a/packages/web/i18n/zh-Hant/dashboard_evaluation.json +++ b/packages/web/i18n/zh-Hant/dashboard_evaluation.json @@ -42,5 +42,30 @@ "task_detail": "任務詳情", "team_has_running_evaluation": "當前團隊已有正在運行的應用評測,請等待完成後再創建新的應用評測", "template_csv_file_select_tip": "僅支持嚴格按照模板格式的 {{fileType}} 文件", - "variables": "全局變量" + "variables": "全局變量", + "all_apps": "全部應用", + "search_evaluation_task": "搜索任務名或應用版本", + "create_new_task": "新建任務", + "task_name_column": "任務名", + "progress_column": "進度", + "evaluation_app_column": "評測應用", + "app_version_column": "應用版本", + "evaluation_result_column": "評測結果", + "start_finish_time_column": "開始時間/完成時間", + "executor_column": "執行人", + "waiting": "等待中", + "evaluating_status": "評測中", + "completed_status": "已完成", + "queuing_status": "排隊中", + "running_status": "進行中", + "error_data_tooltip": "{{count}} 條數據執行異常,可點擊查看詳情", + "rename": "重命名", + "delete": "刪除", + "confirm_delete_task": 
"確認刪除該任務?", + "evaluation_tasks_tab": "評測任務", + "evaluation_tasks": "評測任務", + "evaluation_datasets_tab": "評測數據集", + "evaluation_dimensions_tab": "評測維度", + "create_new": "新建", + "retry_error_data": "重試異常數據" } diff --git a/packages/web/i18n/zh-Hant/evaluation_dataset.json b/packages/web/i18n/zh-Hant/evaluation_dataset.json new file mode 100644 index 000000000000..53686d0264cb --- /dev/null +++ b/packages/web/i18n/zh-Hant/evaluation_dataset.json @@ -0,0 +1,22 @@ +{ + "dataset_name_placeholder": "名稱", + "create_new_dataset": "新建數據集", + "smart_generation": "智能生成", + "file_import": "文件導入", + "rename": "重命名", + "delete": "刪除", + "confirm_delete_dataset": "確認刪除該數據集嗎?", + "error_details": "異常詳情", + "status_queuing": "排隊中", + "status_parsing": "文件解析中", + "status_generating": "數據生成中", + "status_generate_error": "生成異常", + "status_ready": "已就緒", + "status_parse_error": "解析異常", + "click_to_view_details": "點擊查看詳情", + "table_header_name": "名稱", + "table_header_data_count": "數據量", + "table_header_time": "創建/更新時間", + "table_header_status": "狀態", + "table_header_creator": "創建人" +} \ No newline at end of file diff --git a/packages/web/i18n/zh-Hant/evaluation_dimension.json b/packages/web/i18n/zh-Hant/evaluation_dimension.json new file mode 100644 index 000000000000..eb32afb89d81 --- /dev/null +++ b/packages/web/i18n/zh-Hant/evaluation_dimension.json @@ -0,0 +1,12 @@ +{ + "create_dimension": "新建維度", + "search_dimension": "搜索評測維度", + "delete_failed": "刪除失敗", + "delete_success": "刪除成功", + "builtin": "內建", + "confirm_delete_dimension": "確認刪除該維度?", + "dimension_name": "維度名", + "description": "介紹", + "create_update_time": "創建/更新時間", + "creator": "創建人" +} \ No newline at end of file diff --git a/projects/app/src/components/Layout/navbar.tsx b/projects/app/src/components/Layout/navbar.tsx index 05aa996346e9..ebaa32c0ef9e 100644 --- a/projects/app/src/components/Layout/navbar.tsx +++ b/projects/app/src/components/Layout/navbar.tsx @@ -63,7 +63,11 @@ const Navbar = ({ unread }: { unread: number 
}) => { '/dashboard/[pluginGroupId]', '/dashboard/mcpServer', '/dashboard/evaluation', - '/dashboard/evaluation/create' + '/dashboard/evaluation/task/detail', + '/dashboard/evaluation/dataset/fileImport', + '/dashboard/evaluation/dataset/detail', + '/dashboard/evaluation/dimension/create', + '/dashboard/evaluation/dimension/edit' ] }, { diff --git a/projects/app/src/components/Layout/navbarPhone.tsx b/projects/app/src/components/Layout/navbarPhone.tsx index 4bfa65fda0cf..1d2f97da34a0 100644 --- a/projects/app/src/components/Layout/navbarPhone.tsx +++ b/projects/app/src/components/Layout/navbarPhone.tsx @@ -33,7 +33,11 @@ const NavbarPhone = ({ unread }: { unread: number }) => { '/dashboard/[pluginGroupId]', '/dashboard/mcpServer', '/dashboard/evaluation', - '/dashboard/evaluation/create' + '/dashboard/evaluation/task/detail', + '/dashboard/evaluation/dataset/fileImport', + '/dashboard/evaluation/dataset/detail', + '/dashboard/evaluation/dimension/create', + '/dashboard/evaluation/dimension/edit' ], unread: 0 }, diff --git a/projects/app/src/pages/dashboard/evaluation/components/OldEvaluationTasks.tsx b/projects/app/src/pages/dashboard/evaluation/components/OldEvaluationTasks.tsx new file mode 100644 index 000000000000..32466a4aed58 --- /dev/null +++ b/projects/app/src/pages/dashboard/evaluation/components/OldEvaluationTasks.tsx @@ -0,0 +1,239 @@ +import { + Box, + Button, + Flex, + IconButton, + Table, + TableContainer, + Tbody, + Td, + Th, + Thead, + Tr +} from '@chakra-ui/react'; +import SearchInput from '@fastgpt/web/components/common/Input/SearchInput'; +import MyIcon from '@fastgpt/web/components/common/Icon'; +import { useRouter } from 'next/router'; +import { useRequest2 } from '@fastgpt/web/hooks/useRequest'; +import { deleteEvaluation, getEvaluationList } from '@/web/core/evaluation/evaluation'; +import { formatTime2YMDHM } from '@fastgpt/global/common/string/time'; +import Avatar from '@fastgpt/web/components/common/Avatar'; +import { usePagination } from 
'@fastgpt/web/hooks/usePagination'; +import { useState, useEffect, useMemo } from 'react'; +import EvaluationDetailModal from '@/pageComponents/evaluation/DetailModal'; +import { useSystem } from '@fastgpt/web/hooks/useSystem'; +import EmptyTip from '@fastgpt/web/components/common/EmptyTip'; +import type { evaluationType } from '@fastgpt/global/core/evaluation/type'; +import MyTooltip from '@fastgpt/web/components/common/MyTooltip'; +import PopoverConfirm from '@fastgpt/web/components/common/MyPopover/PopoverConfirm'; +import MyBox from '@fastgpt/web/components/common/MyBox'; +import { useTranslation } from 'next-i18next'; + +const EvaluationTasks = () => { + const router = useRouter(); + const { t } = useTranslation(); + const { isPc } = useSystem(); + + const [searchKey, setSearchKey] = useState(''); + const [evalDetailId, setEvalDetailId] = useState(); + const [pollingInterval, setPollingInterval] = useState(10000); + + const { + data: evaluationList, + Pagination, + getData: fetchData + } = usePagination(getEvaluationList, { + defaultPageSize: 20, + pollingInterval, + pollingWhenHidden: true, + params: { + searchKey + }, + EmptyTip: , + refreshDeps: [searchKey] + }); + + const evalDetail = useMemo(() => { + if (!evalDetailId) return undefined; + return evaluationList.find((item) => item._id === evalDetailId); + }, [evalDetailId, evaluationList]); + + useEffect(() => { + const hasRunningOrErrorTasks = evaluationList.some((item) => { + const { totalCount = 0, completedCount = 0, errorCount = 0 } = item; + const isCompleted = totalCount === completedCount; + return !isCompleted || errorCount > 0; + }); + + setPollingInterval(hasRunningOrErrorTasks ? 
10000 : 0); + }, [evaluationList]); + + const { runAsync: onDeleteEval } = useRequest2(deleteEvaluation, { + onSuccess: () => { + fetchData(); + } + }); + + const renderProgress = (item: evaluationType) => { + const { completedCount, totalCount, errorCount } = item; + + if (completedCount === totalCount) { + return ( + + {t('dashboard_evaluation:completed')} + + ); + } + + return ( + + {completedCount} + {`/${totalCount}`} + {(errorCount > 0 || item.errorMessage) && ( + + setEvalDetailId(item._id)} + /> + + )} + + ); + }; + + return ( + <> + + + + { + setSearchKey(e.target.value); + }} + /> + + + + + + + + + + + + + + + + + + + + + {evaluationList.map((item) => { + return ( + + + + + + + + + + ); + })} + +
{t('dashboard_evaluation:Task_name')}{t('dashboard_evaluation:Progress')}{t('dashboard_evaluation:Executor')}{t('dashboard_evaluation:Evaluation_app')}{t('dashboard_evaluation:Start_end_time')}{t('dashboard_evaluation:Overall_score')}{t('dashboard_evaluation:Action')}
+ {item.name} + {renderProgress(item)} + + + {item.executorName} + + + + + {item.appName} + + + {formatTime2YMDHM(item.createTime)} + {formatTime2YMDHM(item.finishTime)} + + {typeof item.score === 'number' ? (item.score * 100).toFixed(2) : '-'} + + + + } + /> + } + content={t('dashboard_evaluation:comfirm_delete_task')} + onConfirm={() => onDeleteEval({ evalId: item._id })} + /> +
+
+
+ + + + + + {!!evalDetail && ( + setEvalDetailId(undefined)} + fetchEvalList={() => fetchData()} + /> + )} + + ); +}; + +export default EvaluationTasks; diff --git a/projects/app/src/pages/dashboard/evaluation/components/create.tsx b/projects/app/src/pages/dashboard/evaluation/components/create.tsx new file mode 100644 index 000000000000..471fb5733c88 --- /dev/null +++ b/projects/app/src/pages/dashboard/evaluation/components/create.tsx @@ -0,0 +1,378 @@ +import MyBox from '@fastgpt/web/components/common/MyBox'; +import DashboardContainer from '../../../../pageComponents/dashboard/Container'; +import { useTranslation } from 'next-i18next'; +import { Box, Button, Flex, Input, VStack } from '@chakra-ui/react'; +import { useRouter } from 'next/router'; +import { serviceSideProps } from '@/web/common/i18n/utils'; +import AIModelSelector from '@/components/Select/AIModelSelector'; +import { useForm } from 'react-hook-form'; +import { useSystemStore } from '@/web/common/system/useSystemStore'; +import FormLabel from '@fastgpt/web/components/common/MyBox/FormLabel'; +import AppSelect from '@/components/Select/AppSelect'; +import MyIcon from '@fastgpt/web/components/common/Icon'; +import FileSelector, { + type SelectFileItemType +} from '@/pageComponents/dataset/detail/components/FileSelector'; +import { Trans } from 'next-i18next'; +import MyIconButton from '@fastgpt/web/components/common/Icon/button'; +import { useRequest2 } from '@fastgpt/web/hooks/useRequest'; +import { getAppDetailById } from '@/web/core/app/api'; +import { useToast } from '@fastgpt/web/hooks/useToast'; +import QuestionTip from '@fastgpt/web/components/common/MyTooltip/QuestionTip'; +import { fileDownload } from '@/web/common/file/utils'; +import { postCreateEvaluation } from '@/web/core/evaluation/evaluation'; +import { useMemo, useState } from 'react'; +import Markdown from '@/components/Markdown'; +import { getEvaluationFileHeader } from '@fastgpt/global/core/evaluation/utils'; +import { 
evaluationFileErrors } from '@fastgpt/global/core/evaluation/constants'; +import { TeamErrEnum } from '@fastgpt/global/common/error/code/team'; +import { getErrText } from '@fastgpt/global/common/error/utils'; + +type EvaluationFormType = { + name: string; + evalModel: string; + appId: string; + evaluationFiles: SelectFileItemType[]; +}; + +const EvaluationCreating = () => { + const { t } = useTranslation(); + const router = useRouter(); + const { toast } = useToast(); + + const [percent, setPercent] = useState(0); + const [error, setError] = useState(); + + const { llmModelList, feConfigs } = useSystemStore(); + + const evalModelList = useMemo(() => { + return llmModelList.filter((item) => item.useInEvaluation); + }, [llmModelList]); + const { register, setValue, watch, handleSubmit } = useForm({ + defaultValues: { + name: '', + evalModel: evalModelList[0]?.model, + appId: '', + evaluationFiles: [] as SelectFileItemType[] + } + }); + + const name = watch('name'); + const evalModel = watch('evalModel'); + const appId = watch('appId'); + const evaluationFiles = watch('evaluationFiles'); + + const { runAsync: getAppDetail, loading: isLoadingAppDetail } = useRequest2(() => { + if (appId) return getAppDetailById(appId); + return Promise.resolve(null); + }); + + const handleDownloadTemplate = async () => { + const appDetail = await getAppDetail(); + const variables = appDetail?.chatConfig.variables; + const templateContent = getEvaluationFileHeader(variables); + + fileDownload({ + text: templateContent, + type: 'text/csv;charset=utf-8', + filename: `${appDetail?.name}_evaluation.csv` + }); + }; + + const { runAsync: createEvaluation, loading: isCreating } = useRequest2( + async (data: EvaluationFormType) => { + await postCreateEvaluation({ + file: data.evaluationFiles[0].file, + name: data.name, + evalModel: data.evalModel, + appId: data.appId, + percentListen: setPercent + }); + }, + { + onSuccess: () => { + toast({ + title: 
t('dashboard_evaluation:evaluation_created'), + status: 'success' + }); + + router.push('/dashboard/evaluation'); + }, + errorToast: '', + onError: (error) => { + if (error.message === evaluationFileErrors) { + setError(error.message); + } else if (error.message === TeamErrEnum.aiPointsNotEnough) { + useSystemStore.getState().setNotSufficientModalType(error.message); + } else { + toast({ + title: t(getErrText(error)), + status: 'error' + }); + } + } + } + ); + + const onSubmit = async (data: EvaluationFormType) => { + if (!data.appId) { + return toast({ + title: t('dashboard_evaluation:app_required'), + status: 'warning' + }); + } + if (!data.evaluationFiles || data.evaluationFiles.length === 0) { + return toast({ + title: t('dashboard_evaluation:file_required'), + status: 'warning' + }); + } + + await createEvaluation(data); + }; + + return ( + + {() => ( + + + + + + {t('dashboard_evaluation:Task_name')} + + + + + + {t('dashboard_evaluation:Evaluation_model')} + + ({ + label: item.name, + value: item.model + }))} + onChange={(e) => { + setValue('evalModel', e); + }} + /> + + + + {t('dashboard_evaluation:Evaluation_app')} + + + + { + setValue('appId', id); + }} + /> + {appId && ( + + )} + + + + + {t('dashboard_evaluation:Evaluation_file')} + + {appId ? 
( + + { + setValue('evaluationFiles', e); + }} + FileTypeNode={ + + + }} + /> + + } + /> + {evaluationFiles && evaluationFiles.length > 0 && ( + + {evaluationFiles.map((item, index) => ( + + + + {item.name} + + + { + setValue( + 'evaluationFiles', + evaluationFiles.filter((_, i) => i !== index) + ); + + setError(undefined); + }} + /> + + ))} + + )} + {error && ( + + + + {t('dashboard_evaluation:check_format')} + + + {t('dashboard_evaluation:check_error')} + + + + + )} + + ) : ( + + {t('dashboard_evaluation:app_required')} + + )} + + + + + + + )} + + ); +}; + +export default EvaluationCreating; + +export async function getServerSideProps(content: any) { + return { + props: { + ...(await serviceSideProps(content, ['dashboard_evaluation', 'file'])) + } + }; +} diff --git a/projects/app/src/pages/dashboard/evaluation/dataset/detail/index.tsx b/projects/app/src/pages/dashboard/evaluation/dataset/detail/index.tsx new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/projects/app/src/pages/dashboard/evaluation/dataset/index.tsx b/projects/app/src/pages/dashboard/evaluation/dataset/index.tsx new file mode 100644 index 000000000000..f0a1d66f8ddc --- /dev/null +++ b/projects/app/src/pages/dashboard/evaluation/dataset/index.tsx @@ -0,0 +1,493 @@ +import React, { useState, useCallback } from 'react'; +import { useRouter } from 'next/router'; +import { + Table, + Thead, + Tbody, + Tr, + Th, + Td, + TableContainer, + Box, + Flex, + HStack, + Input, + InputGroup, + InputLeftElement, + Modal, + ModalOverlay, + ModalContent, + ModalHeader, + ModalBody, + ModalCloseButton, + useDisclosure, + Text +} from '@chakra-ui/react'; +import MyBox from '@fastgpt/web/components/common/MyBox'; +import MyIconButton from '@fastgpt/web/components/common/Icon/button'; +import MyMenu from '@fastgpt/web/components/common/MyMenu'; +import MyIcon from '@fastgpt/web/components/common/Icon'; +import { useConfirm } from '@fastgpt/web/hooks/useConfirm'; +import { useRequest2 } from 
'@fastgpt/web/hooks/useRequest'; +import { usePagination } from '@fastgpt/web/hooks/usePagination'; +import format from 'date-fns/format'; +import UserBox from '@fastgpt/web/components/common/UserBox'; +import { useEditTitle } from '@/web/common/hooks/useEditTitle'; +import { useTranslation } from 'next-i18next'; +import MyTooltip from '@fastgpt/web/components/common/MyTooltip'; +import EmptyTip from '@fastgpt/web/components/common/EmptyTip'; +import MyTag from '@fastgpt/web/components/common/Tag/index'; +import IntelligentGeneration from '@/pageComponents/dashboard/evaluation/dataset/IntelligentGeneration'; + +// 数据集状态类型 +type DatasetStatus = + | 'queuing' + | 'parsing' + | 'generating' + | 'generateError' + | 'ready' + | 'parseError'; + +// 数据集类型 +interface EvaluationDataset { + id: number; + name: string; + dataCount: number; + status: DatasetStatus; + createTime: Date | string; + updateTime: Date | string; + creator: { + name: string; + avatar: string; + }; + errorMessage?: string; // 异常状态时的错误信息 +} + +// 模拟数据 +const mockDatasets: EvaluationDataset[] = [ + { + id: 1, + name: '数据集1', + dataCount: 100, + status: 'queuing', + createTime: '2025-05-23T10:36:13.000Z', + updateTime: '2025-05-23T10:56:13.000Z', + creator: { + name: 'violetjam', + avatar: '/imgs/avatar/BlueAvatar.svg' + } + }, + { + id: 2, + name: '数据集2', + dataCount: 100, + status: 'parsing', + createTime: '2025-05-23T10:36:13.000Z', + updateTime: '2025-05-23T10:56:13.000Z', + creator: { + name: 'violetjam', + avatar: '/imgs/avatar/BlueAvatar.svg' + } + }, + { + id: 3, + name: '数据集3', + dataCount: 100, + status: 'generating', + createTime: '2025-05-23T10:36:13.000Z', + updateTime: '2025-05-23T10:56:13.000Z', + creator: { + name: 'violetjam', + avatar: '/imgs/avatar/BlueAvatar.svg' + } + }, + { + id: 4, + name: '数据集4', + dataCount: 100, + status: 'generateError', + createTime: '2025-05-23T10:36:13.000Z', + updateTime: '2025-05-23T10:56:13.000Z', + creator: { + name: 'violetjam', + avatar: 
'/imgs/avatar/BlueAvatar.svg' + }, + errorMessage: '数据生成失败:模型调用异常' + }, + { + id: 5, + name: '数据集5', + dataCount: 100, + status: 'ready', + createTime: '2025-05-23T10:36:13.000Z', + updateTime: '2025-05-23T10:56:13.000Z', + creator: { + name: 'violetjam', + avatar: '/imgs/avatar/BlueAvatar.svg' + } + }, + { + id: 6, + name: '数据集6', + dataCount: 0, + status: 'parseError', + createTime: '2025-05-23T10:36:13.000Z', + updateTime: '2025-05-23T10:56:13.000Z', + creator: { + name: 'violetjam', + avatar: '/imgs/avatar/BlueAvatar.svg' + }, + errorMessage: '文件解析失败:格式不支持或文件损坏' + } +]; + +// 模拟API函数 +const getMockEvaluationDatasets = async (data: any) => { + await new Promise((resolve) => setTimeout(resolve, 300)); + + const { pageNum, pageSize, searchKey = '' } = data; + + // 过滤数据 + let filteredDatasets = mockDatasets.filter((dataset) => { + const matchesSearch = dataset.name.toLowerCase().includes(searchKey.toLowerCase()); + return matchesSearch; + }); + + // 分页 + const total = filteredDatasets.length; + const startIndex = (pageNum - 1) * pageSize; + const endIndex = startIndex + pageSize; + const list = filteredDatasets.slice(startIndex, endIndex); + + return { + list, + total + }; +}; + +const EvaluationDatasets = ({ Tab }: { Tab: React.ReactNode }) => { + const [searchValue, setSearchValue] = useState(''); + const [selectedError, setSelectedError] = useState(''); + const router = useRouter(); + const { t } = useTranslation(); + const { + isOpen: isErrorModalOpen, + onOpen: onOpenErrorModal, + onClose: onCloseErrorModal + } = useDisclosure(); + const { + isOpen: isCreateModalOpen, + onOpen: onOpenCreateModal, + onClose: onCloseCreateModal + } = useDisclosure(); + const { + isOpen: isIntelligentModalOpen, + onOpen: onOpenIntelligentModal, + onClose: onCloseIntelligentModal + } = useDisclosure(); + + // 使用分页Hook + const { + data: datasets, + Pagination, + getData: fetchData + } = usePagination(getMockEvaluationDatasets, { + defaultPageSize: 10, + params: { + searchKey: 
searchValue + }, + EmptyTip: , + refreshDeps: [searchValue] + }); + + // 状态配置 + const statusConfig = { + queuing: { + label: t('evaluation_dataset:status_queuing'), + colorSchema: 'gray' + }, + parsing: { + label: t('evaluation_dataset:status_parsing'), + colorSchema: 'blue' + }, + generating: { + label: t('evaluation_dataset:status_generating'), + colorSchema: 'blue' + }, + generateError: { + label: t('evaluation_dataset:status_generate_error'), + colorSchema: 'red' + }, + ready: { + label: t('evaluation_dataset:status_ready'), + colorSchema: 'green' + }, + parseError: { + label: t('evaluation_dataset:status_parse_error'), + colorSchema: 'red' + } + }; + + const { openConfirm, ConfirmModal } = useConfirm({ + type: 'delete' + }); + + const { onOpenModal: onOpenEditTitleModal, EditModal: EditTitleModal } = useEditTitle({ + title: t('evaluation_dataset:rename') + }); + + // 模拟更新数据集名称的请求 + const { runAsync: onUpdateDatasetName, loading: isUpdating } = useRequest2( + (datasetId: number, newName: string) => { + console.log('updateDatasetName', datasetId, newName); + return Promise.resolve(); + }, + { + successToast: '更新成功' + } + ); + + // 渲染状态标签 + const renderStatus = (dataset: EvaluationDataset) => { + const config = statusConfig[dataset.status]; + + // 如果状态配置不存在,返回默认状态 + if (!config) { + return -; + } + + const isErrorStatus = dataset.status === 'generateError' || dataset.status === 'parseError'; + + return ( + + { + e.stopPropagation(); + setSelectedError(dataset.errorMessage || 'unknown error'); + onOpenErrorModal(); + } + : undefined + } + > + + {config.label} + {isErrorStatus && } + + + + ); + }; + + const handleDeleteDataset = (datasetId: number) => { + console.log('deleteDataset:', datasetId); + }; + + const handleRenameDataset = (dataset: EvaluationDataset) => { + onOpenEditTitleModal({ + defaultVal: dataset.name, + onSuccess: async (newName) => { + await onUpdateDatasetName(dataset.id, newName); + fetchData(); + } + }); + }; + + const handleCreateDataset = 
(type: 'smart' | 'import') => { + console.log('createDataset:', type); + onCloseCreateModal(); + + if (type === 'smart') { + onOpenIntelligentModal(); + } else { + // 跳转到文件导入页面 + router.push('/dashboard/evaluation/dataset/fileImport'); + } + }; + + const handleIntelligentGenerationConfirm = useCallback( + (data: any) => { + console.log('generateDataset:', data); + onCloseIntelligentModal(); + // 这里应该调用API创建数据集 + }, + [onCloseIntelligentModal] + ); + + return ( + <> + + {Tab} + + + + + + + setSearchValue(e.target.value)} + bg={'white'} + /> + + + + {t('evaluation_dataset:create_new_dataset')} + + + } + menuList={[ + { + children: [ + { + label: ( + + + {t('evaluation_dataset:smart_generation')} + + ), + onClick: () => handleCreateDataset('smart') + }, + { + label: ( + + + {t('evaluation_dataset:file_import')} + + ), + onClick: () => handleCreateDataset('import') + } + ] + } + ]} + /> +
+ + + + + + + + + + + + + + + + + {datasets.map((dataset) => ( + + + + + + + + + ))} + +
{t('evaluation_dataset:table_header_name')}{t('evaluation_dataset:table_header_data_count')}{t('evaluation_dataset:table_header_time')}{t('evaluation_dataset:table_header_status')}{t('evaluation_dataset:table_header_creator')}
{dataset.name}{dataset.dataCount} + {format(new Date(dataset.createTime), 'yyyy-MM-dd HH:mm:ss')} + {format(new Date(dataset.updateTime), 'yyyy-MM-dd HH:mm:ss')} + {renderStatus(dataset)} + + + handleRenameDataset(dataset) + }, + { + type: 'danger', + icon: 'delete', + label: t('evaluation_dataset:delete'), + onClick: () => + openConfirm( + async () => { + await handleDeleteDataset(dataset.id); + fetchData(); + }, + undefined, + t('evaluation_dataset:confirm_delete_dataset') + )() + } + ] + } + ]} + Button={} + /> +
+
+
+ + + + + + {/* 异常详情弹窗 */} + + + + {t('evaluation_dataset:error_details')} + + + {selectedError} + + + + + + + + {/* 智能生成数据集弹窗 */} + {isIntelligentModalOpen && ( + + )} + + ); +}; + +export default EvaluationDatasets; diff --git a/projects/app/src/pages/dashboard/evaluation/dimension/index.tsx b/projects/app/src/pages/dashboard/evaluation/dimension/index.tsx new file mode 100644 index 000000000000..2b5ea1d438ea --- /dev/null +++ b/projects/app/src/pages/dashboard/evaluation/dimension/index.tsx @@ -0,0 +1,207 @@ +import React, { useState } from 'react'; +import { + Table, + Thead, + Tbody, + Tr, + Th, + Td, + TableContainer, + Box, + Flex, + Button, + HStack, + Text, + Input, + InputGroup, + InputLeftElement +} from '@chakra-ui/react'; +import MyBox from '@fastgpt/web/components/common/MyBox'; +import MyIconButton from '@fastgpt/web/components/common/Icon/button'; +import MyIcon from '@fastgpt/web/components/common/Icon'; +import MyTag from '@fastgpt/web/components/common/Tag'; +import { useConfirm } from '@fastgpt/web/hooks/useConfirm'; +import { useRequest2 } from '@fastgpt/web/hooks/useRequest'; +import { useRouter } from 'next/router'; +import { usePagination } from '@fastgpt/web/hooks/usePagination'; +import format from 'date-fns/format'; +import UserBox from '@fastgpt/web/components/common/UserBox'; +import { useTranslation } from 'next-i18next'; +import EmptyTip from '@fastgpt/web/components/common/EmptyTip'; +import { getMetricList, deleteMetric } from '@/web/core/evaluation/dimension'; +import { EvalMetricTypeEnum } from '@fastgpt/global/core/evaluation/constants'; +import type { EvalMetricSchemaType } from '@fastgpt/global/core/evaluation/type'; + +const EvaluationDimensions = ({ Tab }: { Tab: React.ReactNode }) => { + const [searchValue, setSearchValue] = useState(''); + const { t } = useTranslation(); + const router = useRouter(); + + // 创建适配器函数来匹配 usePagination 的参数格式 + const getMetricListAdapter = async (data: any) => { + const params = { + page: 
data.pageNum, + pageSize: data.pageSize, + searchKey: data.searchKey + }; + const result = await getMetricList(params); + + // 根据实际接口响应结构解析数据 + return { + list: result.list || [], + total: result.total || 0 + }; + }; + + // 使用分页Hook + const { + data: dimensions, + Pagination, + getData: fetchData + } = usePagination(getMetricListAdapter, { + defaultPageSize: 10, + params: { + searchKey: searchValue + }, + EmptyTip: , + refreshDeps: [searchValue] + }); + + const { openConfirm, ConfirmModal } = useConfirm({ + type: 'delete' + }); + + const { runAsync: onDeleteMetric } = useRequest2(deleteMetric, { + onSuccess: () => { + fetchData(); + }, + errorToast: t('evaluation_dimension:delete_failed'), + successToast: t('evaluation_dimension:delete_success') + }); + + const handleDeleteDimension = (dimensionId: string) => { + onDeleteMetric(dimensionId); + }; + + return ( + <> + + {Tab} + + + + + + + setSearchValue(e.target.value)} + bg={'white'} + /> + + + + + + + + + + + + + + + + + + + {dimensions.map((dimension: EvalMetricSchemaType) => ( + { + router.push({ + pathname: '/dashboard/evaluation/dimension/edit', + query: { id: dimension._id } + }); + } + : undefined + } + > + + + + + + + ))} + +
{t('evaluation_dimension:dimension_name')}{t('evaluation_dimension:description')}{t('evaluation_dimension:create_update_time')}{t('evaluation_dimension:creator')}
+ + {dimension.name} + {dimension.type === EvalMetricTypeEnum.Builtin && ( + {t('evaluation_dimension:builtin')} + )} + + {dimension.description || '-'} + {format(new Date(dimension.createTime), 'yyyy-MM-dd HH:mm:ss')} + {format(new Date(dimension.updateTime), 'yyyy-MM-dd HH:mm:ss')} + + + e.stopPropagation()}> + {dimension.type === EvalMetricTypeEnum.Custom && ( + + openConfirm( + async () => { + await handleDeleteDimension(dimension._id); + }, + undefined, + t('evaluation_dimension:confirm_delete_dimension') + )() + } + /> + )} +
+
+
+ + + + + + + + ); +}; + +export default EvaluationDimensions; diff --git a/projects/app/src/pages/dashboard/evaluation/index.tsx b/projects/app/src/pages/dashboard/evaluation/index.tsx index cb9b9c00e68f..1961d8544ea7 100644 --- a/projects/app/src/pages/dashboard/evaluation/index.tsx +++ b/projects/app/src/pages/dashboard/evaluation/index.tsx @@ -1,287 +1,56 @@ 'use client'; -import MyBox from '@fastgpt/web/components/common/MyBox'; import DashboardContainer from '../../../pageComponents/dashboard/Container'; import { serviceSideProps } from '@/web/common/i18n/utils'; import { useTranslation } from 'next-i18next'; -import { - Box, - Button, - Flex, - IconButton, - Table, - TableContainer, - Tbody, - Td, - Th, - Thead, - Tr -} from '@chakra-ui/react'; -import SearchInput from '@fastgpt/web/components/common/Input/SearchInput'; -import MyIcon from '@fastgpt/web/components/common/Icon'; +import { Flex } from '@chakra-ui/react'; +import { useState, useMemo } from 'react'; +import FillRowTabs from '@fastgpt/web/components/common/Tabs/FillRowTabs'; +import EvaluationTasks from './task/index'; +import EvaluationDatasets from './dataset/index'; +import EvaluationDimensions from './dimension/index'; import { useRouter } from 'next/router'; -import { useRequest2 } from '@fastgpt/web/hooks/useRequest'; -import { deleteEvaluation, getEvaluationList } from '@/web/core/evaluation/evaluation'; -import { formatTime2YMDHM } from '@fastgpt/global/common/string/time'; -import Avatar from '@fastgpt/web/components/common/Avatar'; -import { usePagination } from '@fastgpt/web/hooks/usePagination'; -import { useState, useEffect, useMemo } from 'react'; -import EvaluationDetailModal from '@/pageComponents/evaluation/DetailModal'; -import { useSystem } from '@fastgpt/web/hooks/useSystem'; -import EmptyTip from '@fastgpt/web/components/common/EmptyTip'; -import type { evaluationType } from '@fastgpt/global/core/evaluation/type'; -import MyTooltip from 
'@fastgpt/web/components/common/MyTooltip'; -import PopoverConfirm from '@fastgpt/web/components/common/MyPopover/PopoverConfirm'; + +type TabType = 'tasks' | 'datasets' | 'dimensions'; const Evaluation = () => { - const router = useRouter(); const { t } = useTranslation(); + const router = useRouter(); + const { evaluationTab = 'tasks' } = router.query as { evaluationTab: TabType }; - const { isPc } = useSystem(); - - const [searchKey, setSearchKey] = useState(''); - const [evalDetailId, setEvalDetailId] = useState(); - - const [pollingInterval, setPollingInterval] = useState(10000); - - const { - data: evaluationList, - Pagination, - getData: fetchData, - total, - pageSize - } = usePagination(getEvaluationList, { - defaultPageSize: 20, - pollingInterval, - pollingWhenHidden: true, - params: { - searchKey - }, - EmptyTip: , - refreshDeps: [searchKey] - }); - - const evalDetail = useMemo(() => { - if (!evalDetailId) return undefined; - return evaluationList.find((item) => item._id === evalDetailId); - }, [evalDetailId, evaluationList]); - - useEffect(() => { - const hasRunningOrErrorTasks = evaluationList.some((item) => { - const { totalCount = 0, completedCount = 0, errorCount = 0 } = item; - const isCompleted = totalCount === completedCount; - return !isCompleted || errorCount > 0; - }); - - setPollingInterval(hasRunningOrErrorTasks ? 10000 : 0); - }, [evaluationList]); - - const { runAsync: onDeleteEval } = useRequest2(deleteEvaluation, { - onSuccess: () => { - fetchData(); - } - }); - - const renderHeader = (MenuIcon?: React.ReactNode) => { - return isPc ? 
( - - - {t('dashboard_evaluation:evaluation')} - - - { - setSearchKey(e.target.value); - }} - /> - - - - ) : ( - - - {MenuIcon} - - {t('dashboard_evaluation:evaluation')} - - - - { - setSearchKey(e.target.value); - }} - /> - - - - ); - }; - - const renderProgress = (item: evaluationType) => { - const { completedCount, totalCount, errorCount } = item; - - if (completedCount === totalCount) { - return ( - - {t('dashboard_evaluation:completed')} - - ); - } - + const Tab = useMemo(() => { return ( - - {completedCount} - {`/${totalCount}`} - {(errorCount > 0 || item.errorMessage) && ( - + list={[ + { label: t('dashboard_evaluation:evaluation_tasks_tab'), value: 'tasks' }, + { label: t('dashboard_evaluation:evaluation_datasets_tab'), value: 'datasets' }, + { label: t('dashboard_evaluation:evaluation_dimensions_tab'), value: 'dimensions' } + ]} + value={evaluationTab} + py={1} + onChange={(e) => { + router.replace({ + query: { + ...router.query, + evaluationTab: e } - > - setEvalDetailId(item._id)} - /> - - )} - + }); + }} + /> ); - }; + }, [router, evaluationTab, t]); return ( - <> - - {({ MenuIcon }) => ( - - {renderHeader(MenuIcon)} - - - - - - - - - - - - - - - - - {evaluationList.map((item) => { - return ( - - - - - - - - - - ); - })} - -
{t('dashboard_evaluation:Task_name')}{t('dashboard_evaluation:Progress')}{t('dashboard_evaluation:Executor')}{t('dashboard_evaluation:Evaluation_app')}{t('dashboard_evaluation:Start_end_time')}{t('dashboard_evaluation:Overall_score')}{t('dashboard_evaluation:Action')}
- {item.name} - {renderProgress(item)} - - - {item.executorName} - - - - - {item.appName} - - - {formatTime2YMDHM(item.createTime)} - {formatTime2YMDHM(item.finishTime)} - - {typeof item.score === 'number' ? (item.score * 100).toFixed(2) : '-'} - - - - } - /> - } - content={t('dashboard_evaluation:comfirm_delete_task')} - onConfirm={() => onDeleteEval({ evalId: item._id })} - /> -
-
- {total >= pageSize && ( - - - - )} + + {({ MenuIcon }) => ( + + + {evaluationTab === 'tasks' && } + {evaluationTab === 'datasets' && } + {evaluationTab === 'dimensions' && } - )} - - {!!evalDetail && ( - setEvalDetailId(undefined)} - fetchEvalList={() => fetchData()} - /> +
)} - +
); }; diff --git a/projects/app/src/pages/dashboard/evaluation/task/detail/index.tsx b/projects/app/src/pages/dashboard/evaluation/task/detail/index.tsx new file mode 100644 index 000000000000..7c686c702358 --- /dev/null +++ b/projects/app/src/pages/dashboard/evaluation/task/detail/index.tsx @@ -0,0 +1,40 @@ +import React from 'react'; +import { useRouter } from 'next/router'; +import { Box, Flex } from '@chakra-ui/react'; +import { useTranslation } from 'next-i18next'; +import FolderPath from '@/components/common/folder/Path'; + +const EvaluationTaskDetail = () => { + const { t } = useTranslation(); + const router = useRouter(); + + // 路径导航 + const paths = [{ parentId: 'current', parentName: 'taskName' }]; + + return ( + + {/* 顶部导航栏 */} + + {/* 路径导航 */} + + { + router.push(`/dashboard/evaluation?evaluationTab=tasks`); + }} + /> + + + + ); +}; + +export default EvaluationTaskDetail; diff --git a/projects/app/src/pages/dashboard/evaluation/task/index.tsx b/projects/app/src/pages/dashboard/evaluation/task/index.tsx new file mode 100644 index 000000000000..e13349e1e286 --- /dev/null +++ b/projects/app/src/pages/dashboard/evaluation/task/index.tsx @@ -0,0 +1,541 @@ +import React, { useState, useMemo } from 'react'; +import { + Table, + Thead, + Tbody, + Tr, + Th, + Td, + TableContainer, + Box, + Flex, + Button, + HStack, + Input, + InputGroup, + InputLeftElement +} from '@chakra-ui/react'; +import Avatar from '@fastgpt/web/components/common/Avatar'; +import MyBox from '@fastgpt/web/components/common/MyBox'; +import MyIconButton from '@fastgpt/web/components/common/Icon/button'; +import MyMenu from '@fastgpt/web/components/common/MyMenu'; +import MyIcon from '@fastgpt/web/components/common/Icon'; +import { useConfirm } from '@fastgpt/web/hooks/useConfirm'; +import { useRequest2 } from '@fastgpt/web/hooks/useRequest'; +import { usePagination } from '@fastgpt/web/hooks/usePagination'; +import format from 'date-fns/format'; +import UserBox from 
'@fastgpt/web/components/common/UserBox'; +import { useEditTitle } from '@/web/common/hooks/useEditTitle'; +import { useTranslation } from 'next-i18next'; +import MyTooltip from '@fastgpt/web/components/common/MyTooltip'; +import AppSelectWithAll from '@/pageComponents/dashboard/evaluation/task/AppSelectWithAll'; +import EmptyTip from '@fastgpt/web/components/common/EmptyTip'; +import { useRouter } from 'next/router'; +import CitationTemplate from '@/pageComponents/dashboard/evaluation/dimension/CitationTemplate'; +import ConfigParamsModal from '@/pageComponents/dashboard/evaluation/task/detail/ConfigParams'; +import CreateModal from '@/pageComponents/dashboard/evaluation/task/CreateModal'; + +// 评测结果维度类型 +interface EvaluationDimension { + name: string; + score: number; +} + +// 评测结果类型 +interface EvaluationResult { + type: 'comprehensive' | 'dimensions'; // 综合评分 或 维度评分 + comprehensiveScore?: number; // 综合分数 + dimensions?: EvaluationDimension[]; // 维度分数列表 +} + +// 模拟数据类型 +interface EvaluationTask { + id: number; + name: string; + status: 'pending' | 'running' | 'completed'; + app: { + name: string; + avatar: string; + }; + version: string; + result: string; + createTime: Date | string; + finishTime?: Date | string; + executor: { + name: string; + avatar: string; + }; + // 进度相关字段 + completedCount?: number; + totalCount?: number; + // 异常数据数量 + errorCount?: number; + // 评测结果详情 + evaluationResult?: EvaluationResult; +} + +// 模拟数据 +const mockTasks: EvaluationTask[] = [ + { + id: 1, + name: '任务1', + status: 'pending', + app: { + name: '客服助手', + avatar: 'core/app/type/simpleFill' + }, + version: '2025-08-01', + result: '等待中', + createTime: '2025-08-01T00:58:08.946Z', + executor: { + name: 'violetjam', + avatar: '/imgs/avatar/BlueAvatar.svg' + } + }, + { + id: 2, + name: '任务2', + status: 'running', + app: { + name: '客服助手', + avatar: 'core/app/type/simpleFill' + }, + version: '2025-08-01', + result: '评测中', + createTime: '2025-08-01T00:58:08.946Z', + executor: { + name: 
'violetjam', + avatar: '/imgs/avatar/BlueAvatar.svg' + }, + completedCount: 41, + totalCount: 50, + errorCount: 2 + }, + { + id: 3, + name: '任务3', + status: 'running', + app: { + name: '客服助手', + avatar: 'core/app/type/simpleFill' + }, + version: '2025-08-01', + result: '评测中', + createTime: '2025-08-01T00:58:08.946Z', + executor: { + name: 'violetjam', + avatar: '/imgs/avatar/BlueAvatar.svg' + }, + completedCount: 41, + totalCount: 50 + }, + { + id: 4, + name: '任务4', + status: 'completed', + app: { + name: '客服助手', + avatar: 'core/app/type/simpleFill' + }, + version: '2025-08-01', + result: '已完成', + createTime: '2025-08-01T00:58:08.946Z', + finishTime: '2025-08-01T01:58:08.946Z', + executor: { + name: 'violetjam', + avatar: '/imgs/avatar/BlueAvatar.svg' + }, + evaluationResult: { + type: 'comprehensive', + comprehensiveScore: 72 + } + }, + { + id: 5, + name: '任务5', + status: 'completed', + app: { + name: '客服助手', + avatar: 'core/app/type/simpleFill' + }, + version: '2025-08-01', + result: '已完成', + createTime: '2025-08-01T00:58:08.946Z', + finishTime: '2025-08-01T01:58:08.946Z', + executor: { + name: 'violetjam', + avatar: '/imgs/avatar/BlueAvatar.svg' + }, + evaluationResult: { + type: 'dimensions', + dimensions: [ + { name: '回答准确性', score: 62 }, + { name: '回答忠诚度', score: 78 } + ] + } + } +]; + +// 模拟API函数 - 实际项目中应该替换为真实的API调用 +const getMockEvaluationTasks = async (data: any) => { + // 模拟API延迟 + await new Promise((resolve) => setTimeout(resolve, 300)); + + const { pageNum, pageSize, searchKey = '', appFilter = '' } = data; + + // 过滤数据 + let filteredTasks = mockTasks.filter((task) => { + const matchesSearch = task.name.toLowerCase().includes(searchKey.toLowerCase()); + const matchesApp = !appFilter || task.app.name === appFilter; + return matchesSearch && matchesApp; + }); + + // 分页 + const total = filteredTasks.length; + const startIndex = (pageNum - 1) * pageSize; + const endIndex = startIndex + pageSize; + const list = filteredTasks.slice(startIndex, endIndex); + + 
return { + list, + total + }; +}; + +const EvaluationTasks = ({ Tab }: { Tab: React.ReactNode }) => { + const router = useRouter(); + const [searchValue, setSearchValue] = useState(''); + const [appFilter, setAppFilter] = useState(''); + const [isTemplateModalOpen, setIsTemplateModalOpen] = useState(false); + const [isConfigParamsModalOpen, setIsConfigParamsModalOpen] = useState(false); + const [isCreateModalOpen, setIsCreateModalOpen] = useState(false); + const { t } = useTranslation(); + + // 使用分页Hook + const { + data: tasks, + Pagination, + getData: fetchData + } = usePagination(getMockEvaluationTasks, { + defaultPageSize: 10, + params: { + searchKey: searchValue, + appFilter + }, + EmptyTip: , + refreshDeps: [searchValue, appFilter] + }); + + const statusMap = { + pending: { label: t('dashboard_evaluation:queuing_status'), colorSchema: undefined }, + running: { label: t('dashboard_evaluation:running_status'), colorSchema: 'blue' }, + completed: { label: t('dashboard_evaluation:completed_status'), colorSchema: 'green.600' } + }; + + const { openConfirm, ConfirmModal } = useConfirm({ + type: 'delete' + }); + + const { onOpenModal: onOpenEditTitleModal, EditModal: EditTitleModal } = useEditTitle({ + title: t('common:Rename') + }); + + // TODO: 模拟更新任务名称的请求 + const { runAsync: onUpdateTaskName, loading: isUpdating } = useRequest2( + (taskId: number, newName: string) => { + // 这里应该是实际的API调用,现在使用模拟 + console.log('更新任务名称:', taskId, newName); + return Promise.resolve(); + }, + { + successToast: t('common:update_success') + } + ); + + // 渲染评测结果 + const renderEvaluationResult = (task: EvaluationTask) => { + if (task.status === 'running') { + return {t('dashboard_evaluation:evaluating_status')}; + } + + if (task.status === 'pending') { + return {t('dashboard_evaluation:waiting')}; + } + + if (task.status === 'completed' && task.evaluationResult) { + const { evaluationResult } = task; + + if ( + evaluationResult.type === 'comprehensive' && + 
evaluationResult.comprehensiveScore !== undefined + ) { + // 综合评分显示 + return ( + + {evaluationResult.comprehensiveScore} + + ); + } + + if (evaluationResult.type === 'dimensions' && evaluationResult.dimensions) { + // 维度评分显示 + return ( + + {evaluationResult.dimensions.map((dimension, index) => ( + + + {dimension.score} + + + ({dimension.name}) + + + ))} + + ); + } + } + + return -; + }; + + const handleDeleteTask = (taskId: number) => { + console.log('删除任务:', taskId); + }; + + const handleRenameTask = (task: EvaluationTask) => { + onOpenEditTitleModal({ + defaultVal: task.name, + onSuccess: async (newName) => { + await onUpdateTaskName(task.id, newName); + fetchData(); // 重新获取数据 + } + }); + }; + + const handleRetryErrorData = (taskId: number) => { + console.log('重试异常数据:', taskId); + // TODO: 实现重试异常数据的API调用 + }; + + const handleCreateNewTask = () => { + setIsCreateModalOpen(true); + }; + + const handleTemplateConfirm = (template: string) => { + console.log('选择的模板:', template); + // TODO: 根据选择的模板创建新任务 + }; + + // 处理配置参数确认 + const handleConfigParamsConfirm = (config: any) => { + console.log('配置参数:', config); + // TODO: 根据配置参数创建新任务 + setIsConfigParamsModalOpen(false); + }; + + // 处理创建任务确认 + const handleCreateTaskConfirm = (data: any) => { + console.log('创建任务:', data); + // TODO: 根据表单数据创建新任务 + fetchData(); // 重新获取数据 + }; + + return ( + <> + + {Tab} + + + + + + + + + + setSearchValue(e.target.value)} + bg={'white'} + /> + + + + + + + + + + + + + + + + + + + + + + {tasks.map((task) => ( + { + router.push({ + pathname: '/dashboard/evaluation/task/detail', + query: { + taskId: task.id + } + }); + }} + > + + + + + + + + + + ))} + +
{t('dashboard_evaluation:task_name_column')}{t('dashboard_evaluation:progress_column')}{t('dashboard_evaluation:evaluation_app_column')}{t('dashboard_evaluation:app_version_column')}{t('dashboard_evaluation:evaluation_result_column')}{t('dashboard_evaluation:start_finish_time_column')}{t('dashboard_evaluation:executor_column')}
{task.name} + {task.status === 'running' && + task.completedCount !== undefined && + task.totalCount !== undefined ? ( + + + + {task.completedCount} + + + /{task.totalCount} + + + {task.errorCount && task.errorCount > 0 && ( + + + + )} + + ) : ( + + {statusMap[task.status]?.label} + + )} + + + + {task.app.name} + + {task.version}{renderEvaluationResult(task)} + {format(new Date(task.createTime), 'yyyy-MM-dd HH:mm:ss')} + + {task.finishTime + ? format(new Date(task.finishTime), 'yyyy-MM-dd HH:mm:ss') + : '-'} + + + + e.stopPropagation()}> + 0 + ? [ + { + icon: 'common/retryLight', + label: t('dashboard_evaluation:retry_error_data'), + onClick: () => handleRetryErrorData(task.id) + } + ] + : []), + { + icon: 'edit', + label: t('dashboard_evaluation:rename'), + onClick: () => handleRenameTask(task) + }, + { + type: 'danger', + icon: 'delete', + label: t('dashboard_evaluation:delete'), + onClick: () => + openConfirm( + async () => { + await handleDeleteTask(task.id); + fetchData(); // 删除后重新获取数据 + }, + undefined, + t('dashboard_evaluation:confirm_delete_task') + )() + } + ] + } + ]} + Button={} + /> +
+
+
+ + + + + + + + setIsTemplateModalOpen(false)} + onConfirm={handleTemplateConfirm} + /> + setIsConfigParamsModalOpen(false)} + onConfirm={handleConfigParamsConfirm} + /> + {isCreateModalOpen && ( + setIsCreateModalOpen(false)} + onSubmit={handleCreateTaskConfirm} + /> + )} + + ); +}; + +export default EvaluationTasks; From 150118898ea096da70a9a361f66ba2a2c14d2118 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=86=8A=E6=B6=9B10037?= <10037@sangfor.com> Date: Tue, 2 Sep 2025 22:06:53 +0800 Subject: [PATCH 017/255] [feat] updated some English translations --- packages/web/i18n/en/account.json | 136 ++--- packages/web/i18n/en/account_apikey.json | 2 +- packages/web/i18n/en/account_bill.json | 88 +-- packages/web/i18n/en/account_info.json | 134 ++--- packages/web/i18n/en/account_promotion.json | 14 +- packages/web/i18n/en/account_setting.json | 8 +- packages/web/i18n/en/account_team.json | 468 +++++++-------- packages/web/i18n/en/account_thirdParty.json | 26 +- packages/web/i18n/en/app.json | 586 +++++++++---------- packages/web/i18n/en/chat.json | 228 ++++---- packages/web/i18n/en/common.json | 256 ++++---- 11 files changed, 973 insertions(+), 973 deletions(-) diff --git a/packages/web/i18n/en/account.json b/packages/web/i18n/en/account.json index 870cd61b7679..bb87544de36e 100644 --- a/packages/web/i18n/en/account.json +++ b/packages/web/i18n/en/account.json @@ -1,86 +1,86 @@ { - "account_team.delete_dataset": "Delete the knowledge base", + "account_team.delete_dataset": "Delete knowledge base", "active_model": "Available models", - "add_default_model": "Add a preset model", + "add_default_model": "Add preset model", "api_key": "API key", - "bills_and_invoices": "Bills", - "channel": "Channel", + "bills_and_invoices": "Bills and invoices", + "channel": "Model channels", "config_model": "Model configuration", - "confirm_logout": "Confirm to log out?", - "create_channel": "Add new channel", - "create_model": "Add new model", - "custom_model": "custom model", - 
"default_model": "Default model", + "confirm_logout": "Are you sure you want to log out?", + "create_channel": "Add channel", + "create_model": "Add model", + "custom_model": "Custom model", + "default_model": "Preset model", "default_model_config": "Default model configuration", - "logout": "Sign out", - "model.active": "Active", + "logout": "Log out", + "model.active": "Enable", "model.alias": "Alias", - "model.alias_tip": "The name of the model displayed in the system is convenient for users to understand.", - "model.censor": "Censor check", - "model.censor_tip": "If sensitive verification is required, turn on this switch", - "model.charsPointsPrice": "Chars Price", - "model.charsPointsPrice_tip": "Combine the model input and output for Token billing. If the language model is configured with input and output billing separately, the input and output will be calculated separately.", - "model.custom_cq_prompt": "Custom question classification prompt words", - "model.custom_cq_prompt_tip": "Override the system's default question classification prompt words, which default to:\n\"\"\"\n{{prompt}}\n\"\"\"", - "model.custom_extract_prompt": "Custom content extraction prompt words", - "model.custom_extract_prompt_tip": "The reminder word of the coverage of the system, the default:\n\"\"\"\n{{prompt}}\n\"\"\"", - "model.dataset_process": "Dataset file parse", - "model.defaultConfig": "Additional Body parameters", - "model.defaultConfig_tip": "Each request will carry this additional Body parameter.", - "model.default_config": "Body extra fields", - "model.default_config_tip": "When initiating a conversation request, merge this configuration. 
\nFor example:\n\"\"\"\n{\n \"temperature\": 1,\n \"max_tokens\": null\n}\n\"\"\"", + "model.alias_tip": "Display name of the model in the system for easier understanding.", + "model.censor": "Sensitive content check", + "model.censor_tip": "Enable this option if sensitive content check is required.", + "model.charsPointsPrice": "Overall price", + "model.charsPointsPrice_tip": "Combine input and output for token billing. If input and output prices are configured separately, they will be calculated individually.", + "model.custom_cq_prompt": "Custom prompt for question classification", + "model.custom_cq_prompt_tip": "Overwrite the default system prompt for question classification. Default:\n\"\"\"\n{{prompt}}\n\"\"\"", + "model.custom_extract_prompt": "Custom prompt for content extraction", + "model.custom_extract_prompt_tip": "Overwrite the default system prompt. Default:\n\"\"\"\n{{prompt}}\n\"\"\"", + "model.dataset_process": "Knowledge base file processing", + "model.defaultConfig": "Extra Body parameter", + "model.defaultConfig_tip": "The extra Body parameter will be included in every request.", + "model.default_config": "Extra Body field", + "model.default_config_tip": "Merge these settings when users send a chat request. Example:\n\"\"\"\n{\n \"temperature\": 1,\n \"max_tokens\": null\n}\n\"\"\"", "model.default_model": "Default model", "model.default_system_chat_prompt": "Default prompt", - "model.default_system_chat_prompt_tip": "When the model talks, it will carry this default prompt word.", - "model.default_token": "Default tokens", - "model.default_token_tip": "The length of the default text block of the index model must be less than the maximum length above", - "model.delete_model_confirm": "Confirm to delete this model?", - "model.edit_model": "Model parameter editing", - "model.function_call": "Function Call", - "model.function_call_tip": "If the model supports function calling, turn on this switch. 
\nTool calls have higher priority.", + "model.default_system_chat_prompt_tip": "The default prompt will be included in all conversations with the model.", + "model.default_token": "Default chunk size", + "model.default_token_tip": "Default text chunk size for indexing models. Must be smaller than the maximum context length.", + "model.delete_model_confirm": "Are you sure you want to delete the model?", + "model.edit_model": "Edit model parameters", + "model.function_call": "Function calling", + "model.function_call_tip": "Enable this option if the model supports function calling. Tool calling takes higher priority.", "model.input_price": "Input price", - "model.input_price_tip": "Language model input price. If this item is configured, the model comprehensive price will be invalid.", - "model.json_config": "File config", - "model.json_config_confirm": "Confirm to use this configuration for override?", - "model.json_config_tip": "Configure the model through the configuration file. After clicking Confirm, the entered configuration will be used for full coverage. Please ensure that the configuration file is entered correctly. \nIt is recommended to copy the current configuration file for backup before operation.", - "model.max_quote": "KB max quote", + "model.input_price_tip": "Input price for the model. If configured, the overall price will become invalid.", + "model.json_config": "Configuration file", + "model.json_config_confirm": "Are you sure you want to apply the configuration?", + "model.json_config_tip": "The configuration file will be used to overwrite the current configuration of the model. 
Please make sure the configuration file is correct and back up the current configuration first.", + "model.max_quote": "Max knowledge base references", "model.max_temperature": "Max temperature", "model.model_id": "Model ID", - "model.model_id_tip": "The unique identifier of the model, that is, the value of the actual request to the service provider model, needs to correspond to the model in the OneAPI channel.", - "model.normalization": "Normalization processing", - "model.normalization_tip": "If the Embedding API does not normalize vector values, the switch can be enabled and the system will normalize.\n\nUnnormalized APIs, which are represented by the vector search score greater than 1.", + "model.model_id_tip": "Unique identifier of the model. This must match the model value from the provider and correspond to the OneAPI channel.", + "model.normalization": "Normalization", + "model.normalization_tip": "If the Embedding API does not normalize vectors, enable this option to let the system normalize them.\nWithout normalization, the vector retrieval score will be greater than 1.", "model.output_price": "Output price", - "model.output_price_tip": "The language model output price. If this item is configured, the model comprehensive price will be invalid.", + "model.output_price_tip": "Output price for the model. If configured, the overall price will become invalid.", "model.param_name": "Parameter name", - "model.reasoning": "Support output thinking", - "model.reasoning_tip": "For example, Deepseek-reasoner can output the thinking process.", - "model.request_auth": "Custom key", - "model.request_auth_tip": "When making a request to a custom request address, carry the request header: Authorization: Bearer xxx to make the request.", - "model.request_url": "Custom url", - "model.request_url_tip": "If you fill in this value, you will initiate a request directly without passing. 
\nYou need to follow the API format of Openai and fill in the full request address, such as\n\nLLM: {Host}}/v1/Chat/Completions\n\nEmbedding: {host}}/v1/embeddings\n\nSTT: {Host}/v1/Audio/Transcriptions\n\nTTS: {Host}}/v1/Audio/Speech\n\nRERARARARARARARANK: {Host}}/v1/RERARARARARARARARARARANK", + "model.reasoning": "Reasoning output", + "model.reasoning_tip": "For example, Deepseek-reasoner can output the reasoning process.", + "model.request_auth": "Custom request key", + "model.request_auth_tip": "When you send requests to the custom request URL, include the header Authorization: Bearer xxx.", + "model.request_url": "Custom request URL", + "model.request_url_tip": "If configured, requests will be sent directly to this address without passing through OneAPI. Follow the OpenAI API format and provide a complete request URL. Example:\nLLM: {{host}}/v1/chat/completions\nEmbedding: {{host}}/v1/embeddings\nSTT: {{host}}/v1/audio/transcriptions\nTTS: {{host}}/v1/audio/speech\nRerank: {{host}}/v1/rerank", "model.response_format": "Response format", - "model.show_stop_sign": "Display stop sequence parameters", - "model.show_top_p": "Show Top-p parameters", - "model.test_model": "Model testing", - "model.tool_choice": "Tool choice", - "model.tool_choice_tag": "ToolCall", - "model.tool_choice_tip": "If the model supports tool calling, turn on this switch", - "model.used_in_classify": "Used for problem classification", - "model.used_in_extract_fields": "for text extraction", - "model.used_in_query_extension": "For problem optimization", - "model.used_in_tool_call": "Used for tool call nodes", - "model.vision": "Vision model", + "model.show_stop_sign": "Show stop sequence parameter", + "model.show_top_p": "Show Top-p parameter", + "model.test_model": "Model test", + "model.tool_choice": "Enable tool calling", + "model.tool_choice_tag": "Tool calling", + "model.tool_choice_tip": "Enable this option if the model supports tool calling.", + "model.used_in_classify": "Question 
classification", + "model.used_in_extract_fields": "Text extraction", + "model.used_in_query_extension": "Question optimization", + "model.used_in_tool_call": "Tool calling node", + "model.vision": "Image recognition", "model.vision_tag": "Vision", - "model.vision_tip": "If the model supports image recognition, turn on this switch.", - "model.voices": "voice role", - "model.voices_tip": "Configure multiple through an array, for example:\n\n[\n {\n \"label\": \"Alloy\",\n \"value\": \"alloy\"\n },\n {\n \"label\": \"Echo\",\n \"value\": \"echo\"\n }\n]", - "model_provider": "Model Provider", - "notifications": "Notify", - "personal_information": "Personal", + "model.vision_tip": "Enable this option if the model supports image recognition.", + "model.voices": "Voice role", + "model.voices_tip": "Configure multiple roles using an array. Example:\n[\n {\n \"label\": \"Alloy\",\n \"value\": \"alloy\"\n },\n {\n \"label\": \"Echo\",\n \"value\": \"echo\"\n }\n]", + "model_provider": "Model provider", + "notifications": "Notification", + "personal_information": "Personal info", "personalization": "Personalization", - "promotion_records": "Promotions", - "reset_default": "Restore the default configuration", - "team": "Team", - "third_party": "Third Party", + "promotion_records": "Promotion record", + "reset_default": "Reset to default", + "team": "Team management", + "third_party": "Third-party account", "usage_records": "Usage" } diff --git a/packages/web/i18n/en/account_apikey.json b/packages/web/i18n/en/account_apikey.json index 47bb633b9e7a..cff5a8fd3937 100644 --- a/packages/web/i18n/en/account_apikey.json +++ b/packages/web/i18n/en/account_apikey.json @@ -1,3 +1,3 @@ { - "key_tips": "You can use API keys to access some specific interfaces (you cannot access the application, you need to use the API key in the application to access the application)" + "key_tips": "You can use an API key to access certain interfaces. 
(Accessing apps requires in-app API keys.)" } \ No newline at end of file diff --git a/packages/web/i18n/en/account_bill.json b/packages/web/i18n/en/account_bill.json index f90e6b9c9278..79c37a47f153 100644 --- a/packages/web/i18n/en/account_bill.json +++ b/packages/web/i18n/en/account_bill.json @@ -1,62 +1,62 @@ { - "Invoice_document": "Invoice documents", - "all": "all", - "back": "return", - "bank_account": "Account opening account", - "bank_name": "Bank of deposit", + "Invoice_document": "Invoice file", + "all": "All", + "back": "Back", + "bank_account": "Account number", + "bank_name": "Bank name", "bill_detail": "Bill details", - "bill_record": "billing records", - "click_to_download": "Click to download", + "bill_record": "Billing record", + "click_to_download": "Download", "company_address": "Company address", - "company_phone": "Company phone number", + "company_phone": "Company phone", "completed": "Completed", - "confirm": "confirm", - "contact_phone": "Contact phone number", - "contact_phone_void": "Contact phone number format error", - "day": "sky", + "confirm": "OK", + "contact_phone": "Phone number", + "contact_phone_void": "Invalid phone number.", + "day": "Days", "default_header": "Default header", "detail": "Details", "email_address": "Email address", - "extra_ai_points": "AI points calculation standard", - "extra_dataset_size": "Additional knowledge base capacity", - "generation_time": "Generation time", - "has_invoice": "Whether the invoice has been issued", + "extra_ai_points": "Extra AI points", + "extra_dataset_size": "Extra knowledge base capacity", + "generation_time": "Time generated", + "has_invoice": "Invoice issued", "invoice_amount": "Invoice amount", "invoice_detail": "Invoice details", - "invoice_sending_info": "The invoice will be sent to your mailbox within 3-7 working days, please be patient.", + "invoice_sending_info": "The invoice will be sent to the specified email address within 3-7 workdays. 
Please wait.", "mm": "mm", - "month": "moon", - "need_special_invoice": "Do you need a special ticket?", - "no": "no", - "no_invoice_record": "No bill record~", - "no_invoice_record_tip": "No invoicing record yet", - "order_number": "Order number", + "month": "Monthly", + "need_special_invoice": "VAT invoice required", + "no": "No", + "no_invoice_record": "No data available.", + "no_invoice_record_tip": "No data available.", + "order_number": "Order ID", "order_type": "Order type", - "organization_name": "Organization name", + "organization_name": "Organization", "payment_method": "Payment method", "payway_coupon": "Redeem code", "rerank": "Rerank", - "save": "save", - "save_failed": "Save exception", - "save_success": "Saved successfully", - "status": "state", - "sub_mode_custom": "Customize", - "submit_failed": "Submission failed", - "submit_success": "Submission successful", + "save": "Save", + "save_failed": "Error occurred during the operation.", + "save_success": "Saved successfully.", + "status": "Status", + "sub_mode_custom": "Custom", + "submit_failed": "Submission failed.", + "submit_success": "Submitted successfully.", "submitted": "Submitted", "subscription_mode_month": "Duration", - "subscription_package": "Subscription package", - "subscription_period": "Subscription cycle", + "subscription_package": "Subscription plan", + "subscription_period": "Periodic", "support_wallet_amount": "Amount", - "support_wallet_apply_invoice": "Billable bills", - "support_wallet_bill_tag_invoice": "bill invoice", - "support_wallet_invoicing": "Invoicing", - "time": "time", - "total_amount": "lump sum", - "type": "type", - "unit_code": "unified credit code", - "unit_code_void": "Unified credit code format error", - "update": "renew", - "yes": "yes", - "yuan": "¥{{amount}}" + "support_wallet_apply_invoice": "Invoiceable bill", + "support_wallet_bill_tag_invoice": "Bill invoice", + "support_wallet_invoicing": "Issue invoice", + "time": "Time", + "total_amount": "Total 
amount", + "type": "Type", + "unit_code": "Unified social credit code", + "unit_code_void": "Invalid unified social credit code.", + "update": "Update", + "yes": "Yes", + "yuan": "{{amount}} CNY" } diff --git a/packages/web/i18n/en/account_info.json b/packages/web/i18n/en/account_info.json index 15a520732bef..955d06c2622a 100644 --- a/packages/web/i18n/en/account_info.json +++ b/packages/web/i18n/en/account_info.json @@ -1,82 +1,82 @@ { - "account_duplicate": "account", - "account_knowledge_base_cleanup_warning": "When the free version team does not log in to the system for 30 consecutive days, the system will automatically clean up the account knowledge base.", - "active": "Taking effect", + "account_duplicate": "Account", + "account_knowledge_base_cleanup_warning": "If a team using the free edition is inactive for 30 days, its knowledge bases will be cleared automatically.", + "active": "Active", "ai_points": "AI points", "ai_points_calculation_standard": "AI points", - "ai_points_usage": "AI points", - "ai_points_usage_tip": "Each time the AI ​​model is called, a certain amount of AI points will be consumed. \nFor specific calculation standards, please refer to the \"Billing Standards\" above.", - "app_amount": "App amount", - "avatar": "Avatar", - "avatar_selection_exception": "Abnormal avatar selection", - "balance": "balance", - "billing_standard": "Standards", + "ai_points_usage": "AI point usage", + "ai_points_usage_tip": "Each AI model call consumes AI points. 
For details, refer to the billing standard above.", + "app_amount": "Apps", + "avatar": "Profile image", + "avatar_selection_exception": "Error occurred while selecting the profile image.", + "balance": "Balance", + "billing_standard": "Billing standard", "cancel": "Cancel", - "change": "change", - "choose_avatar": "Click to select avatar", - "click_modify_nickname": "Click to modify nickname", - "code_required": "Verification code cannot be empty", - "confirm": "confirm", - "confirm_password": "Confirm Password", - "contact_customer_service": "Contact customer service", + "change": "Change", + "choose_avatar": "Click to select a profile image.", + "click_modify_nickname": "Click to change nickname", + "code_required": "Verification code is required.", + "confirm": "OK", + "confirm_password": "Confirm password", + "contact_customer_service": "Contact Customer Service", "contact_us": "Contact us", "current_package": "Current plan", - "current_token_price": "Current points price", - "dataset_amount": "Dataset amount", - "effective_time": "Effective time", - "email_label": "Mail", - "exchange": "Exchange", - "exchange_failure": "Redemption failed", - "exchange_success": "Redemption successful", + "current_token_price": "Current point price", + "dataset_amount": "Knowledge bases", + "effective_time": "Valid since", + "email_label": "Email", + "exchange": "Redeem", + "exchange_failure": "Operation failed.", + "exchange_success": "Redeemed successfully.", "expiration_time": "Expiration time", "expired": "Expired", - "general_info": "General information", - "group": "Group", - "help_chatbot": "robot assistant", + "general_info": "Basics", + "group": "groups", + "help_chatbot": "Bot assistant", "help_document": "Help documentation", - "knowledge_base_capacity": "Dataset usages", - "manage": "Manage", - "member_amount": "Member amount", - "member_name": "Name", - "month": "moon", - "new_password": "New Password", - "notification_receiving": "Notify", - "old_password": "Old 
Password", - "package_and_usage": "Plans", - "package_details": "Details", - "package_expiry_time": "Expired", - "package_usage_rules": "Package usage rules: The system will give priority to using more advanced packages, and the original unused packages will take effect later.", + "knowledge_base_capacity": "Knowledge base capacity", + "manage": "Management", + "member_amount": "Members", + "member_name": "Member name", + "month": "Monthly", + "new_password": "New password", + "notification_receiving": "Notification recipient", + "old_password": "Current password", + "package_and_usage": "Plans and usage", + "package_details": "Plan details", + "package_expiry_time": "Plan expiration time", + "package_usage_rules": "Plan usage rules: Higher-level plans are used first. Unused lower-level plans will be applied later.", "password": "Password", - "password_mismatch": "Password Inconsistency: Two passwords are inconsistent", - "password_tip": "Password must be at least 8 characters long and contain at least two combinations: numbers, letters, or special characters", - "password_update_error": "Exception when changing password", - "password_update_success": "Password changed successfully", - "pending_usage": "To be used", - "phone_label": "Phone number", - "please_bind_contact": "Please bind the contact information", - "please_bind_notification_receiving_path": "Please bind the notification receiving method first", - "purchase_extra_package": "Upgrade", - "redeem_coupon": "Redeem coupon", - "reminder_create_bound_notification_account": "Remind the creator to bind the notification account", - "reset_password": "reset password", - "resource_usage": "Usages", - "select_avatar": "Click to select avatar", - "standard_package_and_extra_resource_package": "Includes standard and extra plans", - "storage_capacity": "Storage capacity", - "team_balance": "Balance", - "team_info": "Team Information", - "token_validity_period": "Points are valid for one year", - "tokens": "integral", 
- "type": "type", + "password_mismatch": "Passwords do not match.", + "password_tip": "Password must be at least 8 characters long and contain at least 2 of the following: digits, letters, and special characters.", + "password_update_error": "Failed to change the password.", + "password_update_success": "Password changed successfully.", + "pending_usage": "Available", + "phone_label": "Mobile number", + "please_bind_contact": "Please specify contact information.", + "please_bind_notification_receiving_path": "Please specify a notification recipient first.", + "purchase_extra_package": "Purchase extra plan", + "redeem_coupon": "Redeem code", + "reminder_create_bound_notification_account": "Remind the creator to specify an account to receive notifications.", + "reset_password": "Reset password", + "resource_usage": "Resource usage", + "select_avatar": "Click to select a profile image.", + "standard_package_and_extra_resource_package": "Includes the standard plan and extra resource packages.", + "storage_capacity": "Max shards", + "team_balance": "Team balance", + "team_info": "Team info", + "token_validity_period": "Points are valid for 1 year.", + "tokens": "Points", + "type": "Type", "unlimited": "Unlimited", "update_password": "Change password", - "update_success_tip": "Update data successfully", - "upgrade_package": "Upgrade", - "usage_balance": "Use balance: Use balance", - "usage_balance_notice": "Due to the system upgrade, the original \"automatic renewal and deduction from balance\" mode has been cancelled, and the balance recharge entrance has been closed. \nYour balance can be used to purchase points", - "user_account": "Username", + "update_success_tip": "Data updated successfully.", + "upgrade_package": "Upgrade plan", + "usage_balance": "Payment method: Balance", + "usage_balance_notice": "Due to a system upgrade, renewal with auto reduction from balance has been disabled, and balance top-up is no longer available. 
Your balance can still be used to purchase points.", + "user_account": "Account", "user_team_team_name": "Team", "verification_code": "Verification code", - "you_can_convert": "you can redeem", - "yuan": "Yuan" + "you_can_convert": "You can redeem", + "yuan": "CNY." } diff --git a/packages/web/i18n/en/account_promotion.json b/packages/web/i18n/en/account_promotion.json index 6162052bfe17..b3d40b3cd936 100644 --- a/packages/web/i18n/en/account_promotion.json +++ b/packages/web/i18n/en/account_promotion.json @@ -1,13 +1,13 @@ { "amount": "Amount", - "cashback_ratio": "Cash back ratio", - "cashback_ratio_description": "When your friends recharge, you will receive a certain percentage of your balance as a reward.", + "cashback_ratio": "Cashback rate", + "cashback_ratio_description": "You will receive a balance reward proportionate to your friend's top-up amount.", "copy_invite_link": "Copy invitation link", "earnings": "Income (¥)", "invite_url": "Invitation link", - "invite_url_tip": "Friends who register through this link will be permanently bound to you, and you will receive a certain balance reward when they recharge.\n \nIn addition, when your friends register using their mobile phone number, you will immediately receive a 5 yuan reward.\n \nRewards are sent to your default team.", - "no_invite_records": "No invitation record yet", - "time": "time", - "total_invited": "Cumulative number of invitees", - "type": "type" + "invite_url_tip": "Friends who register through this link will be permanently linked to your account. 
Each time they top up, you will receive a balance reward.\nIf they register with a mobile number, you will get an immediate 5 CNY bonus.\nAll rewards are credited to your default team account.", + "no_invite_records": "No data available.", + "time": "Time", + "total_invited": "Total invites", + "type": "Type" } \ No newline at end of file diff --git a/packages/web/i18n/en/account_setting.json b/packages/web/i18n/en/account_setting.json index 70faef92a7ac..142f701c89ce 100644 --- a/packages/web/i18n/en/account_setting.json +++ b/packages/web/i18n/en/account_setting.json @@ -1,6 +1,6 @@ { - "language": "language", - "personalization": "personalization", - "timezone": "time zone", - "update_data_success": "Update data successfully" + "language": "Language", + "personalization": "Personalization", + "timezone": "Time zone", + "update_data_success": "Data updated successfully." } \ No newline at end of file diff --git a/packages/web/i18n/en/account_team.json b/packages/web/i18n/en/account_team.json index 5c8a62e9e762..52afe7a478b2 100644 --- a/packages/web/i18n/en/account_team.json +++ b/packages/web/i18n/en/account_team.json @@ -1,267 +1,267 @@ { "1person": "1 person", - "1year": "1 Year", - "30mins": "30 Minutes", - "7days": "7 Days", - "accept": "accept", - "action": "operate", - "admin_add_plan": "Add a team package", - "admin_add_user": "Add a user", - "admin_change_license": "Change of license", - "admin_create_app_template": "Add a template", - "admin_create_plugin": "Add plugins", - "admin_create_plugin_group": "Create plugin grouping", - "admin_delete_app_template": "Delete the template", - "admin_delete_plugin": "Plugin Delete", - "admin_delete_plugin_group": "Delete plugin grouping", - "admin_delete_template_type": "Delete template classification", - "admin_finish_invoice": "Issuing an invoice", - "admin_login": "Administrator login", - "admin_save_template_type": "Update template classification", - "admin_send_system_inform": "Send system notifications", - 
"admin_update_app_template": "Update templates", - "admin_update_plan": "Editorial Team Package", - "admin_update_plugin": "Plugin Update", - "admin_update_plugin_group": "Plugin group update", - "admin_update_system_config": "System configuration update", - "admin_update_system_modal": "System announcement configuration", - "admin_update_team": "Edit team information", - "admin_update_user": "Edit User", - "assign_permission": "Permission change", - "audit_log": "audit", - "change_department_name": "Department Editor", - "change_member_name": "Member name change", + "1year": "1 year", + "30mins": "30 mins", + "7days": "7 days", + "accept": "Accept", + "action": "Operation", + "admin_add_plan": "Add team plan", + "admin_add_user": "Add user", + "admin_change_license": "Change license", + "admin_create_app_template": "Add template", + "admin_create_plugin": "Add plugin", + "admin_create_plugin_group": "Create plugin group", + "admin_delete_app_template": "Delete template", + "admin_delete_plugin": "Delete plugin", + "admin_delete_plugin_group": "Delete plugin group", + "admin_delete_template_type": "Delete template category", + "admin_finish_invoice": "Issue invoice", + "admin_login": "Admin login", + "admin_save_template_type": "Update template category", + "admin_send_system_inform": "Send system notification", + "admin_update_app_template": "Update template", + "admin_update_plan": "Edit team plan", + "admin_update_plugin": "Update plugin", + "admin_update_plugin_group": "Update plugin group", + "admin_update_system_config": "Update system configuration", + "admin_update_system_modal": "Configure system announcement", + "admin_update_team": "Edit team info", + "admin_update_user": "Edit user info", + "assign_permission": "Change permission", + "audit_log": "Audit", + "change_department_name": "Edit department", + "change_member_name": "Change member name", "change_member_name_self": "Change member name", - "change_notification_settings": "Change the way to 
receive notifications", - "change_password": "change password", - "confirm_delete_from_org": "Confirm to move {{username}} out of the department?", - "confirm_delete_from_team": "Confirm to move {{username}} out of the team?", - "confirm_delete_group": "Confirm to delete group?", - "confirm_delete_org": "Confirm to delete organization?", - "confirm_forbidden": "Confirm forbidden", - "confirm_leave_team": "Confirmed to leave the team? \nAfter exiting, all your resources in the team are transferred to the team owner.", + "change_notification_settings": "Change notification recipient", + "change_password": "Change password", + "confirm_delete_from_org": "Are you sure you want to remove {{username}} from the department?", + "confirm_delete_from_team": "Are you sure you want to remove the member ({{username}}) from the team?", + "confirm_delete_group": "Are you sure you want to delete the group?", + "confirm_delete_org": "Are you sure you want to delete the department?", + "confirm_forbidden": "Confirm", + "confirm_leave_team": "Are you sure you want to leave the team?\nAll your resources in this team will be transferred to the team owner.", "copy_link": "Copy link", "create_api_key": "Create API key", - "create_app": "Create an application", - "create_app_copy": "Create a copy of the application", - "create_app_folder": "Create an application folder", - "create_app_publish_channel": "Create a sharing channel", - "create_collection": "Create a collection", + "create_app": "Create app", + "create_app_copy": "Create app replica", + "create_app_folder": "Create app folder", + "create_app_publish_channel": "Create sharing channel", + "create_collection": "Create collection", "create_data": "Insert data", - "create_dataset": "Create a knowledge base", - "create_dataset_folder": "Create a Knowledge Base Folder", - "create_department": "Create a sub-department", + "create_dataset": "Create knowledge base", + "create_dataset_folder": "Create knowledge base folder", + 
"create_department": "Create sub-department", "create_group": "Create group", - "create_invitation_link": "Create Invitation Link", - "create_invoice": "Issuing invoices", - "create_org": "Create organization", - "create_sub_org": "Create sub-organization", - "dataset.api_file": "API Import", - "dataset.common_dataset": "Dataset", - "dataset.external_file": "External File", - "dataset.feishu_dataset": "Feishu Spreadsheet", + "create_invitation_link": "Create invitation link", + "create_invoice": "Issue invoice", + "create_org": "Create department", + "create_sub_org": "Create sub-department", + "dataset.api_file": "API import", + "dataset.common_dataset": "Knowledge base", + "dataset.external_file": "External file", + "dataset.feishu_dataset": "Feishu bitable", "dataset.folder_dataset": "Folder", - "dataset.website_dataset": "Website Sync", - "dataset.yuque_dataset": "Yuque Knowledge Base", - "delete": "delete", - "delete_api_key": "Delete the API key", - "delete_app": "Delete the workbench application", - "delete_app_collaborator": "App permissions delete", - "delete_app_publish_channel": "Delete the publishing channel", - "delete_collection": "Delete a collection", + "dataset.website_dataset": "Website sync", + "dataset.yuque_dataset": "Yuque knowledge base", + "delete": "Delete", + "delete_api_key": "Delete API key", + "delete_app": "Delete workspace app", + "delete_app_collaborator": "Remove app permission", + "delete_app_publish_channel": "Delete publishing channel", + "delete_collection": "Delete collection", "delete_data": "Delete data", - "delete_dataset": "Delete the knowledge base", - "delete_dataset_collaborator": "Knowledge Base Permission Deletion", + "delete_dataset": "Delete knowledge base", + "delete_dataset_collaborator": "Remove knowledge base permission", "delete_department": "Delete sub-department", - "delete_evaluation": "Delete application review data", - "delete_from_org": "Move out of department", - "delete_from_team": "Move out of the 
team", - "delete_group": "Delete a group", - "delete_org": "Delete organization", - "department": "department", - "edit_info": "Edit information", + "delete_evaluation": "Delete app evaluation data", + "delete_from_org": "Remove user from department", + "delete_from_team": "Remove user from team", + "delete_group": "Delete group", + "delete_org": "Delete department", + "department": "Department", + "edit_info": "Edit", "edit_member": "Edit user", - "edit_member_tip": "Name", - "edit_org_info": "Edit organization information", + "edit_member_tip": "Member name", + "edit_org_info": "Edit department info", "expires": "Expiration time", - "export_app_chat_log": "Export the app chat history", - "export_bill_records": "Export billing history", + "export_app_chat_log": "Export app chat history", + "export_bill_records": "Export billing record", "export_dataset": "Export knowledge base", - "export_members": "Export members", - "forbid_hint": "After forbidden, this invitation link will become invalid. This action is irreversible. Are you sure you want to deactivate?", - "forbid_success": "Forbid success", - "forbidden": "Forbidden", - "group": "group", + "export_members": "Export member", + "forbid_hint": "Disabled invitation links will become invalid and cannot be restored. 
Are you sure you want to disable this invitation link?", + "forbid_success": "Disabled successfully.", + "forbidden": "Disable", + "group": "Group", "group_name": "Group name", - "handle_invitation": "Handle Invitation", - "has_forbidden": "Forbidden", + "handle_invitation": "Manage team invitations", + "has_forbidden": "Expired", "has_invited": "Invited", - "ignore": "Ignore", - "inform_level_common": "Normal", - "inform_level_emergency": "Emergency", + "ignore": "Ignored", + "inform_level_common": "Moderate", + "inform_level_emergency": "Critical", "inform_level_important": "Important", - "invitation_copy_link": "[{{systemName}}] {{userName}} invites you to join the {{teamName}} team, link: {{url}}", - "invitation_link_auto_clean_hint": "Expired links will be automatically cleaned up after 30 days", + "invitation_copy_link": "[{{systemName}}] {{userName}} has invited you to join the team ({{teamName}}). Click the following link to join: {{url}}", + "invitation_link_auto_clean_hint": "Expired links will be automatically cleared 30 days later.", "invitation_link_description": "Link description", - "invitation_link_list": "Invitation link list", - "invite_member": "Invite members", + "invitation_link_list": "Links", + "invite_member": "Invite member", "invited": "Invited", - "join_team": "Join the team", - "join_update_time": "Join/Update Time", - "kick_out_team": "Remove members", - "label_sync": "Tag sync", + "join_team": "Join team", + "join_update_time": "Time joined/updated", + "kick_out_team": "Remove member", + "label_sync": "Sync tag", "leave": "Resigned", - "leave_team_failed": "Leaving the team exception", - "log_admin_add_plan": "【{{name}}】A package will be added to a team with a team id [{{teamId}}]", - "log_admin_add_user": "【{{name}}】Create a user named [{{userName}}]", - "log_admin_change_license": "【{{name}}】Changed License", - "log_admin_create_app_template": "【{{name}}】Added a template named [{{templateName}}]", - "log_admin_create_plugin": 
"【{{name}}】Added plugin named [{{pluginName}}]", - "log_admin_create_plugin_group": "【{{name}}】Create a plug-in group called [{{groupName}}]", - "log_admin_delete_app_template": "【{{name}}】Deleted the template named [{{templateName}}]", - "log_admin_delete_plugin": "【{{name}}】Remove plugin named [{{pluginName}}]", - "log_admin_delete_plugin_group": "【{{name}}】Deleted plug-in grouping named [{{groupName}}]", - "log_admin_delete_template_type": "【{{name}}】Deleted the template classification named [{{typeName}}]", - "log_admin_finish_invoice": "【{{name}}】Issued an invoice to a team named [{{teamName}}]", - "log_admin_login": "【{{name}}】Logined in the administrator background", - "log_admin_save_template_type": "【{{name}}】Added template classification called [{{typeName}}]", - "log_admin_send_system_inform": "【{{name}}】Sent a system notification titled [{{informTitle}}], with the level of [{{level}}]", - "log_admin_update_app_template": "【{{name}}】Updated template information named [{{templateName}}]", - "log_admin_update_plan": "【{{name}}】Edited the package information of the team with the team id [{{teamId}}]", - "log_admin_update_plugin": "【{{name}}】Updated plugin information called [{{pluginName}}]", - "log_admin_update_plugin_group": "【{{name}}】Updated plug-in grouping called [{{groupName}}]", - "log_admin_update_system_config": "【{{name}}】Updated system configuration", - "log_admin_update_system_modal": "【{{name}}】The system announcement configuration was carried out", - "log_admin_update_team": "[{{name}}] Replace the team editing information named [{{teamName}}] to the team name: [{{newTeamName}}], balance: [{{newBalance}}]", - "log_admin_update_user": "Modify the user information of 【{{userName}}】", - "log_assign_permission": "[{{name}}] Updated the permissions of [{{objectName}}]: [Application creation: [{{appCreate}}], Knowledge Base: [{{datasetCreate}}], API Key: [{{apiKeyCreate}}], Management: [{{manage}}]]", - "log_change_department": "【{{name}}】Updated 
department【{{departmentName}}】", - "log_change_member_name": "【{{name}}】Rename member [{{memberName}}] to 【{{newName}}】", - "log_change_member_name_self": "【{{name}}】Change your member name to 【{{newName}}】", - "log_change_notification_settings": "【{{name}}】A change notification receiving method operation was carried out", - "log_change_password": "【{{name}}】The password change operation was performed", - "log_create_api_key": "【{{name}}】Create an API key named [{{keyName}}]", - "log_create_app": "【{{name}}】Created [{{appType}}] named [{{appName}}]", - "log_create_app_copy": "【{{name}}] Created a copy of [{{appType}}] named [{{appName}}]", - "log_create_app_folder": "【{{name}}】Create a folder named [{{folderName}}]", - "log_create_app_publish_channel": "[{{name}}] Created a channel named [{{channelName}}] for [{{appType}}] called [{{appName}}].", - "log_create_collection": "[{{name}}] Create a collection named [{{collectionName}}] in [{{datasetType}}] called [{{datasetName}}].", - "log_create_data": "[{{name}}] Insert data into a collection named [{{datasetName}}] in [{{datasetType}}] called [{{datasetName}}] into a collection named [{{collectionName}}]", - "log_create_dataset": "【{{name}}】Created 【{{datasetType}}】 named 【{{datasetName}}】", - "log_create_dataset_folder": "【{{name}}】Created a folder named {{folderName}}】", - "log_create_department": "【{{name}}】Department【{{departmentName}}】", - "log_create_group": "【{{name}}】Created group [{{groupName}}]", - "log_create_invitation_link": "【{{name}}】Created invitation link【{{link}}】", - "log_create_invoice": "【{{name}}】Invoice operation was carried out", - "log_delete_api_key": "【{{name}}】Deleted the API key named [{{keyName}}]", - "log_delete_app": "【{{name}}】Delete the [{{appType}}] named [{{appName}}]", - "log_delete_app_collaborator": "【{{name}}】Delete the [itemName] permission named [itemValueName] in [{{appType}}] named [{{appName}}] delete the [itemName] permission named [{{appName}}] named [{{appName}}] named 
[{{appName}}] deleted the [{{itemName}}] permission named [{{itemValueName}}] named [{{appType}}] named [{{appName}}].", - "log_delete_app_publish_channel": "[{{name}}] [{{appType}}] named [{{appName}}] deleted the channel named [{{channelName}}]", - "log_delete_collection": "[{{name}}] Deleted a collection named [{{collectionName}}] in [{{datasetType}}] named [{{datasetName}}].", - "log_delete_data": "[{{name}}] Delete data in a collection named [{{datasetName}}] in a collection named [{{datasetName}}]", - "log_delete_dataset": "【{{name}}】Deleted 【{{datasetType}}】 named [{{datasetName}}]", - "log_delete_dataset_collaborator": "【{{name}}】Updated the collaborators of 【{{appType}}】 named 【{{appName}}】 to: Organization: 【{{orgList}}】, Group: 【{{groupList}}】, Member 【{{tmbList}}】; updated the permissions to: Read permission: 【{{readPermission}}】, Write permission: 【{{writePermission}}】, Administrator permission: 【{{managePermission}}】", - "log_delete_department": "{{name}} deleted department {{departmentName}}", - "log_delete_evaluation": "【{{name}}】Deleted the evaluation data of [{{appType}}] named [{{appName}}]", - "log_delete_group": "{{name}} deleted group {{groupName}}", + "leave_team_failed": "Failed to leave the team.", + "log_admin_add_plan": "{{name}} added a plan for the team ({{teamId}}).", + "log_admin_add_user": "{{name}} created the user ({{userName}}).", + "log_admin_change_license": "{{name}} changed the license.", + "log_admin_create_app_template": "{{name}} added the template ({{templateName}}).", + "log_admin_create_plugin": "{{name}} added the plugin ({{pluginName}}).", + "log_admin_create_plugin_group": "{{name}} created the plugin group ({{groupName}}).", + "log_admin_delete_app_template": "{{name}} deleted the template ({{templateName}}).", + "log_admin_delete_plugin": "{{name}} deleted the plugin ({{pluginName}}).", + "log_admin_delete_plugin_group": "{{name}} deleted the plugin group ({{groupName}}).", + "log_admin_delete_template_type": 
"{{name}} deleted the template category ({{typeName}}).", + "log_admin_finish_invoice": "{{name}} issued an invoice for the team ({{teamName}}).", + "log_admin_login": "{{name}} logged in to the admin platform.", + "log_admin_save_template_type": "{{name}} added the template category ({{typeName}}).", + "log_admin_send_system_inform": "{{name}} sent a system notification titled {{informTitle}} with priority {{level}}.", + "log_admin_update_app_template": "{{name}} updated the template ({{templateName}}).", + "log_admin_update_plan": "{{name}} edited plan information for the team ({{teamId}}).", + "log_admin_update_plugin": "{{name}} updated the plugin ({{pluginName}}).", + "log_admin_update_plugin_group": "{{name}} updated the plugin group ({{groupName}}).", + "log_admin_update_system_config": "{{name}} updated system configuration.", + "log_admin_update_system_modal": "{{name}} configured a system announcement.", + "log_admin_update_team": "{{name}} edited the information (Name: {{newTeamName}}, Balance: {{newBalance}}) of the team ({{teamName}}).", + "log_admin_update_user": "Information of the user ({{userName}}) was modified.", + "log_assign_permission": "{{name}} updated the permissions (App creation: {{appCreate}}, Knowledge base: {{datasetCreate}}, API key: {{apiKeyCreate}}, and Management: {{manage}}) of {{objectName}}.", + "log_change_department": "{{name}} updated the department ({{departmentName}}).", + "log_change_member_name": "{{name}} renamed changed the name of the member from {{memberName}} to {{newName}}.", + "log_change_member_name_self": "{{name}} changed their own name from {{oldName}} to {{newName}}.", + "log_change_notification_settings": "{{name}} changed the notification recipient.", + "log_change_password": "{{name}} changed the password.", + "log_create_api_key": "{{name}} created the API key ({{keyName}}).", + "log_create_app": "{{name}} created the {{appType}} app ({{appName}}).", + "log_create_app_copy": "{{name}} created a replica for 
the {{appType}} app ({{appName}}).", + "log_create_app_folder": "{{name}} created the folder ({{folderName}}).", + "log_create_app_publish_channel": "{{name}} created the channel ({{channelName}}) for the {{appType}} app ({{appName}}).", + "log_create_collection": "{{name}} created the collection ({{collectionName}}) in the {{datasetType}} named {{datasetName}}.", + "log_create_data": "{{name}} inserted data into the collection named {{collectionName}} in the {{datasetType}} named {{datasetName}}.", + "log_create_dataset": "{{name}} deleted the {{datasetType}} named {{datasetName}}.", + "log_create_dataset_folder": "{{name}} created the folder ({{folderName}}).", + "log_create_department": "{{name}} created the department ({{departmentName}}).", + "log_create_group": "{{name}} created the group ({{groupName}}).", + "log_create_invitation_link": "{{name}} created the invitation link ({{link}}).", + "log_create_invoice": "{{name}} issued an invoice.", + "log_delete_api_key": "{{name}} deleted the API key ({{keyName}}).", + "log_delete_app": "{{name}} deleted the {{appType}} app ({{appName}}).", + "log_delete_app_collaborator": "{{name}} deleted the {{itemName}} permission ({{itemValueName}}) in the {{appType}} app ({{appName}}).", + "log_delete_app_publish_channel": "{{name}} deleted the channel ({{channelName}}) from the {{appType}} app ({{appName}}).", + "log_delete_collection": "{{name}} deleted the collection ({{collectionName}}) from the {{datasetType}} named {{datasetName}}.", + "log_delete_data": "{{name}} deleted data from the collection named {{collectionName}} in the {{datasetType}} named {{datasetName}}.", + "log_delete_dataset": "{{name}} deleted the {{datasetType}} named {{datasetName}}.", + "log_delete_dataset_collaborator": "{{name}} deleted the {{itemName}} permission ({{itemValueName}}) from the {{datasetType}} named {{datasetName}}.", + "log_delete_department": "{{name}} deleted the department ({{departmentName}}).", + "log_delete_evaluation": 
"{{name}} deleted evaluation data for the {{appType}} app ({{appName}}).", + "log_delete_group": "{{name}} deleted the group ({{groupName}}).", "log_details": "Details", - "log_export_app_chat_log": "【{{name}}】Export a chat history called [{{appName}}] called [{{appType}}]", - "log_export_bill_records": "【{{name}}】Export the billing record", - "log_export_dataset": "[{{name}}] Export [{{datasetType}}] called [{{datasetName}}]", - "log_join_team": "【{{name}}】Join the team through the invitation link 【{{link}}】", - "log_kick_out_team": "{{name}} removed member {{memberName}}", - "log_login": "【{{name}}】Logined in the system", - "log_move_app": "【{{name}}】Move [{{appType}}] named [{{appName}}] to [{{targetFolderName}}]", - "log_move_dataset": "【{{name}}】Move [{{datasetType}}] named [{{datasetName}}] to [{{targetFolderName}}]", - "log_purchase_plan": "【{{name}}】Purchased the set meal", - "log_recover_team_member": "【{{name}}】Restored member【{{memberName}}】", - "log_relocate_department": "【{{name}}】Displayed department【{{departmentName}}】", - "log_retrain_collection": "[{{name}}] Retrained the collection named [{{collectionName}}] in [{{datasetType}}] called [{{datasetName}}].", - "log_search_test": "【{{name}}】Perform a search test operation on [{{datasetType}}] named [{{datasetName}}]", - "log_set_invoice_header": "【{{name}}】The invoice header operation was set up", - "log_time": "Operation time", - "log_transfer_app_ownership": "【{{name}}] Transfer ownership of [{{appType}}] named [{{appName}}] from [{oldOwnerName}}] to [{{newOwnerName}}]", - "log_transfer_dataset_ownership": "[{{name}}] Transfer ownership of [{{datasetType}}] named [{{datasetName}}] from [{oldOwnerName}}] to [{{newOwnerName}}]", - "log_type": "Operation Type", - "log_update_api_key": "【{{name}}】Updated the API key named [{{keyName}}]", - "log_update_app_collaborator": "[{{name}}] Updated the collaborator named [{{appName}}] to: Organization: [{{orgList}}], Group: [{{groupList}}], Member 
[{{tmbList}}]; permissions updated to: Read permission: [{{readPermission}}], Write permission: [{{writePermission}}], Administrator permission: [{{managePermission}}]", - "log_update_app_info": "[{{name}}] updated [{{appType}}] named [{{appName}}]: [{{newItemNames}}] to [{{newItemValues}}]", - "log_update_app_publish_channel": "[{{name}}] Updated a channel named [{{channelName}}] for [{{appType}}] called [{{appName}}].", - "log_update_collection": "[{{name}}] Updated a collection named [{{collectionName}}] in [{{datasetType}}] called [{{datasetName}}].", - "log_update_data": "【{{name}}】Update data in a collection named 【{{datasetName}}】[{{datasetType}}] with [{{datasetType}}] with [{{collectionName}}]", - "log_update_dataset": "【{{name}}】Updated [{{datasetType}}] named [{{datasetName}}]", - "log_update_dataset_collaborator": "[{{name}}] Updated the collaborator named [{{datasetName}}] to: Organization: [{{orgList}}], Group: [{{groupList}}], Member [{{tmbList}}]; permissions updated to: [{{readPermission}}], [{{writePermission}}], [{{managePermission}}]", - "log_update_publish_app": "【{{name}}】【{{operationName}}】【{{appType}}】 named [{{appName}}】", + "log_export_app_chat_log": "{{name}} exported the chat history of the {{appType}} app ({{appName}}).", + "log_export_bill_records": "{{name}} exported the billing records.", + "log_export_dataset": "{{name}} exported the {{datasetType}} named {{datasetName}}.", + "log_join_team": "{{name}} joined the team using the invitation link ({{link}}).", + "log_kick_out_team": "{{name}} removed the member ({{memberName}}).", + "log_login": "{{name}} logged into the system.", + "log_move_app": "{{name}} moved the {{appType}} app ({{appName}}) to the folder ({{targetFolderName}}).", + "log_move_dataset": "{{name}} moved the {{datasetType}} named {{datasetName}} to the folder ({{targetFolderName}}).", + "log_purchase_plan": "{{name}} purchased a plan.", + "log_recover_team_member": "{{name}} restored the member ({{memberName}}).", + 
"log_relocate_department": "{{name}} moved the department ({{departmentName}}).", + "log_retrain_collection": "{{name}} retrained the collection named {{collectionName}} in the {{datasetType}} named {{datasetName}}.", + "log_search_test": "{{name}} performed a search test in the {{datasetType}} named {{datasetName}}.", + "log_set_invoice_header": "{{name}} set the invoice header.", + "log_time": "Time", + "log_transfer_app_ownership": "{{name}} transferred ownership of the {{appType}} app ({{appName}}) from {{oldOwnerName}} to {{newOwnerName}}.", + "log_transfer_dataset_ownership": "{{name}} transferred ownership of the {{datasetType}} named {{datasetName}} from {{oldOwnerName}} to {{newOwnerName}}.", + "log_type": "Operation", + "log_update_api_key": "{{name}} updated the API key ({{keyName}}).", + "log_update_app_collaborator": "{{name}} updated the collaborators (Organizations: {{orgList}}, Groups: {{groupList}}, Members: {{tmbList}}) and permissions (Read: {{readPermission}}, Write: {{writePermission}}, Admin: {{managePermission}}) of the {{appType}} app ({{appName}}).", + "log_update_app_info": "{{name}} updated the {{appType}} app ({{appName}}): {{newItemNames}} set to {{newItemValues}}.", + "log_update_app_publish_channel": "{{name}} updated the channel ({{channelName}}) for the {{appType}} app ({{appName}}).", + "log_update_collection": "{{name}} updated the collection {{collectionName}} in the {{datasetType}} named {{datasetName}}.", + "log_update_data": "{{name}} updated data in the collection {{collectionName}} in the {{datasetType}} named {{datasetName}}.", + "log_update_dataset": "{{name}} updated the {{datasetType}} named {{datasetName}}.", + "log_update_dataset_collaborator": "{{name}} updated the collaborators (Organizations: {{orgList}}, Groups: {{groupList}}, Members: {{tmbList}}) and permissions ({{readPermission}}, {{writePermission}}, {{managePermission}}) of the {{datasetType}} named {{datasetName}}.", + "log_update_publish_app": "{{name}} 
performed the operation ({{operationName}}) on the {{appType}} app ({{appName}}).", "log_user": "Operator", "login": "Log in", - "manage_member": "Managing members", - "member": "member", - "member_group": "Belonging to member group", - "move_app": "App location movement", - "move_dataset": "Mobile Knowledge Base", + "manage_member": "Manage member", + "member": "Member", + "member_group": "Group", + "move_app": "Move app", + "move_dataset": "Move knowledge base", "move_member": "Move member", - "move_org": "Move organization", - "notification_recieve": "Team notification reception", - "org": "organization", - "org_description": "Organization description", - "org_name": "Organization name", - "owner": "owner", - "permission": "Permissions", - "permission_apikeyCreate": "Create API Key", - "permission_apikeyCreate_Tip": "You can create global APIKey and MCP services", - "permission_appCreate": "Create Application", - "permission_appCreate_tip": "Can create applications in the root directory (creation permissions in folders are controlled by the folder)", - "permission_datasetCreate": "Create Knowledge Base", - "permission_datasetCreate_Tip": "Can create knowledge bases in the root directory (creation permissions in folders are controlled by the folder)", - "permission_manage": "Admin", - "permission_manage_tip": "Can manage members, create groups, manage all groups, and assign permissions to groups and members", - "please_bind_contact": "Please bind the contact information", - "purchase_plan": "Upgrade package", - "recover_team_member": "Member Recovery", - "relocate_department": "Department Mobile", - "remark": "remark", - "remove_tip": "Confirm to remove {{username}} from the team?", - "restore_tip": "Confirm to join the team {{username}}? 
\nOnly the availability and related permissions of this member account are restored, and the resources under the account cannot be restored.", - "restore_tip_title": "Recovery confirmation", - "retain_admin_permissions": "Keep administrator rights", - "retrain_collection": "Retrain the set", - "save_and_publish": "save and publish", - "search_log": "Search log", - "search_member": "Search for members", - "search_member_group_name": "Search member/group name", - "search_org": "Search Department", - "search_test": "Search Test", - "set_invoice_header": "Set up invoice header", - "set_name_avatar": "Team avatar", - "sync_immediately": "Synchronize now", - "sync_member_failed": "Synchronization of members failed", - "sync_member_success": "Synchronize members successfully", - "total_team_members": "Total {{amount}} members", + "move_org": "Move department", + "notification_recieve": "Notification recipient", + "org": "Department", + "org_description": "Description", + "org_name": "Department name", + "owner": "Owner", + "permission": "Permission", + "permission_apikeyCreate": "Create API key", + "permission_apikeyCreate_Tip": "Create global API keys and MCP services.", + "permission_appCreate": "Create app", + "permission_appCreate_tip": "Create apps in the root directory. (Permissions within folders are controlled by the folder.)", + "permission_datasetCreate": "Create knowledge base", + "permission_datasetCreate_Tip": "Create knowledge bases in the root directory. 
(Permissions within folders are controlled by the folder.)", + "permission_manage": "Administrator", + "permission_manage_tip": "Manage members, create groups, manage all groups, and assign permissions to groups and members.", + "please_bind_contact": "Please specify contact information.", + "purchase_plan": "Upgrade plan", + "recover_team_member": "Restore member", + "relocate_department": "Move department", + "remark": "Remarks", + "remove_tip": "Are you sure you want to remove the member ({{username}}) from the team? The member will be marked as Left, their operation data will not be deleted, and resources under their account will be automatically transferred to the team owner.", + "restore_tip": "Are you sure you want to add the member ({{username}}) to the team? The member's account and related permissions will be restored, but the account resources will not be recovered.", + "restore_tip_title": "Confirm", + "retain_admin_permissions": "Retain admin permissions", + "retrain_collection": "Retrain collection", + "save_and_publish": "Save and publish", + "search_log": "Log", + "search_member": "Member", + "search_member_group_name": "Member name, group name", + "search_org": "Department", + "search_test": "Test", + "set_invoice_header": "Set invoice title", + "set_name_avatar": "Team avatar & name", + "sync_immediately": "Sync now", + "sync_member_failed": "Failed to sync members.", + "sync_member_success": "Members synced successfully.", + "total_team_members": "Total members: {{amount}}", "transfer_app_ownership": "Transfer app ownership", - "transfer_dataset_ownership": "Transfer dataset ownership", + "transfer_dataset_ownership": "Transfer knowledge base ownership", "transfer_ownership": "Transfer ownership", "type.Folder": "Folder", - "type.Http plugin": "HTTP Plugin", + "type.Http plugin": "HTTP plugin", "type.Plugin": "Plugin", - "type.Simple bot": "Simple App", + "type.Simple bot": "Simple app", "type.Tool": "Tool", - "type.Tool set": "Toolset", + 
"type.Tool set": "toolkit", "type.Workflow bot": "Workflow", "unlimited": "Unlimited", - "update": "update", + "update": "Update", "update_api_key": "Update API key", - "update_app_collaborator": "Apply permission changes", - "update_app_info": "Application information modification", - "update_app_publish_channel": "Update the release channel", - "update_collection": "Update the collection", + "update_app_collaborator": "Change app permission", + "update_app_info": "Edit app info", + "update_app_publish_channel": "Update publishing channel", + "update_collection": "Update collection", "update_data": "Update data", - "update_dataset": "Update the knowledge base", - "update_dataset_collaborator": "Knowledge Base Permission Changes", - "update_publish_app": "Application update", - "used_times_limit": "Limit", - "user_name": "username", - "user_team_invite_member": "Invite members", - "user_team_leave_team": "Leave the team", - "user_team_leave_team_failed": "Failure to leave the team", - "waiting": "To be accepted" + "update_dataset": "Update knowledge base", + "update_dataset_collaborator": "Change Knowledge base permission", + "update_publish_app": "App update", + "used_times_limit": "Active users", + "user_name": "Username", + "user_team_invite_member": "Invite member", + "user_team_leave_team": "Leave team", + "user_team_leave_team_failed": "Failed to leave the team.", + "waiting": "Pending" } diff --git a/packages/web/i18n/en/account_thirdParty.json b/packages/web/i18n/en/account_thirdParty.json index a0b7dc46d141..f9bd1a1529db 100644 --- a/packages/web/i18n/en/account_thirdParty.json +++ b/packages/web/i18n/en/account_thirdParty.json @@ -1,20 +1,20 @@ { "configured": "Configured", - "error.no_permission": "Please contact the administrator to configure", - "get_usage_failed": "Failed to get usage", - "laf_account": "laf account", - "no_intro": "No explanation yet", + "error.no_permission": "Please contact the administrator.", + "get_usage_failed": "Failed to 
obtain usage.", + "laf_account": "LAF account", + "no_intro": "No data available.", "not_configured": "Not configured", - "open_api_notice": "You can fill in the relevant key of OpenAI/OneAPI. \nIf you fill in this content, the online platform using [AI Dialogue], [Problem Classification] and [Content Extraction] will use the Key you filled in, and there will be no charge. \nPlease pay attention to whether your Key has permission to access the corresponding model. \nGPT models can choose FastAI.", + "open_api_notice": "You can enter an OpenAI/OneAPI key. The key will be used for AI Conversation, Question Classification, and Content Extraction without extra charges. Make sure the key can be used to access the required models. You can choose FastAI as the GPT model.", "openai_account_configuration": "OpenAI/OneAPI account", - "openai_account_setting_exception": "Setting up an exception to OpenAI account", - "request_address_notice": "Request address, default is openai official. \nThe forwarding address can be filled in, but \\\"v1\\\" is not automatically completed.", + "openai_account_setting_exception": "Failed to set OpenAI account.", + "request_address_notice": "Request address. Default: official OpenAI address. You can enter a proxy address. Note that \"/v1\" will not be automatically appended to the address.", "third_party_account": "Third-party account", "third_party_account.configured": "Configured", "third_party_account.not_configured": "Not configured", - "third_party_account_desc": "The administrator can configure third-party accounts or variables here, and the account will be used by all team members.", - "unavailable": "Get usage exception", - "usage": "Usage", - "value_not_return_tip": "After the parameters are configured, they will not return to the front end again and do not need to be leaked to other members.", - "value_placeholder": "Enter parameter values. \nEntering a null value means deleting the configuration." 
-} + "third_party_account_desc": "The admin can configure third-party accounts or variables, and these accounts will be available to all team members.", + "unavailable": "Failed to obtain usage.", + "usage": "Usage:", + "value_not_return_tip": "The configured parameter will not be shared with other members because it will not be returned to the frontend.", + "value_placeholder": "Enter the parameter value. If the parameter is left blank, the configuration will be deleted." +} \ No newline at end of file diff --git a/packages/web/i18n/en/app.json b/packages/web/i18n/en/app.json index 44ebbdddb85d..a5b6fca82e7f 100644 --- a/packages/web/i18n/en/app.json +++ b/packages/web/i18n/en/app.json @@ -1,18 +1,18 @@ { "AutoOptimize": "Automatic optimization", - "Click_to_delete_this_field": "Click to delete this field", - "Filed_is_deprecated": "This field is deprecated", + "Click_to_delete_this_field": "Click to delete field", + "Filed_is_deprecated": "The field has been deprecated.", "Index": "Index", - "MCP_tools_debug": "debug", - "MCP_tools_detail": "check the details", - "MCP_tools_list": "Tool list", - "MCP_tools_list_is_empty": "MCP tool not resolved", - "MCP_tools_list_with_number": "Tool list: {{total}}", - "MCP_tools_parse_failed": "Failed to parse MCP address", - "MCP_tools_url": "MCP Address", - "MCP_tools_url_is_empty": "The MCP address cannot be empty", - "MCP_tools_url_placeholder": "After filling in the MCP address, click Analysis", - "No_selected_dataset": "No selected dataset", + "MCP_tools_debug": "Debug", + "MCP_tools_detail": "View details", + "MCP_tools_list": "Tools", + "MCP_tools_list_is_empty": "Failed to parse the MCP address.", + "MCP_tools_list_with_number": "Tools: {{total}}", + "MCP_tools_parse_failed": "Failed to parse the MCP address.", + "MCP_tools_url": "MCP address", + "MCP_tools_url_is_empty": "MCP address is required.", + "MCP_tools_url_placeholder": "Enter the MCP address and then click Parse.", + "No_selected_dataset": "No knowledge 
bases selected.", "Optimizer_CloseConfirm": "Confirm to close", "Optimizer_CloseConfirmText": "Optimization results have been generated, confirming that closing will lose the current result. Will it continue?", "Optimizer_EmptyPrompt": "Please enter optimization requirements", @@ -21,307 +21,307 @@ "Optimizer_Placeholder_loading": "Generating...please wait", "Optimizer_Reoptimize": "Re-optimize", "Optimizer_Replace": "replace", - "Optimizer_Tooltip": "AI optimization prompt words", - "Role_setting": "Permission", - "Run": "Execute", - "Search_dataset": "Search dataset", + "Optimizer_Tooltip": "AI optimization prompt words", + "Role_setting": "Permissions", + "Run": "Run", + "Search_dataset": "Knowledge base", "Selected": "Selected", - "Team_Tags": "Team tags", - "ai_point_price": "Billing", - "ai_settings": "AI Configuration", - "all_apps": "All Applications", - "app.Version name": "Version Name", - "app.error.publish_unExist_app": "Release failed, please check whether the tool call is normal", - "app.error.unExist_app": "Some components are missing, please delete them", - "app.modules.click to update": "Click to Refresh", + "Team_Tags": "Team tag", + "ai_point_price": "Billing based on AI points", + "ai_settings": "AI configuration", + "all_apps": "All apps", + "app.Version name": "Version name", + "app.error.publish_unExist_app": "Failed to publish the app. Please check whether the tool is called properly.", + "app.error.unExist_app": "Some components are missing. 
Please delete them.", + "app.modules.click to update": "Update", "app.modules.has new version": "New Version Available", - "app.modules.not_found": "Not Found", - "app.version_current": "Current Version", - "app.version_initial": "Initial Version", - "app.version_name_tips": "Version name cannot be empty", - "app.version_past": "Previously Published", - "app.version_publish_tips": "This version will be saved to the team cloud, synchronized with the entire team, and update the app version on all release channels.", - "app_detail": "Application Details", - "auto_execute": "Automatic execution", - "auto_execute_default_prompt_placeholder": "Default questions sent when executing automatically", - "auto_execute_tip": "After turning it on, the workflow will be automatically triggered when the user enters the conversation interface. \nExecution order: 1. Dialogue starter; 2. Global variables; 3. Automatic execution.", + "app.modules.not_found": "Components are missing.", + "app.version_current": "Current version", + "app.version_initial": "Initial version", + "app.version_name_tips": "Version name is required.", + "app.version_past": "Published", + "app.version_publish_tips": "The version will be saved to the cloud and be available to all team members. The app version for all publishing channels will be updated to this version.", + "app_detail": "App details", + "auto_execute": "Auto execution", + "auto_execute_default_prompt_placeholder": "Default questions sent during auto execution", + "auto_execute_tip": "If enabled, the workflow will be automatically executed after users enter the chat interface. Execution order: 1. Chat greetings. 2. Global variables. 3. 
App auto execution.", "auto_save": "Auto save", - "chat_debug": "Chat Preview", - "chat_logs": "Logs", - "chat_logs_tips": "Logs will record the online, shared, and API (requires chatId) conversation records of this app.", - "config_ai_model_params": "Click to configure AI model related properties", - "config_file_upload": "Click to Configure File Upload Rules", - "config_question_guide": "Configuration guess you want to ask", - "confirm_copy_app_tip": "The system will create an app with the same configuration for you, but permissions will not be copied. Please confirm!", - "confirm_del_app_tip": "Are you sure you want to delete 【{{name}}】 and all of its chat history?", - "confirm_delete_folder_tip": "Confirm to delete this folder? All apps and corresponding conversation records under it will be deleted. Please confirm!", - "copy_one_app": "Create Duplicate", - "core.app.QG.Switch": "Enable guess what you want to ask", - "core.dataset.import.Custom prompt": "Custom Prompt", - "create_by_curl": "By CURL", - "create_by_template": "By template", - "create_copy_success": "Duplicate Created Successfully", - "create_empty_app": "Create Default App", - "create_empty_plugin": "Create Default Plugin", - "create_empty_workflow": "Create Default Workflow", - "cron.every_day": "Run Daily", - "cron.every_month": "Run Monthly", - "cron.every_week": "Run Weekly", - "cron.interval": "Run at Intervals", - "dataset": "dataset", - "dataset_search_tool_description": "Call the \"Semantic Search\" and \"Full-text Search\" capabilities to find reference content that may be related to the problem from the \"Knowledge Base\". 
\nPrioritize calling this tool to assist in answering user questions.", + "chat_debug": "Debugging preview", + "chat_logs": "Chat logs", + "chat_logs_tips": "The online chats, shared chats, and API-based chats (chat ID is required) on the app will be recorded in the logs.", + "config_ai_model_params": "Configure AI model attributes", + "config_file_upload": "Configure file upload rules", + "config_question_guide": "Configure the guess what you want feature", + "confirm_copy_app_tip": "An app with the same configuration except for permissions will be created. Would you like to proceed?", + "confirm_del_app_tip": "Are you sure you want to delete {{name}} and its chat records?", + "confirm_delete_folder_tip": "Are you sure you want to delete the folder? All apps and chat records in the folder will also be deleted. Would you like to proceed?", + "copy_one_app": "Create replica", + "core.app.QG.Switch": "Enable guess what you want", + "core.dataset.import.Custom prompt": "Custom prompt", + "create_by_curl": "CURL", + "create_by_template": "Template", + "create_copy_success": "Replica created successfully.", + "create_empty_app": "Create blank app", + "create_empty_plugin": "Create blank plugin", + "create_empty_workflow": "Create blank workflow", + "cron.every_day": "Every day", + "cron.every_month": "Every month", + "cron.every_week": "Every week", + "cron.interval": "Other", + "dataset": "Knowledge base", + "dataset_search_tool_description": "Search reference materials related to the question from the knowledge base by using the semantic search and full-text search features. 
The tool is prioritized to help answer questions.", "day": "Day", - "deleted": "App deleted", - "document_quote": "Document Reference", - "document_quote_tip": "Usually used to accept user-uploaded document content (requires document parsing), and can also be used to reference other string data.", - "document_upload": "Document Upload", - "edit_app": "Application details", - "edit_info": "Edit", - "execute_time": "Execution Time", - "export_config_successful": "Configuration copied, some sensitive information automatically filtered. Please check for any remaining sensitive data.", - "export_configs": "Export", - "feedback_count": "User Feedback", - "file_quote_link": "Files", - "file_recover": "File will overwrite current content", - "file_upload": "File Upload", - "file_upload_tip": "Once enabled, documents/images can be uploaded. Documents are retained for 7 days, images for 15 days. Using this feature may incur additional costs. To ensure a good experience, please choose an AI model with a larger context length when using this feature.", - "go_to_chat": "Go to Conversation", - "go_to_run": "Go to Execution", - "image_upload": "Image Upload", - "image_upload_tip": "How to activate model image recognition capabilities", - "import_configs": "Import", - "import_configs_failed": "Import configuration failed, please ensure the configuration is correct!", - "import_configs_success": "Import Successful", - "initial_form": "initial state", - "interval.12_hours": "Every 12 Hours", - "interval.2_hours": "Every 2 Hours", - "interval.3_hours": "Every 3 Hours", - "interval.4_hours": "Every 4 Hours", - "interval.6_hours": "Every 6 Hours", - "interval.per_hour": "Every Hour", - "invalid_json_format": "JSON format error", - "keep_the_latest": "Keep the latest", - "llm_not_support_vision": "This model does not support image recognition", - "llm_use_vision": "Vision", - "llm_use_vision_tip": "After clicking on the model selection, you can see whether the model supports image 
recognition and the ability to control whether to start image recognition. \nAfter starting image recognition, the model will read the image content in the file link, and if the user question is less than 500 words, it will automatically parse the image in the user question.", - "log_chat_logs": "Dialogue log", + "deleted": "The app has been deleted.", + "document_quote": "Document reference", + "document_quote_tip": "It is commonly used to process content uploaded by users (document parsing is required). It can also be used to reference other string entries.", + "document_upload": "Document upload", + "edit_app": "App details", + "edit_info": "Edit info", + "execute_time": "Time executed", + "export_config_successful": "The configuration has been copied, and some sensitive information has been automatically filtered out. Please check if any sensitive data still exists.", + "export_configs": "Export configuration", + "feedback_count": "User feedback", + "file_quote_link": "File link", + "file_recover": "The file will overwrite the existing content.", + "file_upload": "File upload", + "file_upload_tip": "If enabled, you can upload documents and images. Documents are retained for 7 days, and images for 15 days. Using this feature may generate significant additional costs. To ensure optimal user experience, when this feature is enabled, please select an AI model with a larger context length.", + "go_to_chat": "Chat now", + "go_to_run": "Run now", + "image_upload": "Image upload", + "image_upload_tip": "How to enable image recognition for a model", + "import_configs": "Import configuration", + "import_configs_failed": "Failed to import the configuration. 
Please make sure that the configuration is valid.", + "import_configs_success": "Import successful.", + "initial_form": "Initial status", + "interval.12_hours": "Every 12 hours", + "interval.2_hours": "Every 2 hours", + "interval.3_hours": "Every 3 hours", + "interval.4_hours": "Every 4 hours", + "interval.6_hours": "Every 6 hours", + "interval.per_hour": "Every hour", + "invalid_json_format": "Please upload a valid JSON file.", + "keep_the_latest": "Auto update to the latest version", + "llm_not_support_vision": "This model does not support image recognition.", + "llm_use_vision": "Image recognition", + "llm_use_vision_tip": "You can select a model and then check whether it supports image recognition and decide whether to enable this feature. If the image recognition feature is enabled, the model will read image contents from file links. If a question is less than 500 characters, the model will automatically parse images in the question.", + "log_chat_logs": "Chat logs", "log_detail": "Log details", - "logs_app_data": "Data board", - "logs_app_result": "Application effect", - "logs_average_response_time": "Average run time", - "logs_average_response_time_description": "Average of total workflow run time", - "logs_chat_count": "Number of sessions", - "logs_chat_count_description": "How many new sessions does this application create? \nSession definition: When the interval between the previous message exceeds 15 minutes, it is considered to be a new session (this definition only takes effect here)", - "logs_chat_data": "chat data", - "logs_chat_item_count": "Number of conversations", - "logs_chat_item_count_description": "How many conversations does this app generate? 
\nDialogue definition: The workflow runs once, and counts as a round of conversations", - "logs_chat_user": "user", - "logs_date": "date", - "logs_empty": "No logs yet~", - "logs_error_count": "Error Count", - "logs_error_rate": "Dialogue error ratio", - "logs_error_rate_description": "The proportion of the total number of dialogues reported in error", - "logs_export_confirm_tip": "There are currently {{total}} conversation records, and each conversation can export up to 100 latest messages. \nConfirm export?", - "logs_export_title": "Time, source, user, contact, title, total number of messages, user good feedback, user bad feedback, custom feedback, labeled answers, conversation details", + "logs_app_data": "Data dashboard", + "logs_app_result": "App performance", + "logs_average_response_time": "Avg uptime (s)", + "logs_average_response_time_description": "Avg total uptime of workflow", + "logs_chat_count": "Sessions", + "logs_chat_count_description": "The total number of new sessions created in the app. A new session is created if the interval between the new message and the last message exceeds 15 minutes. (This definition only applies here.)", + "logs_chat_data": "Chat data", + "logs_chat_item_count": "Chats", + "logs_chat_item_count_description": "The total number of chats created in the app. A chat is a round of workflow execution.", + "logs_chat_user": "User", + "logs_date": "Date", + "logs_empty": "No logs available.", + "logs_error_count": "Errors", + "logs_error_rate": "Chat error rate", + "logs_error_rate_description": "The proportion of chats that encountered error to total chats", + "logs_export_confirm_tip": "The total number of chat records is {{total}}. Up to 100 latest messages can be exported from a chat. 
Would you like to proceed?", + "logs_export_title": "Time, Source, User, Contact info, Title, Total messages, Positive feedback, Negative feedback, Custom feedback, Marked answers, Chat details", "logs_good_feedback": "Like", - "logs_key_config": "Field Configuration", - "logs_keys_annotatedCount": "Annotated Answer Count", - "logs_keys_chatDetails": "Conversation details", - "logs_keys_createdTime": "Created Time", - "logs_keys_customFeedback": "Custom Feedback", - "logs_keys_errorCount": "Error Count", - "logs_keys_feedback": "User Feedback", - "logs_keys_lastConversationTime": "Last Conversation Time", - "logs_keys_messageCount": "Message Count", - "logs_keys_points": "Points Consumed", - "logs_keys_responseTime": "Average Response Time", + "logs_key_config": "Field configuration", + "logs_keys_annotatedCount": "Marked answers", + "logs_keys_chatDetails": "Conversation details", + "logs_keys_createdTime": "Time created", + "logs_keys_customFeedback": "Custom feedback", + "logs_keys_errorCount": "Errors", + "logs_keys_feedback": "User feedback", + "logs_keys_lastConversationTime": "Last chat time", + "logs_keys_messageCount": "Total messages", + "logs_keys_points": "Points consumed", + "logs_keys_responseTime": "Avg response time", "logs_keys_sessionId": "Session ID", "logs_keys_source": "Source", "logs_keys_title": "Title", "logs_keys_user": "User", - "logs_message_total": "Total Messages", + "logs_message_total": "Total messages", "logs_new_user_count": "New users", - "logs_points": "Points Consumed", - "logs_points_description": "Points consumed by this application", - "logs_points_per_chat": "Average points consumption for a single session", - "logs_points_per_chat_description": "How many points are consumed on average for a workflow operation", - "logs_response_time": "Average Response Time", - "logs_search_chat": "Search for session title or session ID", - "logs_source": "source", - "logs_source_count_description": "Number of users across channels", + 
"logs_points": "Points consumed", + "logs_points_description": "Points consumed by the app", + "logs_points_per_chat": "Avg points consumed per session", + "logs_points_per_chat_description": "Avg points consumed per workflow execution", + "logs_response_time": "Avg response time", + "logs_search_chat": "Session title or ID", + "logs_source": "Source", + "logs_source_count_description": "Users by channel", "logs_title": "Title", - "logs_total": "Grand total", - "logs_total_avg_points": "Average consumption", - "logs_total_chat": "Cumulative conversation count", - "logs_total_error": "{{count}} errors were reported in total, and the error rate was: {{rate}} %", - "logs_total_points": "Accumulated points consumption", - "logs_total_tips": "Cumulative indicators are not affected by time filtering", - "logs_total_users": "Cumulative number of users", - "logs_user_count": "Number of users", - "logs_user_count_description": "Number of people who have a conversation with the app in unit time", + "logs_total": "Total", + "logs_total_avg_points": "Avg points consumed", + "logs_total_chat": "Total chats", + "logs_total_error": "Total errors: {{count}}, Error rate: {{rate}}%", + "logs_total_points": "Total points consumed", + "logs_total_tips": "Cumulative metrics are not affected by the time filter.", + "logs_total_users": "Total users", + "logs_user_count": "Users", + "logs_user_count_description": "The number of users who created chats in the app in the period", "logs_user_data": "User data", "logs_user_feedback": "User feedback", - "logs_user_feedback_description": "Like: Number of likes from users\n\nStep on: Users step on the number of points", - "logs_user_retention": "User retention", - "logs_user_retention_description": "Number of users who have added new users during the T cycle and are active in the T 1 cycle", - "look_ai_point_price": "View all model billing standards", - "manual_secret": "Manual secret", - "mark_count": "Number of Marked Answers", - 
"max_histories_number": "Max histories", - "max_histories_number_tip": "The maximum number of rounds of dialogue that the model can carry into memory. If the memory exceeds the model context, the system will force truncation. \nTherefore, even if 30 rounds of dialogue are configured, the actual number may not reach 30 rounds during operation.", + "logs_user_feedback_description": "Likes: Number of user likes\nDislikes: Number of user dislikes", + "logs_user_retention": "Retained users", + "logs_user_retention_description": "The number of new users in period T who were active in period T+1", + "look_ai_point_price": "View billing standard by model", + "manual_secret": "Temporary secret key", + "mark_count": "Marked answers", + "max_histories_number": "Chats remembered", + "max_histories_number_tip": "The maximum number of chats that a model can remember. If a chat exceeds the max context length, the system will automatically delete the excess part. Therefore, the model may not actually remember 30 chats even if you set this field to 30.", "max_tokens": "Max tokens", - "module.Custom Title Tip": "This title will be displayed during the conversation.", - "module.No Modules": "No Plugins Found", - "module.type": "\"{{type}}\" type\n{{description}}", - "modules.Title is required": "Module name cannot be empty", + "module.Custom Title Tip": "The title will be displayed in the chat.", + "module.No Modules": "Plugin not found.", + "module.type": "{{type}} type\n{{description}}", + "modules.Title is required": "Module name is required.", "month.unit": "Day", - "move.hint": "After moving, the selected application/folder will inherit the permission settings of the new folder, and the original permission settings will become invalid.", - "move_app": "Move Application", - "no_mcp_tools_list": "No data yet, the MCP address needs to be parsed first", - "node_not_intro": "This node is not introduced", - "not_json_file": "Please select a JSON file", - "not_the_newest": "Not the 
latest", + "move.hint": "If moved, the app or folder will inherit the permissions of the destination folder, and its current permissions will become invalid.", + "move_app": "Move app", + "no_mcp_tools_list": "No data available. The MCP address must be parsed first.", + "node_not_intro": "No description available for this node.", + "not_json_file": "Please select a JSON file.", + "not_the_newest": "Earlier version", "oaste_curl_string": "Enter CURL code", - "open_auto_execute": "Enable automatic execution", - "open_vision_function_tip": "Models with icon switches have image recognition capabilities. \nAfter being turned on, the model will parse the pictures in the file link and automatically parse the pictures in the user's question (user question ≤ 500 words).", - "or_drag_JSON": "or drag in JSON file", - "paste_config_or_drag": "Paste config or drag JSON file here", - "pdf_enhance_parse": "PDF enhancement analysis", - "pdf_enhance_parse_price": "{{price}}Points/page", - "pdf_enhance_parse_tips": "Calling PDF recognition model for parsing, you can convert it into Markdown and retain pictures in the document. At the same time, you can also identify scanned documents, which will take a long time to identify them.", - "permission.des.manage": "Based on write permissions, you can configure publishing channels, view conversation logs, and assign permissions to the application.", - "permission.des.read": "Use the app to have conversations", - "permission.des.readChatLog": "Can view chat logs", - "permission.des.write": "Can view and edit apps", - "permission.name.read": "Dialogue only", + "open_auto_execute": "Enable auto execute", + "open_vision_function_tip": "A model with a switch displayed supports image recognition. 
If enabled, the model will parse images from file links and will automatically parse images in the question (it takes effect when the question is less than 500 characters).", + "or_drag_JSON": "Drag & drop to upload JSON file", + "paste_config_or_drag": "Paste configuration or drag & drop a JSON file here", + "pdf_enhance_parse": "Enhanced PDF parsing", + "pdf_enhance_parse_price": "{{price}} points/page", + "pdf_enhance_parse_tips": "PDF recognition model supports parsing PDF files, converting PDF files into Markdown format with images preserved, and processing scanned copies of PDF files, which takes a longer time.", + "permission.des.manage": "Has write permissions and can configure publishing channels, view chat logs, and assign app permissions.", + "permission.des.read": "Chat using the app.", + "permission.des.readChatLog": "View chat logs.", + "permission.des.write": "View and edit the app.", + "permission.name.read": "Chat only", "permission.name.readChatLog": "View chat logs", - "plugin.Instructions": "Instructions", - "plugin_cost_by_token": "Charged based on token usage", - "plugin_cost_folder_tip": "This tool set contains subordinate tools, and the call points are determined based on the actual calling tool", - "plugin_cost_per_times": "{{cost}} points/time", - "plugin_dispatch": "Plugin Invocation", - "plugin_dispatch_tip": "Adds extra capabilities to the model. 
The specific plugins to be invoked will be autonomously decided by the model.\nIf a plugin is selected, the Dataset invocation will automatically be treated as a special plugin.", - "pro_modal_feature_1": "External organization structure integration and multi-tenancy", - "pro_modal_feature_2": "Team-exclusive application showcase page", - "pro_modal_feature_3": "Knowledge base enhanced indexing", - "pro_modal_later_button": "Maybe Later", - "pro_modal_subtitle": "Join the business edition now to unlock more premium features", - "pro_modal_title": "Business Edition Exclusive!", - "pro_modal_unlock_button": "Unlock Now", - "publish_channel": "Publish", - "publish_success": "Publish Successful", - "question_guide_tip": "After the conversation, 3 guiding questions will be generated for you.", - "reasoning_response": "Output thinking", + "plugin.Instructions": "Guide", + "plugin_cost_by_token": "Billing based on token consumption", + "plugin_cost_folder_tip": "The toolkit contains multiple tools. The points are charged based on the tools used.", + "plugin_cost_per_times": "{{cost}} points/call", + "plugin_dispatch": "Plugin call", + "plugin_dispatch_tip": "Enable the model to obtain external data. 
The model will automatically call plugins as needed, and all plugins will run in non-streaming mode.\nSelected plugins will be automatically called when the knowledge base is called.", + "pro_modal_feature_1": "Connection to external organizations and multi-tenancy", + "pro_modal_feature_2": "Dedicated app page for team", + "pro_modal_feature_3": "Enhanced knowledge base index", + "pro_modal_later_button": "Not now", + "pro_modal_subtitle": "Subscribe to the enterprise edition for advanced features", + "pro_modal_title": "Exclusive to enterprise edition", + "pro_modal_unlock_button": "Subscribe", + "publish_channel": "Publishing channel", + "publish_success": "Published successfully.", + "question_guide_tip": "Three suggested questions will be generated for you at the end of the chat.", + "reasoning_response": "Output reasoning process", "response_format": "Response format", "save_team_app_log_keys": "Save as team configuration", - "saved_success": "Saved successfully! \nTo use this version externally, click Save and Publish", - "search_app": "Search apps", - "search_tool": "Search Tools", - "secret_get_course": "Course", - "setting_app": "Workflow", - "setting_plugin": "Workflow", - "show_top_p_tip": "An alternative method of temperature sampling, called Nucleus sampling, the model considers the results of tokens with TOP_P probability mass quality. \nTherefore, 0.1 means that only tokens containing the highest probability quality are considered. \nThe default is 1.", - "simple_tool_tips": "This plugin contains special inputs and is not currently supported for invocation by simple applications.", - "source_updateTime": "Update time", - "stop_sign": "Stop", - "stop_sign_placeholder": "Multiple serial numbers are separated by |, for example: aaa|stop", - "stream_response": "Stream", - "stream_response_tip": "Turning this switch off forces the model to use non-streaming mode and will not output content directly. 
\nIn the output of the AI ​​reply, the content output by this model can be obtained for secondary processing.", - "sync_log_keys_popover_text": "The current field configuration is only valid for individuals. Do you need to save it to the team configuration?", - "sync_team_app_log_keys": "Restore to team configuration", - "system_secret": "System secret", - "systemval_conflict_globalval": "The variable name conflicts with the system variable, please use other variable names", - "team_tags_set": "Team tags", + "saved_success": "Changes saved successfully. Changes saved successfully. To make the version available externally, please click Save and publish.", + "search_app": "App name", + "search_tool": "Tool name", + "secret_get_course": "Guide", + "setting_app": "Application configuration", + "setting_plugin": "Plugin configuration", + "show_top_p_tip": "Nucleus sampling is an alternative to temperature sampling. By using this method, the model considers only tokens whose cumulative probability exceeds p. For example, if p is set to 0.1, the model will consider only tokens with the highest cumulative probability. Default value: 1.", + "simple_tool_tips": "The plugin cannot be called by simple apps because it contains special inputs.", + "source_updateTime": "Time updated", + "stop_sign": "Stop sequence", + "stop_sign_placeholder": "Separate multiple sequences with the pipe character (|). Example: aaa|stop", + "stream_response": "Output in streaming mode", + "stream_response_tip": "If disabled, the model must output contents in non-streaming mode, and the contents will not be output directly. You can obtain the output contents for further processing.", + "sync_log_keys_popover_text": "The field configuration will take effect only on your account. 
Would you like to save it as team configuration?", + "sync_team_app_log_keys": "Restore team configuration", + "system_secret": "System secret key", + "systemval_conflict_globalval": "The variable name conflicts with a system variable name. Please use another one.", + "team_tags_set": "Team tag", "temperature": "Temperature", - "temperature_tip": "Range 0~10. \nThe larger the value, the more divergent the model’s answer is; the smaller the value, the more rigorous the answer.", + "temperature_tip": "Range: 0-10. A higher value leads to more creative outputs, while a lower value leads to more deterministic outputs.", "template.hard_strict": "Strict Q&A template", - "template.hard_strict_des": "Based on the question and answer template, stricter requirements are imposed on the model's answers.", + "template.hard_strict_des": "A template based on Q&A template. It enforces stricter requirements on the model's answers.", "template.qa_template": "Q&A template", - "template.qa_template_des": "A knowledge base suitable for QA question and answer structure, which allows AI to answer strictly according to preset content", - "template.simple_robot": "Simple robot", - "template.standard_strict": "Standard strict template", - "template.standard_strict_des": "Based on the standard template, stricter requirements are imposed on the model's answers.", + "template.qa_template_des": "A template used for knowledge bases with a Q&A structure. With this template enabled, the model can be configured to output based on the predefined content.", + "template.simple_robot": "Simple bot", + "template.standard_strict": "Strict standard template", + "template.standard_strict_des": "A template based on standard template. 
It enforces stricter requirements on the model's answers.", "template.standard_template": "Standard template", - "template.standard_template_des": "Standard prompt words for knowledge bases with unfixed structures.", - "templateMarket.Search_template": "Search Template", + "template.standard_template_des": "A template that contains standard prompts. It is used for knowledge bases with no fixed structure.", + "templateMarket.Search_template": "Template", "templateMarket.Use": "Use", - "templateMarket.no_intro": "No introduction yet~", - "templateMarket.templateTags.Image_generation": "Image Generation", - "templateMarket.templateTags.Office_services": "Office Services", - "templateMarket.templateTags.Recommendation": "Recommendation", - "templateMarket.templateTags.Roleplay": "Roleplay", - "templateMarket.templateTags.Web_search": "Web Search", - "templateMarket.templateTags.Writing": "Writing", - "templateMarket.template_guide": "Guide", + "templateMarket.no_intro": "No data available.", + "templateMarket.templateTags.Image_generation": "Image generation", + "templateMarket.templateTags.Office_services": "Office service", + "templateMarket.templateTags.Recommendation": "Recommendations", + "templateMarket.templateTags.Roleplay": "Role playing", + "templateMarket.templateTags.Web_search": "Online search", + "templateMarket.templateTags.Writing": "Content creation", + "templateMarket.template_guide": "Template description", "template_market": "Templates", - "template_market_description": "Explore more features in the template market, with configuration tutorials and usage guides to help you understand and get started with various applications.", - "template_market_empty_data": "No suitable templates found", - "time_zone": "Time Zone", - "too_to_active": "Active", - "tool_active_manual_config_desc": "The temporary key is saved in this application and is only for use by this application.", - "tool_active_system_config_desc": "Use the system configured key", - 
"tool_active_system_config_price_desc": "Additional payment for key price ({{price}} points/time)", - "tool_active_system_config_price_desc_folder": "The additional key price is required, and the fee will be deducted based on the actual use of the tool.", + "template_market_description": "Come to explore more possibilities with templates. Follow the configuration and usage guides to develop apps using templates.", + "template_market_empty_data": "No suitable template available.", + "time_zone": "Time zone", + "too_to_active": "Activate", + "tool_active_manual_config_desc": "The temporary secret key is stored in the app and can only be used by the app.", + "tool_active_system_config_desc": "Use the system-configured key", + "tool_active_system_config_price_desc": "Additional price for using the secret key ({{price}} points/call).", + "tool_active_system_config_price_desc_folder": "Additional costs for using the secret key is required. The fee is charged based on tool usage.", "tool_detail": "Tool details", - "tool_input_param_tip": "This plugin requires configuration of related information to run properly.", - "tool_not_active": "This tool has not been activated yet", - "tool_run_free": "This tool runs without points consumption", - "tool_type_communication": "Communication", - "tool_type_design": "design", + "tool_input_param_tip": "To run the plugin properly, please configure the required information.", + "tool_not_active": "The tool has not been activated.", + "tool_run_free": "Running the tool does not consume points.", + "tool_type_communication": "Communications", + "tool_type_design": "Design", "tool_type_entertainment": "Business", - "tool_type_finance": "finance", + "tool_type_finance": "Finance", "tool_type_multimodal": "Multimodal", - "tool_type_news": "news", - "tool_type_productivity": "productive forces", - "tool_type_scientific": "research", + "tool_type_news": "News", + "tool_type_productivity": "Productivity", + "tool_type_scientific": "Scientific 
research", "tool_type_search": "Search", - "tool_type_social": "Social", - "tool_type_tools": "tool", - "tools_no_description": "This tool has not been introduced ~", - "transition_to_workflow": "Convert to Workflow", - "transition_to_workflow_create_new_placeholder": "Create a new app instead of modifying the current app", - "transition_to_workflow_create_new_tip": "Once converted to a workflow, it cannot be reverted to simple mode. Please confirm!", - "tts_ai_model": "Use a speech synthesis model", - "tts_browser": "Browser's own (free)", + "tool_type_social": "Social networking", + "tool_type_tools": "Tools", + "tools_no_description": "No description available for this tool.", + "transition_to_workflow": "Convert to workflow", + "transition_to_workflow_create_new_placeholder": "Create a new app instead of modifying the current one.", + "transition_to_workflow_create_new_tip": "If converted to a workflow app, it cannot be reverted to simple mode. Would you like to proceed?", + "tts_ai_model": "Use text-to-speech model", + "tts_browser": "Built-in in browser (free)", "tts_close": "Close", "type.All": "All", - "type.Create http plugin tip": "Batch create plugins through OpenAPI Schema, compatible with GPTs format.", - "type.Create mcp tools tip": "Automatically parse and batch create callable MCP tools by entering the MCP address", - "type.Create one plugin tip": "Customizable input and output workflows, usually used to encapsulate reusable workflows.", - "type.Create plugin bot": "Create Plugin", - "type.Create simple bot": "Create Simple App", - "type.Create simple bot tip": "Create a simple AI app by filling out a form, suitable for beginners.", - "type.Create workflow bot": "Create Workflow", - "type.Create workflow tip": "Build complex multi-turn dialogue AI applications through low-code methods, recommended for advanced users.", + "type.Create http plugin tip": "Bulk create plugins by using OpenAPI schema, The plugins to be created must be compatible with the 
GPTs format.", + "type.Create mcp tools tip": "Automatically parse the specified MCP address to bulk create callable MCP tools.", + "type.Create one plugin tip": "Encapsulate reusable processes of a workflow, with configurable inputs and outputs.", + "type.Create plugin bot": "Create plugin", + "type.Create simple bot": "Create simple app", + "type.Create simple bot tip": "Create a simple AI app by using a guided form, which is ideal for beginners.", + "type.Create workflow bot": "Create workflow", + "type.Create workflow tip": "Create a complex AI app that can handle multiple rounds of chats by using low-code methods, which is recommended for advanced users.", "type.Folder": "Folder", - "type.Http plugin": "HTTP Plugin", - "type.Import from json": "Import JSON", - "type.Import from json tip": "Create applications directly through JSON configuration files", - "type.Import from json_error": "Failed to get workflow data, please check the URL or manually paste the JSON data", - "type.Import from json_loading": "Workflow data is being retrieved, please wait...", - "type.MCP tools": "MCP Toolset", - "type.MCP_tools_url": "MCP Address", + "type.Http plugin": "HTTP plugin", + "type.Import from json": "Import JSON configuration file", + "type.Import from json tip": "Create an app using a JSON configuration file.", + "type.Import from json_error": "Failed to obtain workflow data. 
Please check the URL or paste the JSON data.", + "type.Import from json_loading": "Obtaining workflow data, please wait.", + "type.MCP tools": "MCP toolkit", + "type.MCP_tools_url": "MCP address", "type.Plugin": "Plugin", - "type.Simple bot": "Simple App", + "type.Simple bot": "Simple app", "type.Tool": "Tool", - "type.Tool set": "Toolset", + "type.Tool set": "toolkit", "type.Workflow bot": "Workflow", - "type.error.Workflow data is empty": "No workflow data was obtained", - "type.error.workflowresponseempty": "Response content is empty", - "type.hidden": "Hide app", - "type_not_recognized": "App type not recognized", + "type.error.Workflow data is empty": "Failed to obtain workflow data.", + "type.error.workflowresponseempty": "Response is empty.", + "type.hidden": "Hidden app", + "type_not_recognized": "Failed to recognize the app type.", "un_auth": "No permission", - "upload_file_max_amount": "Maximum File Quantity", - "upload_file_max_amount_tip": "Maximum number of files uploaded in a single round of conversation", - "variable.select type_desc": "You can define a global variable that does not need to be filled in by the user.\n\nThe value of this variable can come from the API interface, the Query of the shared link, or assigned through the [Variable Update] module.", - "variable.textarea_type_desc": "Allows users to input up to 4000 characters in the dialogue box.", - "variable_name_required": "Required variable name", - "variable_repeat": "This variable name has been occupied and cannot be used", - "version.Revert success": "Revert Successful", - "version_back": "Revert to Original State", - "version_copy": "Duplicate", - "version_initial_copy": "Duplicate - Original State", - "vision_model_title": "Image recognition ability", + "upload_file_max_amount": "Max files", + "upload_file_max_amount_tip": "The maximum number of files that can be uploaded in a chat", + "variable.select type_desc": "You can configure global variables for a workflow. 
These global variables are typically used for temporary caching. Variables can be specified in the following ways:\n1. Use query parameters on the chat page.\n2. Use variables in the API request.\n3. Use a node with the variable update tool.", + "variable.textarea_type_desc": "Up to 4,000 characters can be entered in the input box.", + "variable_name_required": "Variable name is required.", + "variable_repeat": "The variable name already exists.", + "version.Revert success": "Rollback successful.", + "version_back": "Roll back to initial status", + "version_copy": "Replica", + "version_initial_copy": "Replica - Initial status", + "vision_model_title": "Image recognition capability", "week.Friday": "Friday", "week.Monday": "Monday", "week.Saturday": "Saturday", @@ -329,22 +329,22 @@ "week.Thursday": "Thursday", "week.Tuesday": "Tuesday", "week.Wednesday": "Wednesday", - "workflow.Input guide": "Input Guide", - "workflow.file_url": "Document Link", + "workflow.Input guide": "Enter description", + "workflow.file_url": "Document link", "workflow.form_input": "Form input", - "workflow.form_input_description_placeholder": "For example: \nAdd your information", - "workflow.form_input_tip": " This module can configure multiple inputs to guide users in entering specific content.", - "workflow.input_description_tip": "You can add a description to explain to users what they need to input", - "workflow.read_files": "Document Parse", - "workflow.read_files_result": "Document Parsing Result", - "workflow.read_files_result_desc": "Original document text, consisting of file names and document content, separated by hyphens between multiple files.", - "workflow.read_files_tip": "Parse the documents uploaded in this round of dialogue and return the corresponding document content", - "workflow.select_description": "Description Text", - "workflow.select_description_placeholder": "For example: \nAre there tomatoes in the fridge?", - "workflow.select_description_tip": "You can add a 
description text to explain the meaning of each option to the user.", - "workflow.select_result": "Selected Result", - "workflow.user_file_input": "File Link", - "workflow.user_file_input_desc": "Links to documents and images uploaded by users.", - "workflow.user_select": "User Select", - "workflow.user_select_tip": "This module can configure multiple options for selection during the dialogue. Different options can lead to different workflow branches." + "workflow.form_input_description_placeholder": "Example:\nComplete your information", + "workflow.form_input_tip": "This module can be configured with multiple input prompts to guide users in entering specific content.", + "workflow.input_description_tip": "You can add a description to explain to users what they need to enter.", + "workflow.read_files": "Document parsing", + "workflow.read_files_result": "Result for document parsing", + "workflow.read_files_result_desc": "A file contains a filename and the content. Separate multiple files with hyphens.", + "workflow.read_files_tip": "Parse the file uploaded in the current chat and return the result.", + "workflow.select_description": "Description", + "workflow.select_description_placeholder": "Example:\nIs there any tomato in the refrigerator?", + "workflow.select_description_tip": "You can add a description to explain to users the definition of each option.", + "workflow.select_result": "Select variable name", + "workflow.user_file_input": "File link", + "workflow.user_file_input_desc": "File and image link uploaded", + "workflow.user_select": "User selection", + "workflow.user_select_tip": "The module can be configured with multiple options and can be selected during the chat. Different options direct the chat to different workflow branches." 
} diff --git a/packages/web/i18n/en/chat.json b/packages/web/i18n/en/chat.json index 5940a4fb5bb9..c774b0397803 100644 --- a/packages/web/i18n/en/chat.json +++ b/packages/web/i18n/en/chat.json @@ -1,131 +1,131 @@ { - "AI_input_is_empty": "The content passed to the AI ​​node is empty", - "Delete_all": "Clear All Lexicon", - "LLM_model_response_empty": "The model flow response is empty, please check whether the model flow output is normal.", - "ai_reasoning": "Thinking process", - "back_to_text": "Text input", - "chat.quote.No Data": "The file cannot be found", - "chat.quote.deleted": "This data has been deleted ~", - "chat.waiting_for_response": "Please wait for the conversation to complete", - "chat_history": "Conversation History", - "chat_input_guide_lexicon_is_empty": "Lexicon not configured yet", - "chat_test_app": "Debug-{{name}}", - "citations": "{{num}} References", - "click_contextual_preview": "Click to see contextual preview", - "completion_finish_close": "Disconnection", - "completion_finish_content_filter": "Trigger safe wind control", - "completion_finish_function_call": "Function Calls", - "completion_finish_length": "Reply limit exceeded", - "completion_finish_null": "unknown", - "completion_finish_reason": "Reason for completion", - "completion_finish_stop": "Completed normally", - "completion_finish_tool_calls": "Tool calls", - "config_input_guide": "Set Up Input Guide", - "config_input_guide_lexicon": "Set Up Lexicon", - "config_input_guide_lexicon_title": "Set Up Lexicon", - "content_empty": "No Content", - "contextual": "{{num}} Contexts", - "contextual_preview": "Contextual Preview {{num}} Items", - "core.chat.moveCancel": "Swipe to Cancel", - "core.chat.shortSpeak": "Speaking Time is Too Short", - "csv_input_lexicon_tip": "Only CSV batch import is supported, click to download the template", - "custom_input_guide_url": "Custom Lexicon URL", - "data_source": "Source Dataset: {{name}}", - "dataset_quote_type error": "Knowledge base reference type 
is wrong, correct type: { datasetId: string }[]", - "delete_all_input_guide_confirm": "Are you sure you want to clear the input guide lexicon?", + "AI_input_is_empty": "The content passed to the AI node is empty.", + "Delete_all": "Clear word library", + "LLM_model_response_empty": "The output of the model in streaming mode is empty. Please check whether the model output works properly in streaming mode.", + "ai_reasoning": "Reasoning process", + "back_to_text": "Return to input", + "chat.quote.No Data": "Unable to find the file.", + "chat.quote.deleted": "The data has been deleted.", + "chat.waiting_for_response": "Please wait for the chat to complete.", + "chat_history": "Chat records", + "chat_input_guide_lexicon_is_empty": "No word library is configured.", + "chat_test_app": "Debug - {{name}}", + "citations": "References: {{num}}", + "click_contextual_preview": "Click to preview context", + "completion_finish_close": "Disconnected", + "completion_finish_content_filter": "Safety control was triggered.", + "completion_finish_function_call": "Function call", + "completion_finish_length": "The output has exceeded the maximum.", + "completion_finish_null": "Unknown", + "completion_finish_reason": "Completion cause", + "completion_finish_stop": "Completed successfully.", + "completion_finish_tool_calls": "Tool call", + "config_input_guide": "Configure input guide", + "config_input_guide_lexicon": "Configure word library", + "config_input_guide_lexicon_title": "Configure word library", + "content_empty": "Content is empty.", + "contextual": "{{num}} contexts", + "contextual_preview": "Contexts previewed: {{num}}", + "core.chat.moveCancel": "Swipe up to cancel", + "core.chat.shortSpeak": "The speech is too short.", + "csv_input_lexicon_tip": "Only CSV files can be bulk imported. 
Click to download the template", + "custom_input_guide_url": "Custom word library URL", + "data_source": "Source knowledge base: {{name}}", + "dataset_quote_type error": "Incorrect reference type of knowledge base. Correct type: { datasetId: string }[]", + "delete_all_input_guide_confirm": "Are you sure you want to clear the input guide library?", "download_chunks": "Download data", - "empty_directory": "This directory is empty~", - "error_message": "error message", - "file_amount_over": "Exceeded maximum file quantity {{max}}", - "file_input": "File input", - "file_input_tip": "You can obtain the link to the corresponding file through the \"File Link\" of the [Plug-in Start] node", - "history_slider.home.title": "chat", - "home.chat_app": "HomeChat-{{name}}", - "home.chat_id": "Chat ID", - "home.no_available_tools": "No tools available", - "home.select_tools": "Select Tool", - "home.tools": "Tool: {{num}}", - "in_progress": "In Progress", - "input_guide": "Input Guide", - "input_guide_lexicon": "Lexicon", - "input_guide_tip": "You can set up some preset questions. When the user inputs a question, related questions from these presets will be suggested.", - "input_placeholder_phone": "Please enter your question", - "insert_input_guide,_some_data_already_exists": "Duplicate data detected, automatically filtered, {{len}} items inserted", - "invalid_share_url": "Invalid sharing link", - "is_chatting": "Chatting in progress... 
please wait until it finishes", + "empty_directory": "No items selectable in the directory.", + "error_message": "Error details", + "file_amount_over": "The number of files exceeds the maximum ({{max}}).", + "file_input": "System file", + "file_input_tip": "You can obtain the required file link through the file link field in the plugin start node.", + "history_slider.home.title": "Chat", + "home.chat_app": "Home page chat - {{name}}", + "home.chat_id": "Session ID", + "home.no_available_tools": "No tools available.", + "home.select_tools": "Select", + "home.tools": "Tools: {{num}}", + "in_progress": "Ongoing", + "input_guide": "Input guide", + "input_guide_lexicon": "Word library", + "input_guide_tip": "You can configure some preset questions. When a user enters a question, related preset questions will be displayed as prompts.", + "input_placeholder_phone": "Enter question", + "insert_input_guide,_some_data_already_exists": "Duplicate entries have been detected and automatically filtered. 
{{len}} entries have been inserted.", + "invalid_share_url": "Invalid sharing link.", + "is_chatting": "Chatting, please wait.", "items": "Items", "llm_tokens": "LLM tokens", - "module_runtime_and": "Total Module Runtime", - "multiple_AI_conversations": "Multiple AI Conversations", - "new_input_guide_lexicon": "New Lexicon", - "no_invalid_app": "There are no available applications under your account", - "no_workflow_response": "No workflow data", - "not_query": "Missing query content", - "not_select_file": "No file selected", - "plugins_output": "Plugin Output", - "press_to_speak": "Hold down to speak", - "query_extension_IO_tokens": "Problem Optimization Input/Output Tokens", - "query_extension_result": "Problem optimization results", - "question_tip": "From top to bottom, the response order of each module", - "read_raw_source": "Open the original text", - "reasoning_text": "Thinking process", - "release_cancel": "Release Cancel", - "release_send": "Release send, slide up to cancel", - "response.child total points": "Sub-workflow point consumption", - "response.dataset_concat_length": "Combined total", - "response.node_inputs": "Node Inputs", + "module_runtime_and": "Total workflow uptime", + "multiple_AI_conversations": "Multiple AI chats", + "new_input_guide_lexicon": "New word library", + "no_invalid_app": "No apps available for your account.", + "no_workflow_response": "No running data available.", + "not_query": "Query content is missing.", + "not_select_file": "No file is selected.", + "plugins_output": "Plugin output", + "press_to_speak": "Hold to talk", + "query_extension_IO_tokens": "Input/output tokens for question optimization", + "query_extension_result": "Question optimization result", + "question_tip": "Modules respond in a top-down sequence.", + "read_raw_source": "View source text", + "reasoning_text": "Reasoning process", + "release_cancel": "Release to cancel", + "release_send": "Release to send, swipe up to cancel", + "response.child total 
points": "Points consumed by sub-workflow", + "response.dataset_concat_length": "Total shards after merging", + "response.node_inputs": "Node input", "response_embedding_model": "Vector model", - "response_embedding_model_tokens": "Vector Model Tokens", - "response_hybrid_weight": "Embedding : Full text = {{emb}} : {{text}}", - "response_rerank_tokens": "Rearrange Model Tokens", - "search_results": "Search results", + "response_embedding_model_tokens": "Vector model tokens", + "response_hybrid_weight": "Ratio of semantic search to full-text search: {{emb}}/{{text}}", + "response_rerank_tokens": "Reranker model tokens", + "search_results": "Search result", "select": "Select", - "select_file": "Upload File", - "select_file_img": "Upload file / image", + "select_file": "Upload file", + "select_file_img": "Upload file/image", "select_img": "Upload Image", - "setting.copyright.basic_configuration": "Basic configuration", - "setting.copyright.copyright_configuration": "Copyright configuration", - "setting.copyright.diagram": "Schematic diagram", - "setting.copyright.file_size_exceeds_limit": "File size exceeds the limit, maximum support for {{maxSize}}", - "setting.copyright.immediate_upload_required": "Immediate upload is required for this feature", + "setting.copyright.basic_configuration": "Basics", + "setting.copyright.copyright_configuration": "Copyright", + "setting.copyright.diagram": "Illustration", + "setting.copyright.file_size_exceeds_limit": "The file size exceeds the maximum ({{maxSize}}).", + "setting.copyright.immediate_upload_required": "Upload an image to use the feature.", "setting.copyright.logo": "Logo", - "setting.copyright.preview_fail": "File preview failed", - "setting.copyright.save_fail": "Logo failed to save", - "setting.copyright.save_success": "Logo Saved successfully", - "setting.copyright.select_logo_image": "Please select the logo image to upload first", - "setting.copyright.style_diagram": "Style diagram", - "setting.copyright.tips": 
"Suggested ratio 4:1", - "setting.copyright.tips.square": "Suggested ratio 1:1", - "setting.copyright.title": "Copyright", - "setting.copyright.upload_fail": "File upload failed", - "setting.data_dashboard.title": "Data board", - "setting.fastgpt_chat_diagram": "/imgs/chat/fastgpt_chat_diagram_en.png", + "setting.copyright.preview_fail": "Failed to preview the file.", + "setting.copyright.save_fail": "Failed to save the Logo.", + "setting.copyright.save_success": "Logo saved successfully.", + "setting.copyright.select_logo_image": "Please select a logo image first.", + "setting.copyright.style_diagram": "Illustration", + "setting.copyright.tips": "Recommended ratio: 4:1", + "setting.copyright.tips.square": "Recommended ratio: 1:1", + "setting.copyright.title": "Copyright info", + "setting.copyright.upload_fail": "Failed to upload the file.", + "setting.data_dashboard.title": "Data dashboard", + "setting.fastgpt_chat_diagram": "/imgs/chat/fastgpt_chat_diagram.png", "setting.home.available_tools.add": "Add", - "setting.home.commercial_version": "Commercial version", - "setting.home.diagram": "Schematic diagram", - "setting.home.dialogue_tips": "Dialog prompt text", - "setting.home.dialogue_tips.default": "You can ask me any questions", + "setting.home.commercial_version": "Enterprise edition", + "setting.home.diagram": "Illustration", + "setting.home.dialogue_tips": "Chat box prompt", + "setting.home.dialogue_tips.default": "You can ask me anything.", "setting.home.dialogue_tips_placeholder": "Please enter the prompt text of the dialog box", - "setting.home.home_tab_title": "Home Page Title", - "setting.home.home_tab_title_placeholder": "Please enter the title of the homepage", + "setting.home.home_tab_title": "Home page title", + "setting.home.home_tab_title_placeholder": "Home page title is required.", "setting.home.slogan": "Slogan", - "setting.home.slogan.default": "Hello 👋, I am FastGPT! 
Is there anything I can help you?", + "setting.home.slogan.default": "Hi 👋, I'm FastGPT. How can I help you today?", "setting.home.slogan_placeholder": "Please enter Slogan", - "setting.home.title": "Home", - "setting.incorrect_plan": "The current plan does not support this feature, please upgrade to the subscription plan", - "setting.incorrect_version": "This feature is not supported in the current version", - "setting.log_details.title": "Home Log", - "setting.logs.title": "Homepage log", + "setting.home.title": "Home page configuration", + "setting.incorrect_plan": "The current plan does not support this feature. Please upgrade your subscription plan.", + "setting.incorrect_version": "The current version does not support this feature.", + "setting.log_details.title": "Home page logs", + "setting.logs.title": "Home page logs", "setting.save": "Save", - "setting.save_success": "Save successfully", + "setting.save_success": "Changes saved successfully.", "sidebar.home": "Home", - "sidebar.team_apps": "Team Apps", + "sidebar.team_apps": "Team app", "source_cronJob": "Scheduled execution", - "start_chat": "Start", - "stream_output": "Stream Output", - "unsupported_file_type": "Unsupported file types", + "start_chat": "Chat now", + "stream_output": "Output in streaming mode", + "unsupported_file_type": "File type is not supported.", "upload": "Upload", - "variable_invisable_in_share": "Custom variables are not visible in login-free links", - "view_citations": "View References", - "web_site_sync": "Web Site Sync" + "variable_invisable_in_share": "Custom variables are not visible in login-free mode.", + "view_citations": "View reference", + "web_site_sync": "Website sync" } diff --git a/packages/web/i18n/en/common.json b/packages/web/i18n/en/common.json index 4a4a4a6db907..97f51358d6d7 100644 --- a/packages/web/i18n/en/common.json +++ b/packages/web/i18n/en/common.json @@ -1,56 +1,56 @@ { - "Action": "Action", + "Action": "Operation", "Add": "Add", - "Add_new_input": 
"Add new input", + "Add_new_input": "Add input", "All": "All", - "App": "Application", + "App": "App", "Cancel": "Cancel", - "Choose": "Choose", - "Click_to_expand": "Click to expand", + "Choose": "Select", + "Click_to_expand": "View details", "Close": "Close", - "Code": "Code", + "Code": "Source code", "Config": "Configuration", - "Confirm": "Confirm", - "Continue_Adding": "Continue adding", + "Confirm": "OK", + "Continue_Adding": "Add more", "Copy": "Copy", "Creating": "Creating", "Delete": "Delete", - "Detail": "Detail", - "Documents": "Documents", - "Done": "Done", + "Detail": "Details", + "Documents": "Document", + "Done": "Finish", "Download": "Download", "Edit": "Edit", "Error": "Error", "Exit": "Exit", "Export": "Export", - "FAQ.ai_point_a": "Each time an AI model is called, a certain amount of AI points will be consumed. \nFor specific calculation standards, please refer to the \"AI integral calculation standards\" above. \nThe system will give priority to the actual usage returned by the model manufacturer. If it is empty, the calculation method of GPT3.5 is used for estimation. 1Token≈0.7 Chinese characters ≈0.9 English words, and the characters that appear continuously may be considered as 1 Tokens.", - "FAQ.ai_point_expire_a": "Yes, they will expire. After the current package expires, the AI points will be reset to the new package's AI points. Annual package AI points are valid for one year, not monthly.", + "FAQ.ai_point_a": "Each AI model call consumes AI points. For details, see the billing standard by using AI points above. The system prioritizes the usage data returned by the model provider. If no usage data is returned, it estimates token consumption based on the calculation method of GPT-3.5: 1 token ≈ 0.7 Chinese characters ≈ 0.9 English words. Consecutive characters may be counted as 1 token.", + "FAQ.ai_point_expire_a": "Yes. After the current plan expires, AI points will be cleared and updated according to the new plan. 
The points for an annual plan are valid for one year and are not reset monthly.", "FAQ.ai_point_expire_q": "Do AI points expire?", "FAQ.ai_point_q": "What are AI points?", - "FAQ.check_subscription_a": "Go to Account - Personal Information - Package Details - Usage. You can view the effective and expiration dates of your subscribed packages. After the paid package expires, it will automatically switch to the free version.", - "FAQ.check_subscription_q": "Where can I view my subscribed packages?", - "FAQ.dataset_compute_a": "1 knowledge base storage is equal to 1 knowledge base index. \nA single chunked data usually corresponds to multiple indexes. You can see \"n group indexes\" in a single knowledge base collection.", - "FAQ.dataset_compute_q": "How is Dataset storage calculated?", - "FAQ.dataset_index_a": "No, but if the Dataset index exceeds the limit, you cannot insert or update Dataset content.", - "FAQ.dataset_index_q": "Will the Dataset index be deleted if it exceeds the limit?", - "FAQ.free_user_clean_a": "If a free team (free version and has not purchased additional packages) does not log in to the system for 30 consecutive days, the system will automatically clear all Dataset content under that team.", - "FAQ.free_user_clean_q": "Will the data of the free version be cleared?", - "FAQ.package_overlay_a": "Yes, each purchased resource pack is independent and will be used in an overlapping manner within its validity period. AI points will be deducted from the resource pack that expires first.", - "FAQ.package_overlay_q": "Can additional resource packs be stacked?", - "FAQ.switch_package_a": "The package usage rule is to prioritize the use of higher-level packages. 
Therefore, if the newly purchased package is higher than the current package, the new package will take effect immediately; otherwise, the current package will continue to be used.", - "FAQ.switch_package_q": "Will the subscription package be switched?", + "FAQ.check_subscription_a": "On the Account > Personal info > Plan details > Usage page, you can view the valid and expiration time of your plans. The paid plan will automatically switch to the free one after expiration.", + "FAQ.check_subscription_q": "Where can I view my subscription plans?", + "FAQ.dataset_compute_a": "One entry stored in the knowledge base equals one knowledge base index. One chunked entry usually corresponds to multiple indexes. Therefore, n sets of indexes can be found in one knowledge base collection.", + "FAQ.dataset_compute_q": "How is knowledge base storage calculated?", + "FAQ.dataset_index_a": "No. However, you cannot insert or update any content in the knowledge base.", + "FAQ.dataset_index_q": "Will existing entries in a knowledge base be deleted when the number of entries exceeds the maximum?", + "FAQ.free_user_clean_a": "If a team using the free edition without any extra subscription plans does not log in for 30 consecutive days, the system will automatically clear all knowledge base content belonging to the team.", + "FAQ.free_user_clean_q": "Will data be cleared in the free edition?", + "FAQ.package_overlay_a": "Yes. Purchased resource packages are independent and can be used concurrently within their validity periods. AI points from the resource package that expires first will be consumed first.", + "FAQ.package_overlay_q": "Can multiple extra resource packages be used concurrently?", + "FAQ.switch_package_a": "Subscription plan usage follows a priority rule where a plan with higher level is used first. Therefore, if the level of a new plan is higher than the current one, it takes effect immediately. 
Otherwise, the current plan will not be switched.", + "FAQ.switch_package_q": "Can I switch subscription plans?", "File": "File", "Finish": "Finish", "Folder": "Folder", - "FullScreen": "FullScreen", - "FullScreenLight": "FullScreenLight", + "FullScreen": "Full screen", + "FullScreenLight": "Full-screen preview", "Import": "Import", "Input": "Input", - "Instructions": "Instruction", - "Intro": "Introduction", + "Instructions": "Guide", + "Intro": "Description", "Loading": "Loading...", - "Login": "Login", - "Manual": "Manual", + "Login": "Log in", + "Manual": "Temporary", "More": "More", "Move": "Move", "Name": "Name", @@ -60,29 +60,29 @@ "Operation": "Operation", "Other": "Other", "Output": "Output", - "Params": "Parameters", - "Parse": "Analysis", + "Params": "Parameter", + "Parse": "Parse", "Permission": "Permission", - "Permission_tip": "Individual permissions are greater than group permissions", + "Permission_tip": "Individual permissions take precedence over group permissions.", "Preview": "Preview", "Remove": "Remove", "Rename": "Rename", "Required_input": "Required", - "Reset": "Reset", + "Reset": "Restore defaults", "Restart": "Restart", - "Resume": "Resume", + "Resume": "Restore", "Role": "Permission", "Run": "Run", "Running": "Running", "Save": "Save", - "Save_and_exit": "Save and Exit", + "Save_and_exit": "Save and exit", "Search": "Search", - "Select_App": "Select an application", + "Select_App": "Select", "Select_all": "Select all", - "Setting": "Setting", + "Setting": "Settings", "Status": "Status", "Submit": "Submit", - "Success": "Success", + "Success": "Successful", "System": "System", "Team": "Team", "UnKnow": "Unknown", @@ -90,103 +90,103 @@ "Update": "Update", "Username": "Username", "Waiting": "Waiting", - "Warning": "Warning", + "Warning": "Message", "Website": "Website", "action_confirm": "Confirm", - "add_new": "add_new", - "add_new_param": "Add new param", - "add_success": "Added Successfully", - "all_quotes": "All quotes", - "all_result": 
"Full Results", - "app_evaluation": "App Evaluation(Beta)", - "app_not_version": "This application has not been published, please publish it first", - "auth_config": "Authentication", + "add_new": "Add", + "add_new_param": "Add parameter", + "add_success": "Added successfully.", + "all_quotes": "Reference all", + "all_result": "Complete result", + "app_evaluation": "App evaluation (Beta)", + "app_not_version": "The app is not published. Please publish it first.", + "auth_config": "Authentication configuration", "auth_type": "Authentication type", - "auth_type.Custom": "Customize", + "auth_type.Custom": "Custom", "auth_type.None": "None", "back": "Back", - "base_config": "Basic Configuration", - "bill_already_processed": "Order has been processed", - "bill_expired": "Order expired", - "bill_not_pay_processed": "Non-online orders", - "button.extra_dataset_size_tip": "You are purchasing [Extra Knowledge Base Capacity]", - "button.extra_points_tip": "You are purchasing [Extra AI Points]", - "can_copy_content_tip": "It is not possible to copy automatically using the browser, please manually copy the following content", + "base_config": "Basics", + "bill_already_processed": "Processed", + "bill_expired": "Expired", + "bill_not_pay_processed": "Offline", + "button.extra_dataset_size_tip": "You are purchasing extra knowledge base capacity.", + "button.extra_points_tip": "You are purchasing extra AI points.", + "can_copy_content_tip": "Auto copy is unavailable. 
Please manually copy the content below.", "chart_mode_cumulative": "Cumulative", - "chart_mode_incremental": "Incremental", + "chart_mode_incremental": "Periodic", "chat": "Session", - "chat_chatId": "Session Id: {{chatId}}", - "choosable": "Choosable", - "chose_condition": "Choose Condition", - "chosen": "Chosen", + "chat_chatId": "Session ID: {{chatId}}", + "choosable": "Available", + "chose_condition": "Filter", + "chosen": "Selected", "classification": "Classification", - "click_drag_tip": "Click to Drag", - "click_select_avatar": "Click to Select Avatar", + "click_drag_tip": "Click and drag", + "click_select_avatar": "Click to select a profile image.", "click_to_copy": "Click to copy", - "click_to_resume": "Click to Resume", - "code_editor": "Code Editor", - "code_error.account_error": "Incorrect account name or password", - "code_error.account_exist": "Account has been registered", - "code_error.account_not_found": "User is not registered", - "code_error.app_error.can_not_edit_admin_permission": "Can not edit admin permission", - "code_error.app_error.invalid_app_type": "Invalid Application Type", - "code_error.app_error.invalid_owner": "Unauthorized Application Owner", - "code_error.app_error.not_exist": "Application Does Not Exist", - "code_error.app_error.un_auth_app": "Unauthorized to Operate This Application", - "code_error.chat_error.un_auth": "Unauthorized to Operate This Chat Record", - "code_error.error_code.400": "Request Failed", - "code_error.error_code.401": "No Access Permission", - "code_error.error_code.403": "Access Forbidden", - "code_error.error_code.404": "Request Not Found", - "code_error.error_code.405": "Request Method Error", - "code_error.error_code.406": "Request Format Error", - "code_error.error_code.410": "Resource Deleted", - "code_error.error_code.422": "Validation Error", - "code_error.error_code.500": "Server Error", - "code_error.error_code.502": "Gateway Error", - "code_error.error_code.503": "Server Overloaded or Under 
Maintenance", - "code_error.error_code.504": "Gateway Timeout", - "code_error.error_code[429]": "Requests are too frequent", - "code_error.error_message.403": "Credential Error", - "code_error.error_message.510": "Insufficient Account Balance", - "code_error.error_message.511": "Unauthorized to Operate This Model", - "code_error.error_message.513": "Unauthorized to Read This File", - "code_error.error_message.514": "Invalid API Key", - "code_error.openapi_error.api_key_not_exist": "API Key Does Not Exist", - "code_error.openapi_error.exceed_limit": "Up to 10 API Keys", - "code_error.openapi_error.un_auth": "Unauthorized to Operate This API Key", - "code_error.outlink_error.invalid_link": "Invalid Share Link", - "code_error.outlink_error.link_not_exist": "Share Link Does Not Exist", - "code_error.outlink_error.un_auth_user": "Identity Verification Failed", - "code_error.plugin_error.not_exist": "The tool does not exist", - "code_error.plugin_error.un_auth": "No permission to operate the tool", - "code_error.system_error.community_version_num_limit": "Exceeded Open Source Version Limit, Please Upgrade to Commercial Version: https://fastgpt.io", - "code_error.system_error.license_app_amount_limit": "Exceed the maximum number of applications in the system", - "code_error.system_error.license_dataset_amount_limit": "Exceed the maximum number of knowledge bases in the system", - "code_error.system_error.license_user_amount_limit": "Exceed the maximum number of users in the system", - "code_error.team_error.ai_points_not_enough": "Insufficient AI Points", - "code_error.team_error.app_amount_not_enough": "Application Limit Reached", - "code_error.team_error.cannot_delete_default_group": "Cannot delete default group", - "code_error.team_error.cannot_delete_non_empty_org": "Cannot delete non-empty organization", - "code_error.team_error.cannot_modify_root_org": "Cannot modify root organization", - "code_error.team_error.cannot_move_to_sub_path": "Cannot move to same or 
subdirectory", - "code_error.team_error.dataset_amount_not_enough": "Dataset Limit Reached", - "code_error.team_error.dataset_size_not_enough": "Insufficient Dataset Capacity, Please Expand", - "code_error.team_error.group_name_duplicate": "Duplicate group name", - "code_error.team_error.group_name_empty": "Group name cannot be empty", - "code_error.team_error.group_not_exist": "Group does not exist", - "code_error.team_error.invitation_link_invalid": "The invitation link has expired", - "code_error.team_error.not_user": "The member cannot be found", - "code_error.team_error.org_member_duplicated": "Duplicate organization member", - "code_error.team_error.org_member_not_exist": "Organization member does not exist", - "code_error.team_error.org_not_exist": "Organization does not exist", - "code_error.team_error.org_parent_not_exist": "Parent organization does not exist", - "code_error.team_error.over_size": "Team members exceed limit", - "code_error.team_error.plugin_amount_not_enough": "Plugin Limit Reached", - "code_error.team_error.re_rank_not_enough": "Search rearrangement cannot be used in the free version~", - "code_error.team_error.too_many_invitations": "You have reached the maximum number of active invitation links, please clean up some links first", - "code_error.team_error.un_auth": "Unauthorized to Operate This Team", - "code_error.team_error.user_not_active": "The user did not accept or has left the team", + "click_to_resume": "Click to restore", + "code_editor": "Edit code", + "code_error.account_error": "Username or password is invalid.", + "code_error.account_exist": "The account already exists.", + "code_error.account_not_found": "The account does not exist.", + "code_error.app_error.can_not_edit_admin_permission": "Administrator permissions cannot be edited.", + "code_error.app_error.invalid_app_type": "Invalid app type.", + "code_error.app_error.invalid_owner": "Invalid app owner.", + "code_error.app_error.not_exist": "The app does not exist.", + 
"code_error.app_error.un_auth_app": "You do not have permission to perform operation on the app.", + "code_error.chat_error.un_auth": "You do not have permission to perform operations on the chat record.", + "code_error.error_code.400": "Request failed.", + "code_error.error_code.401": "You do not have access permission.", + "code_error.error_code.403": "Access denied", + "code_error.error_code.404": "The request does not exist.", + "code_error.error_code.405": "Request method error.", + "code_error.error_code.406": "Request format is invalid.", + "code_error.error_code.410": "The resource has been deleted.", + "code_error.error_code.422": "Authentication error.", + "code_error.error_code.500": "The server encountered error.", + "code_error.error_code.502": "Gateway error.", + "code_error.error_code.503": "The server is temporarily overloaded or under maintenance.", + "code_error.error_code.504": "Gateway timed out.", + "code_error.error_code.429": "Requests are too frequent", + "code_error.error_message.403": "Credential error.", + "code_error.error_message.510": "The account balance is insufficient.", + "code_error.error_message.511": "You do not have permission to perform operations on the model.", + "code_error.error_message.513": "You do not have permission to read the file.", + "code_error.error_message.514": "Invalid API key.", + "code_error.openapi_error.api_key_not_exist": "The API key does not exist.", + "code_error.openapi_error.exceed_limit": "Up to 10 API keys can be created.", + "code_error.openapi_error.un_auth": "You do not have permission to perform operations on the API key.", + "code_error.outlink_error.invalid_link": "Invalid sharing link.", + "code_error.outlink_error.link_not_exist": "The sharing link does not exist.", + "code_error.outlink_error.un_auth_user": "Identity verification failed.", + "code_error.plugin_error.not_exist": "The tool does not exist.", + "code_error.plugin_error.un_auth": "You do not have permission to perform 
operations on the tool.", + "code_error.system_error.community_version_num_limit": "The number of resources exceeded the maximum allowed by the community edition. Please upgrade to the enterprise edition: https://fastgpt.in", + "code_error.system_error.license_app_amount_limit": "The number of apps has exceeded the maximum.", + "code_error.system_error.license_dataset_amount_limit": "The number of knowledge bases has exceeded the maximum.", + "code_error.system_error.license_user_amount_limit": "The number of users has exceeded the maximum.", + "code_error.team_error.ai_points_not_enough": "AI points are insufficient.", + "code_error.team_error.app_amount_not_enough": "The number of apps has reached the maximum.", + "code_error.team_error.cannot_delete_default_group": "The group cannot be deleted because it is a default group.", + "code_error.team_error.cannot_delete_non_empty_org": "The department cannot be deleted because it is not empty.", + "code_error.team_error.cannot_modify_root_org": "The department cannot be deleted because it is the root department.", + "code_error.team_error.cannot_move_to_sub_path": "You cannot move it to the same directory or a sub-directory.", + "code_error.team_error.dataset_amount_not_enough": "The number of knowledge bases has reached the maximum.", + "code_error.team_error.dataset_size_not_enough": "The knowledge base capacity is insufficient. 
Please expand it first.", + "code_error.team_error.group_name_duplicate": "The group name already exists.", + "code_error.team_error.group_name_empty": "Group name is required.", + "code_error.team_error.group_not_exist": "The group does not exist.", + "code_error.team_error.invitation_link_invalid": "The invitation link has expired.", + "code_error.team_error.not_user": "Unable to find the member.", + "code_error.team_error.org_member_duplicated": "The member already exists in the department.", + "code_error.team_error.org_member_not_exist": "The member does not exist in the department.", + "code_error.team_error.org_not_exist": "The department does not exist.", + "code_error.team_error.org_parent_not_exist": "The parent department does not exist.", + "code_error.team_error.over_size": "The number of team members has exceeded the maximum.", + "code_error.team_error.plugin_amount_not_enough": "The number of plugins has reached the maximum.", + "code_error.team_error.re_rank_not_enough": "Reranking of retrieved results is not supported for the free edition.", + "code_error.team_error.too_many_invitations": "The number of valid invitation links has reached the maximum. 
Please delete some links first.", + "code_error.team_error.un_auth": "You do not have permission to perform operations on the team.", + "code_error.team_error.user_not_active": "The user has not accepted the invitation or has left the team.", "code_error.team_error.website_sync_not_enough": "The free version cannot be synchronized with the web site ~", "code_error.team_error.you_have_been_in_the_team": "You are already in this team", "code_error.token_error_code.403": "Invalid Login Status, Please Re-login", From e0271f5a31972cd1114885804f6ad70b09d1c02e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=83=91=E6=96=AF=E6=88=8814864?= <14864@sangfor.com> Date: Tue, 2 Sep 2025 20:36:46 +0800 Subject: [PATCH 018/255] feat: Add evaluation dimension management functionality, optimize selectors and form validation - Merge multilingual files for evaluation dimensions and datasets - Add evaluation dimension creation and editing pages - Implement evaluation dimension form validation and submission logic - Add evaluation dimension trial run functionality - Optimize resource selector component, support hiding root directory and avatar display control - Add reference template component, provide standard evaluation templates - Implement answer input component, support collapse and automatic height adjustment - Add application selector component, support displaying all application options --- packages/web/i18n/constants.ts | 4 +- .../web/i18n/en/dashboard_evaluation.json | 76 +++- packages/web/i18n/en/evaluation_dataset.json | 22 -- .../web/i18n/en/evaluation_dimension.json | 12 - packages/web/i18n/i18next.d.ts | 4 - .../web/i18n/zh-CN/dashboard_evaluation.json | 76 +++- .../web/i18n/zh-CN/evaluation_dataset.json | 22 -- .../web/i18n/zh-CN/evaluation_dimension.json | 12 - .../i18n/zh-Hant/dashboard_evaluation.json | 76 +++- .../web/i18n/zh-Hant/evaluation_dataset.json | 22 -- .../i18n/zh-Hant/evaluation_dimension.json | 12 - .../common/folder/SelectOneResource.tsx | 46 ++- 
.../evaluation/dimension/AnswerTextarea.tsx | 133 +++++++ .../evaluation/dimension/CitationTemplate.tsx | 367 ++++++++++++++++++ .../evaluation/dimension/EditForm.tsx | 173 +++++++++ .../evaluation/dimension/TestRun.tsx | 321 +++++++++++++++ .../evaluation/dimension/styles.module.scss | 11 + .../evaluation/task/AppSelectWithAll.tsx | 143 +++++++ .../dashboard/evaluation/dataset/index.tsx | 42 +- .../dashboard/evaluation/dimension/create.tsx | 128 ++++++ .../dashboard/evaluation/dimension/edit.tsx | 192 +++++++++ .../dashboard/evaluation/dimension/index.tsx | 20 +- 22 files changed, 1752 insertions(+), 162 deletions(-) delete mode 100644 packages/web/i18n/en/evaluation_dataset.json delete mode 100644 packages/web/i18n/en/evaluation_dimension.json delete mode 100644 packages/web/i18n/zh-CN/evaluation_dataset.json delete mode 100644 packages/web/i18n/zh-CN/evaluation_dimension.json delete mode 100644 packages/web/i18n/zh-Hant/evaluation_dataset.json delete mode 100644 packages/web/i18n/zh-Hant/evaluation_dimension.json create mode 100644 projects/app/src/pageComponents/dashboard/evaluation/dimension/AnswerTextarea.tsx create mode 100644 projects/app/src/pageComponents/dashboard/evaluation/dimension/CitationTemplate.tsx create mode 100644 projects/app/src/pageComponents/dashboard/evaluation/dimension/EditForm.tsx create mode 100644 projects/app/src/pageComponents/dashboard/evaluation/dimension/TestRun.tsx create mode 100644 projects/app/src/pageComponents/dashboard/evaluation/dimension/styles.module.scss create mode 100644 projects/app/src/pageComponents/dashboard/evaluation/task/AppSelectWithAll.tsx create mode 100644 projects/app/src/pages/dashboard/evaluation/dimension/create.tsx create mode 100644 projects/app/src/pages/dashboard/evaluation/dimension/edit.tsx diff --git a/packages/web/i18n/constants.ts b/packages/web/i18n/constants.ts index 263a64946dd6..d568e47831ee 100644 --- a/packages/web/i18n/constants.ts +++ b/packages/web/i18n/constants.ts @@ -20,9 +20,7 @@ 
export const I18N_NAMESPACES = [ 'account_team', 'account_model', 'dashboard_mcp', - 'dashboard_evaluation', - 'evaluation_dataset', - 'evaluation_dimension' + 'dashboard_evaluation' ]; export const I18N_NAMESPACES_MAP = I18N_NAMESPACES.reduce( diff --git a/packages/web/i18n/en/dashboard_evaluation.json b/packages/web/i18n/en/dashboard_evaluation.json index a529fc21b8e9..f58090cf4898 100644 --- a/packages/web/i18n/en/dashboard_evaluation.json +++ b/packages/web/i18n/en/dashboard_evaluation.json @@ -71,5 +71,79 @@ "evaluation_datasets_tab": "评测数据集", "evaluation_dimensions_tab": "评测维度", "create_new": "新建", - "retry_error_data": "重试异常数据" + "retry_error_data": "重试异常数据", + "dataset_name_placeholder": "名称", + "create_new_dataset": "新建数据集", + "smart_generation": "智能生成", + "file_import": "文件导入", + "confirm_delete_dataset": "确认删除该数据集吗?", + "error_details": "异常详情", + "status_queuing": "排队中", + "status_parsing": "文件解析中", + "status_generating": "数据生成中", + "status_generate_error": "生成异常", + "status_ready": "已就绪", + "status_parse_error": "解析异常", + "click_to_view_details": "点击查看详情", + "table_header_name": "名称", + "table_header_data_count": "数据量", + "table_header_time": "创建/更新时间", + "table_header_status": "状态", + "table_header_creator": "创建人", + "create_dimension": "新建维度", + "search_dimension": "搜索评测维度", + "delete_failed": "删除失败", + "delete_success": "删除成功", + "builtin": "内置", + "confirm_delete_dimension": "确认删除该维度?", + "dimension_name": "维度名", + "description": "介绍", + "create_update_time": "创建/更新时间", + "creator": "创建人", + "all": "全部", + "app": "应用", + "citation_template": "引用模板", + "correctness": "正确性", + "conciseness": "简洁性", + "harmfulness": "有害性", + "controversiality": "争议性", + "creativity": "创造性", + "criminality": "犯罪性", + "depth": "深度性", + "details": "细节性", + "dimension_name_label": "维度名", + "dimension_description_label": "维度描述", + "prompt_label": "提示词", + "citation_template_button": "引用模板", + "test_run_title": "试运行", + "question_label": "问题", + "question_placeholder": 
"请输入问题内容", + "answer_label": "答案", + "reference_answer_label": "参考答案", + "reference_answer_placeholder": "请输入参考答案", + "actual_answer_label": "实际回答", + "actual_answer_placeholder": "请输入实际回答", + "run_result_label": "运行结果", + "start_run_button": "开始运行", + "running_text": "运行中", + "run_success": "运行成功", + "run_failed": "运行失败", + "not_run": "未运行", + "score_unit": "分", + "error_info_label": "报错信息:", + "no_feedback_text": "暂无反馈内容", + "dimension_create_back": "退出", + "dimension_create_test_run": "试运行", + "dimension_create_confirm": "确认", + "dimension_create_success": "创建成功", + "dimension_create_name_required": "请输入名称", + "dimension_create_prompt_required": "请输入提示词", + "dimension_get_data_failed": "获取维度数据失败", + "dimension_data_not_exist": "维度数据不存在", + "dimension_update_success": "更新成功", + "dimension_update_failed": "更新失败", + "dimension_name_required": "请输入名称", + "dimension_back": "退出", + "dimension_test_run": "试运行", + "dimension_save": "保存" } diff --git a/packages/web/i18n/en/evaluation_dataset.json b/packages/web/i18n/en/evaluation_dataset.json deleted file mode 100644 index c1bf88a67062..000000000000 --- a/packages/web/i18n/en/evaluation_dataset.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "dataset_name_placeholder": "名称", - "create_new_dataset": "新建数据集", - "smart_generation": "智能生成", - "file_import": "文件导入", - "rename": "重命名", - "delete": "删除", - "confirm_delete_dataset": "确认删除该数据集吗?", - "error_details": "异常详情", - "status_queuing": "排队中", - "status_parsing": "文件解析中", - "status_generating": "数据生成中", - "status_generate_error": "生成异常", - "status_ready": "已就绪", - "status_parse_error": "解析异常", - "click_to_view_details": "点击查看详情", - "table_header_name": "名称", - "table_header_data_count": "数据量", - "table_header_time": "创建/更新时间", - "table_header_status": "状态", - "table_header_creator": "创建人" -} \ No newline at end of file diff --git a/packages/web/i18n/en/evaluation_dimension.json b/packages/web/i18n/en/evaluation_dimension.json deleted file mode 100644 index 
01993b6cb7f4..000000000000 --- a/packages/web/i18n/en/evaluation_dimension.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "create_dimension": "新建维度", - "search_dimension": "搜索评测维度", - "delete_failed": "删除失败", - "delete_success": "删除成功", - "builtin": "内置", - "confirm_delete_dimension": "确认删除该维度?", - "dimension_name": "维度名", - "description": "介绍", - "create_update_time": "创建/更新时间", - "creator": "创建人" -} \ No newline at end of file diff --git a/packages/web/i18n/i18next.d.ts b/packages/web/i18n/i18next.d.ts index 8c55c1cd45ca..b831611dabdd 100644 --- a/packages/web/i18n/i18next.d.ts +++ b/packages/web/i18n/i18next.d.ts @@ -20,8 +20,6 @@ import type chat from './zh-CN/chat.json'; import type login from './zh-CN/login.json'; import type account_model from './zh-CN/account_model.json'; import type dashboard_mcp from './zh-CN/dashboard_mcp.json'; -import type evaluation_dimension from './zh-CN/evaluation_dimension.json'; -import type evaluation_dataset from './zh-CN/evaluation_dataset.json'; import type { I18N_NAMESPACES } from './constants'; export interface I18nNamespaces { @@ -47,8 +45,6 @@ export interface I18nNamespaces { account_model: typeof account_model; dashboard_mcp: typeof dashboard_mcp; dashboard_evaluation: typeof dashboard_evaluation; - evaluation_dataset: typeof evaluation_dataset; - evaluation_dimension: typeof evaluation_dimension; } export type I18nNsType = (keyof I18nNamespaces)[]; diff --git a/packages/web/i18n/zh-CN/dashboard_evaluation.json b/packages/web/i18n/zh-CN/dashboard_evaluation.json index fbf5123e4310..ca15c8fbb99f 100644 --- a/packages/web/i18n/zh-CN/dashboard_evaluation.json +++ b/packages/web/i18n/zh-CN/dashboard_evaluation.json @@ -74,5 +74,79 @@ "evaluation_datasets_tab": "评测数据集", "evaluation_dimensions_tab": "评测维度", "create_new": "新建", - "retry_error_data": "重试异常数据" + "retry_error_data": "重试异常数据", + "dataset_name_placeholder": "名称", + "create_new_dataset": "新建数据集", + "smart_generation": "智能生成", + "file_import": "文件导入", + 
"confirm_delete_dataset": "确认删除该数据集吗?", + "error_details": "异常详情", + "status_queuing": "排队中", + "status_parsing": "文件解析中", + "status_generating": "数据生成中", + "status_generate_error": "生成异常", + "status_ready": "已就绪", + "status_parse_error": "解析异常", + "click_to_view_details": "点击查看详情", + "table_header_name": "名称", + "table_header_data_count": "数据量", + "table_header_time": "创建/更新时间", + "table_header_status": "状态", + "table_header_creator": "创建人", + "create_dimension": "新建维度", + "search_dimension": "搜索评测维度", + "delete_failed": "删除失败", + "delete_success": "删除成功", + "builtin": "内置", + "confirm_delete_dimension": "确认删除该维度?", + "dimension_name": "维度名", + "description": "介绍", + "create_update_time": "创建/更新时间", + "creator": "创建人", + "all": "全部", + "app": "应用", + "citation_template": "引用模板", + "correctness": "正确性", + "conciseness": "简洁性", + "harmfulness": "有害性", + "controversiality": "争议性", + "creativity": "创造性", + "criminality": "犯罪性", + "depth": "深度性", + "details": "细节性", + "dimension_name_label": "维度名", + "dimension_description_label": "维度描述", + "prompt_label": "提示词", + "citation_template_button": "引用模板", + "test_run_title": "试运行", + "question_label": "问题", + "question_placeholder": "请输入问题内容", + "answer_label": "答案", + "reference_answer_label": "参考答案", + "reference_answer_placeholder": "请输入参考答案", + "actual_answer_label": "实际回答", + "actual_answer_placeholder": "请输入实际回答", + "run_result_label": "运行结果", + "start_run_button": "开始运行", + "running_text": "运行中", + "run_success": "运行成功", + "run_failed": "运行失败", + "not_run": "未运行", + "score_unit": "分", + "error_info_label": "报错信息:", + "no_feedback_text": "暂无反馈内容", + "dimension_create_back": "退出", + "dimension_create_test_run": "试运行", + "dimension_create_confirm": "确认", + "dimension_create_success": "创建成功", + "dimension_create_name_required": "请输入名称", + "dimension_create_prompt_required": "请输入提示词", + "dimension_get_data_failed": "获取维度数据失败", + "dimension_data_not_exist": "维度数据不存在", + "dimension_update_success": "更新成功", + 
"dimension_update_failed": "更新失败", + "dimension_name_required": "请输入名称", + "dimension_back": "退出", + "dimension_test_run": "试运行", + "dimension_save": "保存" } diff --git a/packages/web/i18n/zh-CN/evaluation_dataset.json b/packages/web/i18n/zh-CN/evaluation_dataset.json deleted file mode 100644 index c1bf88a67062..000000000000 --- a/packages/web/i18n/zh-CN/evaluation_dataset.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "dataset_name_placeholder": "名称", - "create_new_dataset": "新建数据集", - "smart_generation": "智能生成", - "file_import": "文件导入", - "rename": "重命名", - "delete": "删除", - "confirm_delete_dataset": "确认删除该数据集吗?", - "error_details": "异常详情", - "status_queuing": "排队中", - "status_parsing": "文件解析中", - "status_generating": "数据生成中", - "status_generate_error": "生成异常", - "status_ready": "已就绪", - "status_parse_error": "解析异常", - "click_to_view_details": "点击查看详情", - "table_header_name": "名称", - "table_header_data_count": "数据量", - "table_header_time": "创建/更新时间", - "table_header_status": "状态", - "table_header_creator": "创建人" -} \ No newline at end of file diff --git a/packages/web/i18n/zh-CN/evaluation_dimension.json b/packages/web/i18n/zh-CN/evaluation_dimension.json deleted file mode 100644 index 01993b6cb7f4..000000000000 --- a/packages/web/i18n/zh-CN/evaluation_dimension.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "create_dimension": "新建维度", - "search_dimension": "搜索评测维度", - "delete_failed": "删除失败", - "delete_success": "删除成功", - "builtin": "内置", - "confirm_delete_dimension": "确认删除该维度?", - "dimension_name": "维度名", - "description": "介绍", - "create_update_time": "创建/更新时间", - "creator": "创建人" -} \ No newline at end of file diff --git a/packages/web/i18n/zh-Hant/dashboard_evaluation.json b/packages/web/i18n/zh-Hant/dashboard_evaluation.json index 77bad3bcf516..05d7a81ef75a 100644 --- a/packages/web/i18n/zh-Hant/dashboard_evaluation.json +++ b/packages/web/i18n/zh-Hant/dashboard_evaluation.json @@ -67,5 +67,79 @@ "evaluation_datasets_tab": "評測數據集", "evaluation_dimensions_tab": "評測維度", 
"create_new": "新建", - "retry_error_data": "重試異常數據" + "retry_error_data": "重試異常數據", + "dataset_name_placeholder": "名稱", + "create_new_dataset": "新建數據集", + "smart_generation": "智能生成", + "file_import": "文件導入", + "confirm_delete_dataset": "確認刪除該數據集嗎?", + "error_details": "異常詳情", + "status_queuing": "排隊中", + "status_parsing": "文件解析中", + "status_generating": "數據生成中", + "status_generate_error": "生成異常", + "status_ready": "已就緒", + "status_parse_error": "解析異常", + "click_to_view_details": "點擊查看詳情", + "table_header_name": "名稱", + "table_header_data_count": "數據量", + "table_header_time": "創建/更新時間", + "table_header_status": "狀態", + "table_header_creator": "創建人", + "create_dimension": "新建維度", + "search_dimension": "搜索評測維度", + "delete_failed": "刪除失敗", + "delete_success": "刪除成功", + "builtin": "內建", + "confirm_delete_dimension": "確認刪除該維度?", + "dimension_name": "維度名", + "description": "介紹", + "create_update_time": "創建/更新時間", + "creator": "創建人", + "all": "全部", + "app": "應用", + "citation_template": "引用模板", + "correctness": "正確性", + "conciseness": "簡潔性", + "harmfulness": "有害性", + "controversiality": "爭議性", + "creativity": "創造性", + "criminality": "犯罪性", + "depth": "深度性", + "details": "細節性", + "dimension_name_label": "維度名", + "dimension_description_label": "維度描述", + "prompt_label": "提示詞", + "citation_template_button": "引用模板", + "test_run_title": "試運行", + "question_label": "問題", + "question_placeholder": "請輸入問題內容", + "answer_label": "答案", + "reference_answer_label": "參考答案", + "reference_answer_placeholder": "請輸入參考答案", + "actual_answer_label": "實際回答", + "actual_answer_placeholder": "請輸入實際回答", + "run_result_label": "運行結果", + "start_run_button": "開始運行", + "running_text": "運行中", + "run_success": "運行成功", + "run_failed": "運行失敗", + "not_run": "未運行", + "score_unit": "分", + "error_info_label": "報錯信息:", + "no_feedback_text": "暫無反饋內容", + "dimension_create_back": "退出", + "dimension_create_test_run": "試運行", + "dimension_create_confirm": "確認", + "dimension_create_success": "創建成功", + 
"dimension_create_name_required": "請輸入名稱", + "dimension_create_prompt_required": "請輸入提示詞", + "dimension_get_data_failed": "獲取維度數據失敗", + "dimension_data_not_exist": "維度數據不存在", + "dimension_update_success": "更新成功", + "dimension_update_failed": "更新失敗", + "dimension_name_required": "請輸入名稱", + "dimension_back": "退出", + "dimension_test_run": "試運行", + "dimension_save": "保存" } diff --git a/packages/web/i18n/zh-Hant/evaluation_dataset.json b/packages/web/i18n/zh-Hant/evaluation_dataset.json deleted file mode 100644 index 53686d0264cb..000000000000 --- a/packages/web/i18n/zh-Hant/evaluation_dataset.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "dataset_name_placeholder": "名稱", - "create_new_dataset": "新建數據集", - "smart_generation": "智能生成", - "file_import": "文件導入", - "rename": "重命名", - "delete": "刪除", - "confirm_delete_dataset": "確認刪除該數據集嗎?", - "error_details": "異常詳情", - "status_queuing": "排隊中", - "status_parsing": "文件解析中", - "status_generating": "數據生成中", - "status_generate_error": "生成異常", - "status_ready": "已就緒", - "status_parse_error": "解析異常", - "click_to_view_details": "點擊查看詳情", - "table_header_name": "名稱", - "table_header_data_count": "數據量", - "table_header_time": "創建/更新時間", - "table_header_status": "狀態", - "table_header_creator": "創建人" -} \ No newline at end of file diff --git a/packages/web/i18n/zh-Hant/evaluation_dimension.json b/packages/web/i18n/zh-Hant/evaluation_dimension.json deleted file mode 100644 index eb32afb89d81..000000000000 --- a/packages/web/i18n/zh-Hant/evaluation_dimension.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "create_dimension": "新建維度", - "search_dimension": "搜索評測維度", - "delete_failed": "刪除失敗", - "delete_success": "刪除成功", - "builtin": "內建", - "confirm_delete_dimension": "確認刪除該維度?", - "dimension_name": "維度名", - "description": "介紹", - "create_update_time": "創建/更新時間", - "creator": "創建人" -} \ No newline at end of file diff --git a/projects/app/src/components/common/folder/SelectOneResource.tsx b/projects/app/src/components/common/folder/SelectOneResource.tsx 
index 5d8043c328aa..76e6efa54b22 100644 --- a/projects/app/src/components/common/folder/SelectOneResource.tsx +++ b/projects/app/src/components/common/folder/SelectOneResource.tsx @@ -16,6 +16,7 @@ import { useTranslation } from 'next-i18next'; type ResourceItemType = GetResourceListItemResponse & { open: boolean; children?: ResourceItemType[]; + showAvatar?: boolean; }; const rootId = 'root'; @@ -24,28 +25,33 @@ const SelectOneResource = ({ server, value, onSelect, - maxH = ['80vh', '600px'] + maxH = ['80vh', '600px'], + showRoot = true }: { server: (e: GetResourceFolderListProps) => Promise; value?: ParentIdType; onSelect: (e?: ResourceItemType) => any; maxH?: BoxProps['maxH']; + showRoot?: boolean; }) => { const { t } = useTranslation(); const [dataList, setDataList] = useState([]); const [requestingIdList, setRequestingIdList] = useState([]); const concatRoot = useMemo(() => { - const root: ResourceItemType = { - id: rootId, - open: true, - avatar: FolderImgUrl, - name: t('common:root_folder'), - isFolder: true, - children: dataList - }; - return [root]; - }, [dataList, t]); + if (showRoot) { + const root: ResourceItemType = { + id: rootId, + open: true, + avatar: FolderImgUrl, + name: t('common:root_folder'), + isFolder: true, + children: dataList + }; + return [root]; + } + return dataList; + }, [dataList, t, showRoot]); const { runAsync: requestServer } = useRequest2((e: GetResourceFolderListProps) => { if (requestingIdList.includes(e.parentId)) return Promise.reject(null); @@ -78,7 +84,7 @@ const SelectOneResource = ({ alignItems={'center'} cursor={'pointer'} py={1} - pl={index === 0 ? '0.5rem' : `${1.75 * (index - 1) + 0.5}rem`} + pl={index === 0 && showRoot ? 
'0.5rem' : `${1.75 * (index - 1) + 0.5}rem`} pr={2} borderRadius={'md'} _hover={{ @@ -110,7 +116,7 @@ const SelectOneResource = ({ } })} > - {index !== 0 && ( + {(index !== 0 || !showRoot) && ( )} - + {item.showAvatar !== false && ( + + )} {item.name} diff --git a/projects/app/src/pageComponents/dashboard/evaluation/dimension/AnswerTextarea.tsx b/projects/app/src/pageComponents/dashboard/evaluation/dimension/AnswerTextarea.tsx new file mode 100644 index 000000000000..177f9b2fa237 --- /dev/null +++ b/projects/app/src/pageComponents/dashboard/evaluation/dimension/AnswerTextarea.tsx @@ -0,0 +1,133 @@ +import React, { useCallback, useEffect, useRef, useState } from 'react'; +import { Box, Flex, FormLabel, Textarea } from '@chakra-ui/react'; +import MyIconButton from '@fastgpt/web/components/common/Icon/button'; + +interface AnswerTextareaProps { + value: string; + onChange: (value: string) => void; + label?: string; + placeholder?: string; + disabled?: boolean; + maxToken?: number; + required?: boolean; +} + +const AnswerTextarea = ({ + value, + onChange, + label, + placeholder, + disabled = false, + maxToken, + required = false +}: AnswerTextareaProps) => { + const [fold, setFold] = useState(true); + const TextareaDom = useRef(null); + + const autoHeightTextarea = useCallback((element: HTMLTextAreaElement) => { + element.style.height = '40px'; + element.style.height = `${element.scrollHeight + 5}px`; + }, []); + + useEffect(() => { + if (TextareaDom.current) { + autoHeightTextarea(TextareaDom.current); + } + }, [value, autoHeightTextarea]); + + const handleTextChange = useCallback( + (e: React.ChangeEvent) => { + const newValue = e.target.value; + onChange(newValue); + if (e.target) { + autoHeightTextarea(e.target); + } + }, + [onChange, autoHeightTextarea] + ); + + const handleFocus = useCallback(() => { + setFold(false); + }, []); + + const handleClickExpand = useCallback(() => { + setFold(!fold); + }, [fold]); + + return ( + + {/* Header */} + {label && ( + + 
{label} + + + )} + + {/* Content */} + + {disabled ? ( + + {value} + + ) : ( +