From 5e83121f92734f6cb84a7e1521ef4239daad7ce1 Mon Sep 17 00:00:00 2001
From: xhrxgr <1749567727@qq.com>
Date: Sat, 14 Mar 2026 17:08:46 +0800
Subject: [PATCH] =?UTF-8?q?feat:=20=E6=B7=BB=E5=8A=A0=E6=9C=80=E6=96=B0LM?=
=?UTF-8?q?=20Studio=E7=AE=80=E4=BD=93=E4=B8=AD=E6=96=87=E6=9C=AC=E5=9C=B0?=
=?UTF-8?q?=E5=8C=96=E6=96=87=E4=BB=B6=EF=BC=88zh-CN=EF=BC=89?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
zh-CN/chat.json | 192 ++++++++-------
zh-CN/config.json | 559 +++++++++++++++++++++---------------------
zh-CN/developer.json | 166 +++++++------
zh-CN/discover.json | 32 +--
zh-CN/download.json | 42 ++--
zh-CN/models.json | 97 +++++---
zh-CN/onboarding.json | 30 +--
zh-CN/settings.json | 225 +++++++++--------
zh-CN/shared.json | 143 +++++------
zh-CN/sidebar.json | 4 +-
10 files changed, 778 insertions(+), 712 deletions(-)
diff --git a/zh-CN/chat.json b/zh-CN/chat.json
index eb991101..e98bd34c 100644
--- a/zh-CN/chat.json
+++ b/zh-CN/chat.json
@@ -1,137 +1,139 @@
{
"modelLoaderPlaceholder": "选择要加载的模型",
- "systemPromptPlaceholder": "设置系统提示",
+ "systemPromptPlaceholder": "设置系统提示词",
"userRoleText": "用户",
"assistantRoleText": "助手",
"addMessageButtonText": "添加",
- "addMessageButtonText/toolTip": "在不触发预测的情况下将消息插入上下文中",
+ "addMessageButtonText/toolTip": "插入一条消息到上下文中,但不触发预测",
"sendMessageButtonText": "发送",
- "sendMessageButtonText/toolTip": "将您的提示和对话历史发送给模型进行处理",
+ "sendMessageButtonText/toolTip": "将你的提示词和对话历史发送给模型处理",
"ejectButtonText": "卸载",
"unloadTooltip": "从内存中卸载模型",
"cancelButtonText": "取消",
"loadButtonText": "加载",
"advancedSegmentText": "高级",
- "chatSegmentText": "聊天",
- "chatSidebarTitle": "聊天列表",
- "newChatButton": "新建聊天",
+ "chatSegmentText": "对话",
+ "chatSidebarTitle": "对话列表",
+ "newChatButton": "新建",
"newFolderButton": "新建文件夹",
"viewModeLabel": "视图模式",
- "noChatSelected": "未选择聊天",
- "chatViewOptions": "聊天视图选项",
+ "noChatSelected": "未选择对话",
+ "chatViewOptions": "对话视图选项",
"uiControls/title": "显示设置",
- "noChatSelectedPlaceholder": "请选择一个聊天",
- "unnamedChat": "未命名聊天",
+ "noChatSelectedPlaceholder": "选择一个对话",
+ "unnamedChat": "未命名对话",
"emptyFolder": "文件夹为空",
- "tokenCount": "token数",
- "messageTokenCount": "输入token数",
- "tokenCount/hint": "消息中的token数量。使用当前选定模型的分词器计算。\n\n需要加载模型。",
- "messageTokenCount/hint": "消息中的token数量。使用当前选定模型的分词器计算。\n\n**不包括**附件中的token估计值。",
+ "tokenCount": "Token 数量",
+ "messageTokenCount": "输入 Token 数量",
+ "tokenCount/hint": "消息中的 Token 数量。使用当前选中模型的分词器统计。\n\n需要模型已加载。",
+ "messageTokenCount/hint": "消息中的 Token 数量。使用当前选中模型的分词器统计。\n\n**不包含**文件附件的 Token 估算。",
"notes": "对话笔记",
"notes/add/first": "添加笔记",
- "notes/add/another": "再加一条笔记",
- "notes/hint": "保存此聊天的笔记。笔记仅供您参考,不会发送给模型。所有更改将会自动保存。",
- "notes/placeholder": "在这里键入您的笔记...",
+ "notes/add/another": "添加另一条笔记",
+ "notes/hint": "为此对话保存笔记。笔记仅供你参考,不会发送给模型。所有更改自动保存。",
+ "notes/placeholder": "在此输入笔记...",
"notes/delete": "删除笔记",
"notes/noteLabel": "笔记",
"notes/copyContent": "复制笔记内容",
"actions/sendMessage/error": "发送消息失败",
"actions/loadModel/error": "🥲 加载模型失败",
- "actions/addFile": "[实验性] 将文件附加到此消息\n(.pdf, 纯文本, 或 .docx)",
+ "actions/addFile": "[实验性功能] 附加文件到此消息\n(.pdf、纯文本或 .docx)",
"actions/addFile/label": "附加文件",
- "actions/changeRole": "在用户和助手角色之间切换。\n\n这对于引导对话朝特定方向发展非常有用。\n\n可用于构建‘少样本学习’或‘情境学习’场景",
- "actions/addImage": "添加图片",
+ "actions/changeRole": "在用户和助手角色之间切换。\n\n这有助于引导对话朝特定方向发展。\n\n可用于设置\"少样本学习\"或\"上下文学习\"场景",
+ "actions/addImage": "附加图片",
"actions/deleteMessage": "删除消息",
- "actions/deleteMessage/confirmation": "您确定要删除这条消息吗?",
+ "actions/deleteMessage/confirmation": "确定要删除此消息吗?",
"actions/copyMessage": "复制消息",
"actions/editMessage": "编辑消息",
- "actions/editMessage/cannotEditPreprocessed": "无法编辑预处理的消息,因为它们在运行预处理器后会被覆盖。要编辑消息,您可以:\n\n - 切换到原始消息并对其进行编辑,或者\n - 更改预处理器,使其产生所需的输出。",
+ "actions/editMessage/cannotEditPreprocessed": "无法编辑预处理后的消息,因为运行预处理器后它们会被覆盖。要编辑消息,请:\n\n - 切换到原始消息并进行编辑,或\n - 修改预处理器以产生期望的输出。",
"actions/regenerateMessage": "重新生成消息",
"actions/regenerateMessage/error": "重新生成消息失败",
- "actions/branchChat": "在此消息之后分支聊天",
- "actions/branchChat/error": "分支聊天失败",
+ "actions/branchChat": "从此消息分支对话",
+ "actions/branchChat/error": "分支对话失败",
"actions/continueAssistantMessage": "继续助手消息",
"actions/continueAssistantMessage/error": "继续助手消息失败",
- "actions/predictNext": "生成AI响应",
- "actions/predictNext/error": "生成AI响应失败",
+ "actions/predictNext": "生成 AI 回复",
+ "actions/predictNext/error": "生成 AI 回复失败",
"actions/loadLastModel": "重新加载上次使用的模型",
- "actions/loadLastModel/tooltip": "点击以加载上次与该聊天一起使用的模型:\n\n{{lastModel}}",
+ "actions/loadLastModel/tooltip": "点击加载此对话上次使用的模型:\n\n{{lastModel}}",
"actions/loadLastModel/error": "加载上次使用的模型失败。",
+ "actions/clearLastUsedModel": "清除上次使用的模型",
+ "actions/clearLastUsedModel/error": "清除上次使用的模型失败。",
"actions/continueCurrentModel": "使用当前模型",
"actions/continueCurrentModel/tooltip": "当前模型:{{currentModel}}",
"actions/changeToLastUsedModel": "加载 {{lastModel}}",
"actions/changeToLastUsedModel/error": "切换到上次使用的模型失败。",
- "actions/changeToLastUsedModel/tooltip": "您上次在这个聊天中发送消息时使用了不同的模型。点击以卸载当前选定的模型({{currentModel}})并加载上次与这个聊天一起使用的模型:\n\n{{lastModel}}",
+ "actions/changeToLastUsedModel/tooltip": "你上次在此对话中发送消息时使用了不同的模型。点击卸载当前选中的模型({{currentModel}})并加载此对话上次使用的模型:\n\n{{lastModel}}",
"actions/switchToLastUsedModel": "切换到 {{lastModel}}",
- "actions/switchToLastUsedModel/tooltip": "点击以切换到上次与这个聊天一起使用的模型:\n\n{{lastModel}}",
+ "actions/switchToLastUsedModel/tooltip": "点击切换到上次使用的模型:\n\n{{lastModel}}",
"actions/loadModel": "加载模型",
- "actions/toggleViewingProcessed/currentlyFalse": "当前查看的是原始消息。点击以查看预处理后的消息。",
- "actions/toggleViewingProcessed/currentlyTrue": "当前查看的是预处理后的消息。点击以查看原始消息。",
- "actions/toggleViewingProcessed/hint": "在消息发送给模型之前,它可能会被提示预处理器预处理。点击以切换查看原始消息和预处理后的消息。只有预处理后的消息会发送给模型。",
+ "actions/toggleViewingProcessed/currentlyFalse": "当前正在查看原始消息。点击查看预处理后的消息。",
+ "actions/toggleViewingProcessed/currentlyTrue": "当前正在查看预处理后的消息。点击查看原始消息。",
+ "actions/toggleViewingProcessed/hint": "消息发送给模型前,可能会由提示词预处理器进行预处理。点击可在查看原始消息和预处理后消息之间切换。只有预处理后的消息会发送给模型。",
"editMessageConfirm/title": "保留更改?",
- "editMessageConfirm/message": "您已对消息进行了更改。您想要保留这些更改吗?",
+ "editMessageConfirm/message": "你已修改消息。是否保留更改?",
"editMessageConfirm/keepEditing": "继续编辑",
"editMessageConfirm/save": "保存",
"editMessageConfirm/discard": "放弃更改",
- "tokenCount/totalNotAvailable": "token:{{current}}",
- "tokenCount/totalAvailable": "token:{{current}}/{{total}}",
- "tokenCount/totalAvailablePercentage": "上下文已满 {{percentage}}%",
- "tokenCount/contextOverflow": "未经处理的上下文大于模型的最大token限制。根据您的上下文溢出策略,上下文可能会被截断,或者消息可能不会被发送。",
+ "tokenCount/totalNotAvailable": "Token:{{current}}",
+ "tokenCount/totalAvailable": "Token:{{current}}/{{total}}",
+ "tokenCount/totalAvailablePercentage": "上下文已使用 {{percentage}}%",
+ "tokenCount/contextOverflow": "未处理的上下文超过了模型的最大 Token 限制。根据你的上下文溢出策略,上下文可能会被截断或消息可能无法发送。",
"modelLoader/manualLoadParams/label": "手动选择模型加载参数",
- "modelLoader/manualLoadParams/hint/before": "(或按住",
- "modelLoader/manualLoadParams/hint/after": ")",
+ "modelLoader/manualLoadParams/hint/before": "(或按住 ",
+ "modelLoader/manualLoadParams/hint/after": ")",
"actions/move/error": "移动失败",
"actions/rename/error": "重命名失败",
- "actions/createChatAtRoot": "新建聊天...",
- "actions/createChatAtRoot/error": "在根目录创建聊天失败",
+ "actions/createChatAtRoot": "新建对话...",
+ "actions/createChatAtRoot/error": "在根目录创建对话失败",
"actions/createFolderAtRoot": "新建文件夹...",
"actions/createFolderAtRoot/error": "在根目录创建文件夹失败",
"actions/openInFolder/mac": "在 Finder 中显示",
"actions/openInFolder/pc": "在文件资源管理器中显示",
- "actions/createChat/error": "创建聊天失败",
- "actions/deleteChat/errorTitle": "删除聊天失败",
+ "actions/createChat/error": "创建对话失败",
+ "actions/deleteChat/errorTitle": "删除对话失败",
- "userFile/fileSizeLimit": "文件大小限制为",
+ "userFile/fileSizeLimit": "文件大小限制为 ",
"userFile/noImageSupport": "模型不支持图片输入",
"userFile/errorPrefix": "错误 - ",
- "userFile/supportedImagePrefix": "不支持的图片类型 - 仅支持",
- "userFile/supportedImageSuffix": "。",
+ "userFile/supportedImagePrefix": "不支持的图片类型 - 仅支持 ",
+ "userFile/supportedImageSuffix": "",
"userFile/unsupportedFileType": "不支持的文件类型 - 仅支持图片、PDF 和 .txt 文件。",
- "userFile/maxFilesPerMessage": "每条消息的最大文件数已达到。不能添加超过 {{files}} 个文件。",
- "userFile/maxFileSizePerMessage": "每条消息的最大文件大小已达到。不能添加超过 {{size}} 的文件。",
- "userFile/maxFileSizePerConversation": "会话的文件大小已达上限,无法添加大于 {{size}} 的文件。",
- "userFile/failedToUploadError/title": "文件上传失败",
- "userFile/failedToAddFile/title": "文件添加到对话失败",
+ "userFile/maxFilesPerMessage": "每条消息的文件数量已达上限。无法添加超过 {{files}} 个文件。",
+ "userFile/maxFileSizePerMessage": "每条消息的文件大小已达上限。无法添加超过 {{size}} 的文件。",
+ "userFile/maxFileSizePerConversation": "每个对话的文件大小已达上限。无法添加超过 {{size}} 的文件。",
+ "userFile/failedToUploadError/title": "上传文件失败",
+ "userFile/failedToAddFile/title": "添加文件到对话失败",
"errorTitle": "错误",
"userFile/chatTerminalDocumentsCount_one": "对话中有 {{count}} 个文档",
"userFile/chatTerminalDocumentsCount_other": "对话中有 {{count}} 个文档",
- "prediction/busyModel/title": "模型忙碌中",
+ "prediction/busyModel/title": "模型正忙",
"prediction/busyModel/message": "请等待模型完成后再试",
"prediction/noModel/title": "未选择模型",
"prediction/modelLoading": "消息已排队,将在模型加载完成后发送",
- "prediction/noModel/message": "选择一个模型以发送消息",
+ "prediction/noModel/message": "选择模型以发送消息",
"prediction/unloadModel/error": "卸载模型失败",
"retrieval/user/processingLabel": "AI 正在思考...",
- "retrieval/powerUser/intermediateStepsHidden": "中间步骤已隐藏。点击以展开。",
- "retrieval/actions/clickToExpand": "点击以展开中间步骤",
- "retrieval/actions/clickToCollapse": "点击以折叠中间步骤",
+ "retrieval/powerUser/intermediateStepsHidden": "中间步骤已隐藏。点击展开。",
+ "retrieval/actions/clickToExpand": "点击展开中间步骤",
+ "retrieval/actions/clickToCollapse": "点击收起中间步骤",
- "style": "聊天外观",
+ "style": "外观",
"style/viewMode/markdown": "Markdown",
"style/viewMode/plaintext": "纯文本",
"style/viewMode/monospace": "等宽字体",
- "speculativeDecodingVisualization/toggle": "可视化已采纳的草稿token",
- "speculativeDecodingVisualization/fromDraftModel_one": "已采纳的草稿token",
- "speculativeDecodingVisualization/fromDraftModel_other": "已采纳的草稿token",
- "speculativeDecodingVisualization/cannotChangeViewMode": "可视化草稿token时无法切换显示模式。",
+ "speculativeDecodingVisualization/toggle": "可视化已接受的草稿 Token",
+ "speculativeDecodingVisualization/fromDraftModel_one": "已接受的草稿 Token",
+ "speculativeDecodingVisualization/fromDraftModel_other": "已接受的草稿 Token",
+ "speculativeDecodingVisualization/cannotChangeViewMode": "可视化草稿 Token 时无法更改视图模式。",
"style/fontSize/label": "字体大小",
"style/fontSize/medium": "默认",
@@ -141,9 +143,9 @@
"style/debugBlocks/label": "显示调试信息块",
"style/thinkingUI/label": "默认展开推理块",
- "style/chatFullWidth/label": "聊天容器宽度适应窗口",
+ "style/chatFullWidth/label": "将对话容器扩展到窗口宽度",
- "style/chatUtilityMenusShowLabel/label": "显示聊天实用工具菜单",
+ "style/chatUtilityMenusShowLabel/label": "显示对话工具菜单标签",
"messageBlocks": {
"expandBlockTooltip": "展开内容",
@@ -154,20 +156,20 @@
"expandTooltip": "展开调试信息块"
}
},
-
- "chatTabOptions/clearAllMessages": "清空所有聊天记录...",
- "chatTabOptions/duplicateChat": "复制聊天",
- "topBarActions/duplicateChat": "复制聊天",
- "topBarActions/clearChat": "清除所有消息",
- "topBarActions/clearChatConfirmation": "您确定要清除此聊天中的所有消息吗?",
+ "chatTabOptions/clearAllMessages": "清空所有消息...",
+ "chatTabOptions/duplicateChat": "复制对话",
+
+ "topBarActions/duplicateChat": "复制",
+ "topBarActions/clearChat": "清空全部",
+ "topBarActions/clearChatConfirmation": "确定要清空此对话中的所有消息吗?",
"topBarActions/clearChatCancel": "取消",
- "topBarActions/clearChatDelete": "全部清除",
-
- "noModels.indexing": "正在索引模型文件...(这可能需要一段时间)",
- "noModels.downloading": "正在下载您的第一个LLM...",
- "noModels": "还没有LLM!下载一个开始吧!",
-
+ "topBarActions/clearChatDelete": "清空全部",
+
+ "noModels.indexing": "正在索引模型文件...(可能需要片刻)",
+ "noModels.downloading": "正在下载你的第一个大语言模型...",
+ "noModels": "还没有大语言模型!下载一个开始使用吧!",
+
"plugins": {
"pluginTrigger": {
"noPlugins": "插件",
@@ -178,32 +180,32 @@
"dropdown": {
"configure": "配置",
"disable": "禁用",
- "fork": "派生",
+ "fork": "分叉",
"uninstall": "卸载"
},
"actionButtons": {
- "create": "+新建",
+ "create": "创建",
"import": "导入",
"discover": "发现"
},
"recentlyCreated": {
"title": "最近创建的插件",
- "placeholder": "你创建的插件会显示在这里"
+ "placeholder": "你创建的插件将显示在此处"
},
- "startRunningDevelopmentPlugin/error": "开发模式插件启动失败",
- "stopRunningDevelopmentPlugin/error": "开发模式插件停止失败",
+ "startRunningDevelopmentPlugin/error": "启动开发模式插件失败",
+ "stopRunningDevelopmentPlugin/error": "停止开发模式插件失败",
"forceReInitPlugin/error": "重启插件失败"
},
"pluginConfiguration": {
"title": "插件配置",
- "selectAPlugin": "选择一个插件以编辑其配置",
+ "selectAPlugin": "选择插件以编辑其配置",
"preprocessorAndGenerator": "此插件包含自定义预处理器和生成器",
"generatorOnly": "此插件包含自定义生成器",
"preprocessorOnly": "此插件包含自定义预处理器"
},
"instructions": {
- "runTheFollowing": "要运行你的插件,请打开终端并输入:",
- "pushTo": "将插件推送到 Hub 与他人分享(可选)",
+ "runTheFollowing": "要运行插件,请打开终端并输入",
+ "pushTo": "推送到 Hub 与他人分享你的插件(可选)",
"createdSuccessfully": "插件创建成功",
"creatingPlugin": "正在创建插件...",
"projectFilesTitle": "项目文件",
@@ -218,26 +220,28 @@
}
},
"localFork": {
- "error": "创建插件的本地副本失败"
+ "error": "创建插件本地开发副本失败。"
},
"restartErrorPlugin/error": "重启插件失败"
},
"genInfo": {
- "tokensPerSecond": "{{tokensPerSecond}} token/s",
- "predictedTokensCount": "{{predictedTokensCount}} token",
- "timeToFirstTokenSec": "首个token用时 {{timeToFirstTokenSec}} s",
- "stopReason": "停止原因: {{stopReason}}",
- "stopReason.userStopped": "用户已停止",
+ "tokensPerSecond": "{{tokensPerSecond}} Token/秒",
+ "predictedTokensCount": "{{predictedTokensCount}} 个 Token",
+ "timeToFirstTokenSec": "首 Token 时间 {{timeToFirstTokenSec}} 秒",
+ "stopReason": "停止原因:{{stopReason}}",
+ "stopReason.userStopped": "用户停止",
"stopReason.modelUnloaded": "模型已卸载",
"stopReason.failed": "生成失败",
- "stopReason.eosFound": "检测到 EOS token",
- "stopReason.stopStringFound": "发现停止字符串",
+ "stopReason.eosFound": "找到 EOS Token",
+ "stopReason.stopStringFound": "找到停止字符串",
"stopReason.toolCalls": "工具调用",
- "stopReason.maxPredictedTokensReached": "达到最大预测词元",
- "stopReason.contextLengthReached": "达到上下文长度上限",
+ "stopReason.maxPredictedTokensReached": "达到最大预测 Token 数",
+ "stopReason.contextLengthReached": "达到上下文长度限制",
"speculativeDecodedBy": "草稿模型:{{decodedBy}}",
- "speculativeDecodingStats": "已采纳 {{accepted}}/{{total}} 个草稿token({{percentage}}%)"
+ "speculativeDecodingStats": "已接受 {{accepted}}/{{total}} 个草稿 Token({{percentage}}%)",
+ "speculativeDecodingAcceptedPercentage": "{{percentage}}% 草稿 Token 被接受",
+ "speculativeDecodingTooltip": "已接受 {{accepted}}/{{total}} 个草稿 Token"
},
"tabs": {
diff --git a/zh-CN/config.json b/zh-CN/config.json
index 48a45d2f..9ac4169e 100644
--- a/zh-CN/config.json
+++ b/zh-CN/config.json
@@ -4,7 +4,7 @@
"showAdvancedSettings": "显示高级设置",
"showAll": "全部",
"basicSettings": "基础",
- "configSubtitle": "加载或保存预设并尝试模型参数覆盖",
+ "configSubtitle": "加载或保存预设,并尝试模型参数覆盖",
"inferenceParameters/title": "预测参数",
"inferenceParameters/info": "尝试影响预测的参数。",
"generalParameters/title": "通用",
@@ -12,238 +12,245 @@
"basicTab": "基础",
"advancedTab": "高级",
"advancedTab/title": "🧪 高级配置",
- "advancedTab/expandAll": "展开所有",
+ "advancedTab/expandAll": "全部展开",
"advancedTab/overridesTitle": "配置覆盖",
- "advancedTab/noConfigsText": "您没有未保存的更改 - 编辑上方值以在此处查看覆盖。",
+ "advancedTab/noConfigsText": "你没有任何未保存的更改 - 编辑上方数值以在此处查看覆盖项。",
"loadInstanceFirst": "加载模型以查看可配置参数",
"noListedConfigs": "无可配置参数",
"generationParameters/info": "尝试影响文本生成的基础参数。",
"loadParameters/title": "加载参数",
- "loadParameters/description": "控制模型初始化和加载到内存的方式的设置。",
+ "loadParameters/description": "控制模型初始化并加载到内存的设置。",
"loadParameters/reload": "重新加载以应用更改",
"loadParameters/reload/error": "重新加载模型失败",
"discardChanges": "放弃更改",
"loadModelToSeeOptions": "加载模型以查看选项",
- "schematicsError.title": "配置结构在以下字段存在错误:",
+ "schematicsError.title": "配置架构在以下字段中包含错误:",
"manifestSections": {
"structuredOutput/title": "结构化输出",
- "speculativeDecoding/title": "投机解码",
+ "speculativeDecoding/title": "推测解码",
"sampling/title": "采样",
"settings/title": "设置",
- "toolUse/title": "工具调用",
+ "toolUse/title": "工具使用",
"promptTemplate/title": "提示词模板",
"customFields/title": "自定义字段"
},
- "llm.prediction.systemPrompt/title": "系统提示",
- "llm.prediction.systemPrompt/description": "使用此字段向模型提供背景指令,如一套规则、约束或一般要求。",
+ "llm.prediction.systemPrompt/title": "系统提示词",
+ "llm.prediction.systemPrompt/description": "使用此字段为模型提供背景指令,例如一组规则、约束或通用要求。",
"llm.prediction.systemPrompt/subTitle": "AI 指南",
"llm.prediction.systemPrompt/openEditor": "编辑器",
"llm.prediction.systemPrompt/closeEditor": "关闭编辑器",
- "llm.prediction.systemPrompt/openedEditor": "在编辑器中打开...",
- "llm.prediction.systemPrompt/edit": "编辑系统提示符...",
- "llm.prediction.systemPrompt/addInstructionsWithMore": "添加说明...",
- "llm.prediction.systemPrompt/addInstructions": "添加说明",
+ "llm.prediction.systemPrompt/openedEditor": "已在编辑器中打开...",
+ "llm.prediction.systemPrompt/edit": "编辑系统提示词...",
+ "llm.prediction.systemPrompt/addInstructionsWithMore": "添加指令...",
+ "llm.prediction.systemPrompt/addInstructions": "添加指令",
"llm.prediction.temperature/title": "温度",
- "llm.prediction.temperature/subTitle": "引入多少随机性。0 将始终产生相同的结果,而较高值将增加创造性和变化。",
- "llm.prediction.temperature/info": "来自 llama.cpp 帮助文档:\"默认值为 <{{dynamicValue}}>,它在随机性和确定性之间提供了平衡。极端情况下,温度为 0 会始终选择最可能的下一个token,导致每次运行的输出相同\"",
+ "llm.prediction.temperature/subTitle": "引入多少随机性。0 将每次产生相同结果,而较高值将增加创造性和变化性",
+ "llm.prediction.temperature/info": "来自 llama.cpp 帮助文档:\"默认值为 <{{dynamicValue}}>,在随机性和确定性之间提供平衡。在极端情况下,温度为 0 将始终选择最可能的下一个 Token,导致每次运行输出相同\"",
"llm.prediction.llama.sampling/title": "采样",
"llm.prediction.topKSampling/title": "Top K 采样",
- "llm.prediction.topKSampling/subTitle": "将下一个token限制为模型预测的前 k 个最可能的token。作用类似于温度",
- "llm.prediction.topKSampling/info": "来自 llama.cpp 帮助文档:\n\nTop-k 采样是一种仅从模型预测的前 k 个最可能的token中选择下一个token的文本生成方法。\n\n它有助于减少生成低概率或无意义token的风险,但也可能限制输出的多样性。\n\n更高的 top-k 值(例如,100)将考虑更多token,从而生成更多样化的文本,而较低的值(例如,10)将专注于最可能的token,生成更保守的文本。\n\n• 默认值为 <{{dynamicValue}}>",
- "llm.prediction.llama.cpuThreads/title": "CPU 线程",
+ "llm.prediction.topKSampling/subTitle": "将下一个 Token 限制为概率最高的前 k 个 Token 之一。作用类似于温度",
+ "llm.prediction.topKSampling/info": "来自 llama.cpp 帮助文档:\n\nTop-k 采样是一种文本生成方法,仅从模型预测的前 k 个最可能的 Token 中选择下一个 Token。\n\n它有助于降低生成低概率或无意义 Token 的风险,但也可能限制输出的多样性。\n\n较高的 top-k 值(如 100)将考虑更多 Token 并产生更多样化的文本,而较低值(如 10)将聚焦于最可能的 Token 并生成更保守的文本。\n\n• 默认值为 <{{dynamicValue}}>",
+ "llm.prediction.llama.cpuThreads/title": "CPU 线程数",
"llm.prediction.llama.cpuThreads/subTitle": "推理期间使用的 CPU 线程数",
- "llm.prediction.llama.cpuThreads/info": "计算期间要使用的线程数。增加线程数并不总是与更好的性能相关联。默认值为 <{{dynamicValue}}>。",
- "llm.prediction.maxPredictedTokens/title": "限制响应长度",
- "llm.prediction.maxPredictedTokens/subTitle": "可选地限制 AI 响应的长度",
- "llm.prediction.maxPredictedTokens/info": "控制聊天机器人的响应最大长度。开启以设置响应的最大长度限制,或关闭以让聊天机器人决定何时停止。",
- "llm.prediction.maxPredictedTokens/inputLabel": "最大响应长度(token)",
- "llm.prediction.maxPredictedTokens/wordEstimate": "约 {{maxWords}} 词",
+ "llm.prediction.llama.cpuThreads/info": "计算期间使用的线程数。增加线程数并不总是与更好的性能相关。默认值为 <{{dynamicValue}}>。",
+ "llm.prediction.maxPredictedTokens/title": "限制回复长度",
+ "llm.prediction.maxPredictedTokens/subTitle": "可选地限制 AI 回复的最大长度",
+ "llm.prediction.maxPredictedTokens/info": "控制聊天机器人回复的最大长度。开启以设置回复的最大长度限制,或关闭以让聊天机器人自行决定何时停止。",
+ "llm.prediction.maxPredictedTokens/inputLabel": "最大回复长度(Token)",
+ "llm.prediction.maxPredictedTokens/wordEstimate": "约 {{maxWords}} 个单词",
"llm.prediction.repeatPenalty/title": "重复惩罚",
- "llm.prediction.repeatPenalty/subTitle": "多大程度上避免重复相同的token",
- "llm.prediction.repeatPenalty/info": "来自 llama.cpp 帮助文档:\"有助于防止模型生成重复或单调的文本。\n\n更高的值(例如,1.5)将更强烈地惩罚重复,而更低的值(例如,0.9)将更为宽容。\" • 默认值为 <{{dynamicValue}}>",
- "llm.prediction.minPSampling/title": "最小 P 采样",
- "llm.prediction.minPSampling/subTitle": "token被选为输出的最低基本概率",
- "llm.prediction.minPSampling/info": "来自 llama.cpp 帮助文档:\n\n相对于最可能token的概率,token被视为考虑的最低概率。必须在 [0, 1] 范围内。\n\n• 默认值为 <{{dynamicValue}}>",
+ "llm.prediction.repeatPenalty/subTitle": "对重复相同 Token 的惩罚程度",
+ "llm.prediction.repeatPenalty/info": "来自 llama.cpp 帮助文档:\"帮助防止模型生成重复或单调的文本。\n\n较高值(如 1.5)将更强烈地惩罚重复,而较低值(如 0.9)将更宽松。\" • 默认值为 <{{dynamicValue}}>",
+ "llm.prediction.minPSampling/title": "Min P 采样",
+ "llm.prediction.minPSampling/subTitle": "Token 被选为输出的最小基础概率",
+ "llm.prediction.minPSampling/info": "来自 llama.cpp 帮助文档:\n\n相对于最可能 Token 的概率,Token 被考虑的最小概率。必须在 [0, 1] 范围内。\n\n• 默认值为 <{{dynamicValue}}>",
"llm.prediction.topPSampling/title": "Top P 采样",
- "llm.prediction.topPSampling/subTitle": "可能的下一个token的最小累积概率。作用类似于温度",
- "llm.prediction.topPSampling/info": "来自 llama.cpp 帮助文档:\n\nTop-p 采样,也称为核心采样,是另一种文本生成方法,从累积概率至少为 p 的token子集中选择下一个token。\n\n这种方法通过同时考虑token的概率和要从中采样的token数量,在多样性和质量之间提供了平衡。\n\n更高的 top-p 值(例如,0.95)将导致更多样化的文本,而较低的值(例如,0.5)将生成更集中和保守的文本。必须在 (0, 1] 范围内。\n\n• 默认值为 <{{dynamicValue}}>",
+ "llm.prediction.topPSampling/subTitle": "可能下一个 Token 的最小累积概率。作用类似于温度",
+ "llm.prediction.topPSampling/info": "来自 llama.cpp 帮助文档:\n\nTop-p 采样,也称为核采样,是另一种文本生成方法,从累积概率至少为 p 的 Token 子集中选择下一个 Token。\n\n该方法通过同时考虑 Token 概率和采样 Token 数量来平衡多样性和质量。\n\n较高的 top-p 值(如 0.95)将产生更多样化的文本,而较低值(如 0.5)将生成更聚焦和保守的文本。必须在 (0, 1] 范围内。\n\n• 默认值为 <{{dynamicValue}}>",
"llm.prediction.stopStrings/title": "停止字符串",
- "llm.prediction.stopStrings/subTitle": "应该停止模型生成更多token的字符串",
- "llm.prediction.stopStrings/info": "遇到特定字符串时将停止模型生成更多token",
- "llm.prediction.stopStrings/placeholder": "输入一个字符串并按 ⏎",
+ "llm.prediction.stopStrings/subTitle": "应停止模型生成更多 Token 的字符串",
+ "llm.prediction.stopStrings/info": "遇到时将停止模型生成更多 Token 的特定字符串",
+ "llm.prediction.stopStrings/placeholder": "输入字符串并按 ⏎",
"llm.prediction.contextOverflowPolicy/title": "上下文溢出",
- "llm.prediction.contextOverflowPolicy/subTitle": "当对话超出模型处理能力时,模型应该如何表现",
- "llm.prediction.contextOverflowPolicy/info": "决定当对话超过模型的工作内存('上下文')大小时该怎么做",
+ "llm.prediction.contextOverflowPolicy/subTitle": "当对话过大模型无法处理时的行为方式",
+ "llm.prediction.contextOverflowPolicy/info": "决定当对话超过模型工作内存(\"上下文\")大小时的处理方式",
"llm.prediction.llama.frequencyPenalty/title": "频率惩罚",
"llm.prediction.llama.presencePenalty/title": "存在惩罚",
"llm.prediction.llama.tailFreeSampling/title": "尾部自由采样",
"llm.prediction.llama.locallyTypicalSampling/title": "局部典型采样",
"llm.prediction.llama.xtcProbability/title": "XTC 采样概率",
- "llm.prediction.llama.xtcProbability/subTitle": "XTC(排除顶选)采样器将在每个生成token时以该概率激活。XTC 采样有助于提升创造力,减少陈词滥调",
- "llm.prediction.llama.xtcProbability/info": "XTC(排除顶选)采样将以该概率在每个token生成时激活。XTC 采样通常可以提升创造力并减少陈词滥调",
+ "llm.prediction.llama.xtcProbability/subTitle": "XTC(排除顶部选择)采样器将以此概率在每个生成的 Token 上激活。XTC 采样可以提升创造力并减少陈词滥调",
+ "llm.prediction.llama.xtcProbability/info": "XTC(排除顶部选择)采样将以此概率在每个生成的 Token 上激活。XTC 采样通常能提升创造力并减少陈词滥调",
"llm.prediction.llama.xtcThreshold/title": "XTC 采样阈值",
- "llm.prediction.llama.xtcThreshold/subTitle": "XTC(排除顶选)阈值。在 `xtc-probability` 概率下,查找概率介于 `xtc-threshold` 和 0.5 之间的token,并仅保留其中概率最低的一个",
- "llm.prediction.llama.xtcThreshold/info": "XTC(排除顶选)阈值。在 `xtc-probability` 概率下,查找概率介于 `xtc-threshold` 和 0.5 之间的所有token,仅保留概率最低的一个,其余全部移除",
+ "llm.prediction.llama.xtcThreshold/subTitle": "XTC(排除顶部选择)阈值。以 `xtc-probability` 的概率,搜索概率在 `xtc-threshold` 和 0.5 之间的 Token,并移除其中除最不可能的一个以外的所有 Token",
+ "llm.prediction.llama.xtcThreshold/info": "XTC(排除顶部选择)阈值。以 `xtc-probability` 的概率,搜索概率在 `xtc-threshold` 和 0.5 之间的 Token,并移除其中除最不可能的一个以外的所有 Token",
"llm.prediction.mlx.topKSampling/title": "Top K 采样",
- "llm.prediction.mlx.topKSampling/subTitle": "将下一个token限制为概率最高的前 k 个token。作用类似于温度",
- "llm.prediction.mlx.topKSampling/info": "仅从概率最高的前 k 个token中选择下一个token,作用类似于温度",
+ "llm.prediction.mlx.topKSampling/subTitle": "将下一个 Token 限制为概率最高的前 k 个 Token 之一。作用类似于温度",
+ "llm.prediction.mlx.topKSampling/info": "将下一个 Token 限制为概率最高的前 k 个 Token 之一。作用类似于温度",
"llm.prediction.onnx.topKSampling/title": "Top K 采样",
- "llm.prediction.onnx.topKSampling/subTitle": "将下一个token限制为前 k 个最可能的token。作用类似于温度",
- "llm.prediction.onnx.topKSampling/info": "来自 ONNX 文档:\n\n保留最高概率词汇表token的数量以进行 top-k 过滤\n\n• 默认情况下此过滤器关闭",
+ "llm.prediction.onnx.topKSampling/subTitle": "将下一个 Token 限制为概率最高的前 k 个 Token 之一。作用类似于温度",
+ "llm.prediction.onnx.topKSampling/info": "来自 ONNX 文档:\n\n用于 top-k 过滤的最高概率词汇 Token 数量\n\n• 此过滤器默认关闭",
"llm.prediction.onnx.repeatPenalty/title": "重复惩罚",
- "llm.prediction.onnx.repeatPenalty/subTitle": "多大程度上避免重复相同的token",
- "llm.prediction.onnx.repeatPenalty/info": "更高的值阻止模型重复自身",
+ "llm.prediction.onnx.repeatPenalty/subTitle": "对重复相同 Token 的惩罚程度",
+ "llm.prediction.onnx.repeatPenalty/info": "较高值会阻止模型重复自己",
"llm.prediction.onnx.topPSampling/title": "Top P 采样",
- "llm.prediction.onnx.topPSampling/subTitle": "可能的下一个token的最小累积概率。作用类似于温度",
- "llm.prediction.onnx.topPSampling/info": "来自 ONNX 文档:\n\n仅保留累积概率达到或超过 TopP 的最可能token用于生成\n\n• 默认情况下此过滤器关闭",
- "llm.prediction.seed/title": "种子",
+ "llm.prediction.onnx.topPSampling/subTitle": "可能下一个 Token 的最小累积概率。作用类似于温度",
+ "llm.prediction.onnx.topPSampling/info": "来自 ONNX 文档:\n\n仅保留累积概率达到 TopP 或更高的最可能 Token 用于生成\n\n• 此过滤器默认关闭",
+ "llm.prediction.seed/title": "随机种子",
"llm.prediction.structured/title": "结构化输出",
"llm.prediction.structured/info": "结构化输出",
- "llm.prediction.structured/description": "高级:您可以提供[JSON Schema](https://json-schema.org/learn/miscellaneous-examples)来强制执行模型中的特定输出格式。阅读[留档](https://lmstudio.ai/docs/advanced/structured-output)了解更多",
- "llm.prediction.tools/title": "工具调用",
- "llm.prediction.tools/description": "高级功能:你可以提供 JSON 格式的工具列表,模型可请求调用这些工具。详情请查阅[文档](https://lmstudio.ai/docs/advanced/tool-use)",
- "llm.prediction.tools/serverPageDescriptionAddon": "通过服务端 API 调用时,请将其作为 `tools` 字段传入请求体",
- "llm.prediction.promptTemplate/title": "提示模板",
- "llm.prediction.promptTemplate/subTitle": "聊天中消息发送给模型的格式。更改此设置可能会引入意外行为 - 确保您知道自己在做什么!",
- "llm.prediction.speculativeDecoding.numDraftTokensExact/title": "草稿生成token数",
- "llm.prediction.speculativeDecoding.numDraftTokensExact/subTitle": "每生成一个主模型token,草稿模型生成的token数量。平衡计算量与收益,选择合适的数值",
- "llm.prediction.speculativeDecoding.minContinueDraftingProbability/title": "草稿概率阈值",
- "llm.prediction.speculativeDecoding.minContinueDraftingProbability/subTitle": "仅当token概率高于该阈值时才继续草稿。值越高风险越低,收益也越低",
- "llm.prediction.speculativeDecoding.minDraftLengthToConsider/title": "最小草稿长度",
- "llm.prediction.speculativeDecoding.minDraftLengthToConsider/subTitle": "草稿长度低于该值将被主模型忽略。值越高风险越低,收益也越低",
- "llm.prediction.speculativeDecoding.maxTokensToDraft/title": "最大草稿长度",
- "llm.prediction.speculativeDecoding.maxTokensToDraft/subTitle": "草稿中允许的最大token数。如果所有token概率都高于阈值,则为上限。值越低风险越低,收益也越低",
+ "llm.prediction.structured/description": "高级功能:你可以提供 [JSON Schema](https://json-schema.org/learn/miscellaneous-examples) 来强制模型以特定格式输出。阅读[文档](https://lmstudio.ai/docs/advanced/structured-output)了解更多",
+ "llm.prediction.tools/title": "工具使用",
+ "llm.prediction.tools/description": "高级功能:你可以提供符合 JSON 格式的工具列表供模型请求调用。阅读[文档](https://lmstudio.ai/docs/advanced/tool-use)了解更多",
+ "llm.prediction.tools/serverPageDescriptionAddon": "使用服务器 API 时,通过请求体传递 `tools` 参数",
+ "llm.prediction.promptTemplate/title": "提示词模板",
+ "llm.prediction.promptTemplate/subTitle": "聊天消息发送给模型的格式。更改此选项可能引入意外行为 - 请确保你知道自己在做什么!",
+ "llm.prediction.speculativeDecoding.numDraftTokensExact/title": "生成草稿 Token 数",
+ "llm.prediction.speculativeDecoding.numDraftTokensExact/subTitle": "每个主模型 Token 使用草稿模型生成的 Token 数量。找到计算量与收益的最佳平衡点",
+ "llm.prediction.speculativeDecoding.minContinueDraftingProbability/title": "起草概率截止值",
+ "llm.prediction.speculativeDecoding.minContinueDraftingProbability/subTitle": "继续起草直到 Token 概率低于此阈值。较高值通常意味着较低风险、较低收益",
+ "llm.prediction.speculativeDecoding.minDraftLengthToConsider/title": "最小草稿大小",
+ "llm.prediction.speculativeDecoding.minDraftLengthToConsider/subTitle": "小于此值的草稿将被主模型忽略。较高值通常意味着较低风险、较低收益",
+ "llm.prediction.speculativeDecoding.maxTokensToDraft/title": "最大草稿大小",
+ "llm.prediction.speculativeDecoding.maxTokensToDraft/subTitle": "草稿中允许的最大 Token 数。当所有 Token 概率都大于截止值时的上限。较低值通常意味着较低风险、较低收益",
"llm.prediction.speculativeDecoding.draftModel/title": "草稿模型",
- "llm.prediction.reasoning.parsing/title": "推理过程解析方式",
- "llm.prediction.reasoning.parsing/subTitle": "控制模型输出中推理过程的解析方式",
+ "llm.prediction.reasoning.parsing/title": "推理部分解析",
+ "llm.prediction.reasoning.parsing/subTitle": "如何解析模型输出中的推理部分",
"llm.load.mainGpu/title": "主 GPU",
- "llm.load.mainGpu/subTitle": "用于模型计算的 GPU 优先级",
+ "llm.load.mainGpu/subTitle": "优先用于模型计算的 GPU",
"llm.load.mainGpu/placeholder": "选择主 GPU...",
- "llm.load.splitStrategy/title": "拆分策略",
- "llm.load.splitStrategy/subTitle": "如何跨 GPU 拆分模型计算",
- "llm.load.splitStrategy/placeholder": "选择拆分策略...",
+ "llm.load.splitStrategy/title": "分配策略",
+ "llm.load.splitStrategy/subTitle": "如何在多个 GPU 之间分配模型计算",
+ "llm.load.splitStrategy/placeholder": "选择分配策略...",
"llm.load.offloadKVCacheToGpu/title": "将 KV 缓存卸载到 GPU 内存",
- "llm.load.offloadKVCacheToGpu/subTitle": "将 KV 缓存卸载到 GPU 内存。这可以提高性能但需要更多 GPU 内存",
- "load.gpuStrictVramCap/title": "限制模型卸载至专用 GPU 内存",
- "load.gpuStrictVramCap.customSubTitleOff": "关闭:若专用 GPU 内存已满,允许将模型权重卸载至共享内存",
- "load.gpuStrictVramCap.customSubTitleOn": "开启:系统将限制模型权重的卸载仅限于专用 GPU 内存及 RAM 。上下文仍可能使用共享内存",
- "load.gpuStrictVramCap.customGpuOffloadWarning": "模型的卸载仅限于专用 GPU 内存。实际卸载的层数可能会有所不同",
- "load.allGpusDisabledWarning": "所有 GPU 目前均被禁用。请启用至少一个以进行卸载",
+ "llm.load.offloadKVCacheToGpu/subTitle": "将 KV 缓存卸载到 GPU 内存。提升性能但需要更多 GPU 内存",
+ "llm.load.numParallelSessions/title": "最大并发预测数",
+ "llm.load.numParallelSessions/subTitle": "模型可同时运行的最大预测数量。并发时每个单独预测的速度可能会降低,但每个预测启动更快且可实现更高的总吞吐量",
+ "llm.load.useUnifiedKvCache/title": "统一 KV 缓存",
+ "llm.load.useUnifiedKvCache/subTitle": "控制并发预测是否共享单个 KV 缓存以节省内存。禁用此选项可确保每个预测都能使用完整的上下文长度,但会消耗更多内存",
+ "load.gpuStrictVramCap/title": "限制模型卸载到专用 GPU 内存",
+ "load.gpuStrictVramCap.customSubTitleOff": "关闭:如果专用 GPU 内存已满,允许将模型权重卸载到共享内存",
+ "load.gpuStrictVramCap.customSubTitleOn": "开启:系统将限制模型权重仅卸载到专用 GPU 内存和系统内存(RAM)。上下文仍可使用共享内存",
+ "load.gpuStrictVramCap.customGpuOffloadWarning": "模型卸载限制到专用 GPU 内存。实际卸载层数可能有所不同",
+ "load.allGpusDisabledWarning": "所有 GPU 当前已禁用。至少启用一个以进行卸载",
"llm.load.contextLength/title": "上下文长度",
- "llm.load.contextLength/subTitle": "模型可以一次性关注的token最大数量。请参阅“推理参数”下的“对话溢出”选项以获取更多管理方式",
- "llm.load.contextLength/info": "指定模型一次可以考虑的最大token数量,影响其处理过程中保留的上下文量",
- "llm.load.contextLength/warning": "设置较高的上下文长度值会对内存使用产生显著影响",
- "llm.load.seed/title": "种子",
- "llm.load.seed/subTitle": "用于文本生成的随机数生成器的种子。-1 表示随机",
- "llm.load.seed/info": "随机种子:设置随机数生成的种子以确保可重复的结果",
+ "llm.load.contextLength/subTitle": "模型在一次提示中可以关注到的最大 Token 数。查看\"推理参数\"下的对话溢出选项以了解更多管理方式",
+ "llm.load.contextLength/info": "指定模型一次可以考虑的最大 Token 数量,影响其在处理期间保留的上下文量",
+ "llm.load.contextLength/warning": "设置较高的上下文长度值可能会显著影响内存使用",
+ "llm.load.seed/title": "随机种子",
+ "llm.load.seed/subTitle": "文本生成中使用的随机数生成器种子。-1 为随机",
+ "llm.load.seed/info": "随机种子:设置随机数生成器的种子以确保可复现的结果",
+ "llm.load.numCpuExpertLayersRatio/title": "强制将 MoE 权重放到 CPU 的层数",
+ "llm.load.numCpuExpertLayersRatio/subTitle": "强制将专家层放到 CPU 的层数。节省显存且可能比部分 GPU 卸载更快。如果模型完全适合显存则不推荐。",
+ "llm.load.numCpuExpertLayersRatio/info": "指定强制将专家层放到 CPU 的层数。将注意力层保留在 GPU 上,在保持推理速度的同时节省显存。",
- "llm.load.llama.evalBatchSize/title": "评估批处理大小",
- "llm.load.llama.evalBatchSize/subTitle": "每次处理的输入token数量。增加此值会提高性能,但会增加内存使用量",
- "llm.load.llama.evalBatchSize/info": "设置评估期间一起处理的示例数量,影响速度和内存使用",
- "llm.load.llama.ropeFrequencyBase/title": "RoPE 频率基",
- "llm.load.llama.ropeFrequencyBase/subTitle": "旋转位置嵌入(RoPE)的自定义基频。增加此值可能在高上下文长度下提高性能",
- "llm.load.llama.ropeFrequencyBase/info": "[高级] 调整旋转位置编码的基频,影响位置信息的嵌入方式",
- "llm.load.llama.ropeFrequencyScale/title": "RoPE 频率比例",
- "llm.load.llama.ropeFrequencyScale/subTitle": "上下文长度按此因子缩放,以使用 RoPE 扩展有效上下文",
- "llm.load.llama.ropeFrequencyScale/info": "[高级] 修改旋转位置编码的频率缩放,以控制位置编码的粒度",
+ "llm.load.llama.evalBatchSize/title": "评估批次大小",
+ "llm.load.llama.evalBatchSize/subTitle": "一次处理的输入 Token 数量。增加此值会提高性能但会增加内存使用",
+ "llm.load.llama.evalBatchSize/info": "设置评估期间一起处理的示例数量,影响速度和内存使用",
+ "llm.load.llama.ropeFrequencyBase/title": "RoPE 频率基数",
+ "llm.load.llama.ropeFrequencyBase/subTitle": "旋转位置编码(RoPE)的自定义基础频率。增加此值可能在高上下文长度下获得更好的性能",
+ "llm.load.llama.ropeFrequencyBase/info": "[高级] 调整旋转位置编码的基础频率,影响位置信息的嵌入方式",
+ "llm.load.llama.ropeFrequencyScale/title": "RoPE 频率缩放",
+ "llm.load.llama.ropeFrequencyScale/subTitle": "上下文长度按此因子缩放以使用 RoPE 扩展有效上下文",
+ "llm.load.llama.ropeFrequencyScale/info": "[高级] 修改旋转位置编码的频率缩放以控制位置编码粒度",
"llm.load.llama.acceleration.offloadRatio/title": "GPU 卸载",
- "llm.load.llama.acceleration.offloadRatio/subTitle": "用于 GPU 加速的离散模型层数",
- "llm.load.llama.acceleration.offloadRatio/info": "设置卸载到 GPU 的层数。",
- "llm.load.llama.flashAttention/title": "快速注意力",
- "llm.load.llama.flashAttention/subTitle": "降低某些模型的内存使用量和生成时间",
- "llm.load.llama.flashAttention/info": "加速注意力机制,实现更快、更高效的处理",
+ "llm.load.llama.acceleration.offloadRatio/subTitle": "用于 GPU 加速、在 GPU 上计算的离散模型层数",
+ "llm.load.llama.acceleration.offloadRatio/info": "设置要卸载到 GPU 的层数。",
+ "llm.load.llama.flashAttention/title": "Flash Attention",
+ "llm.load.llama.flashAttention/subTitle": "在某些模型上降低内存使用和生成时间",
+ "llm.load.llama.flashAttention/info": "加速注意力机制以实现更快更高效的处理",
"llm.load.numExperts/title": "专家数量",
"llm.load.numExperts/subTitle": "模型中使用的专家数量",
"llm.load.numExperts/info": "模型中使用的专家数量",
- "llm.load.llama.keepModelInMemory/title": "保持模型在内存中",
- "llm.load.llama.keepModelInMemory/subTitle": "即使模型卸载到 GPU 也预留系统内存。提高性能但需要更多的系统 RAM",
- "llm.load.llama.keepModelInMemory/info": "防止模型交换到磁盘,确保更快的访问,但以更高的 RAM 使用率为代价",
- "llm.load.llama.useFp16ForKVCache/title": "使用 FP16 用于 KV 缓存",
- "llm.load.llama.useFp16ForKVCache/info": "通过以半精度(FP16)存储缓存来减少内存使用",
+ "llm.load.llama.keepModelInMemory/title": "将模型保留在内存中",
+ "llm.load.llama.keepModelInMemory/subTitle": "即使卸载到 GPU 也为模型保留系统内存。提升性能但需要更多系统内存",
+ "llm.load.llama.keepModelInMemory/info": "防止模型被交换到磁盘,确保更快访问但代价是更高的内存使用",
+ "llm.load.llama.useFp16ForKVCache/title": "对 KV 缓存使用 FP16",
+ "llm.load.llama.useFp16ForKVCache/info": "以半精度(FP16)存储缓存以减少内存使用",
"llm.load.llama.tryMmap/title": "尝试 mmap()",
- "llm.load.llama.tryMmap/subTitle": "提高模型的加载时间。禁用此功能可能在模型大于可用系统 RAM 时提高性能",
+ "llm.load.llama.tryMmap/subTitle": "改善模型加载时间。禁用此选项可能在模型大于可用系统内存时提升性能",
"llm.load.llama.tryMmap/info": "直接从磁盘加载模型文件到内存",
"llm.load.llama.cpuThreadPoolSize/title": "CPU 线程池大小",
- "llm.load.llama.cpuThreadPoolSize/subTitle": "为模型计算分配的 CPU 线程池线程数",
- "llm.load.llama.cpuThreadPoolSize/info": "分配用于模型计算的 CPU 线程池线程数量。线程数增加未必总能带来更佳性能。默认值为 <{{dynamicValue}}>。",
+ "llm.load.llama.cpuThreadPoolSize/subTitle": "分配给模型计算使用的线程池的 CPU 线程数",
+ "llm.load.llama.cpuThreadPoolSize/info": "分配给模型计算使用的线程池的 CPU 线程数。增加线程数并不总是与更好的性能相关。默认值为 <{{dynamicValue}}>。",
"llm.load.llama.kCacheQuantizationType/title": "K 缓存量化类型",
- "llm.load.llama.kCacheQuantizationType/subTitle": "较低的量化类型可降低内存占用,但可能影响模型质量,不同模型效果差异大。",
+ "llm.load.llama.kCacheQuantizationType/subTitle": "较低值减少内存使用但可能降低质量。效果因模型而异。",
"llm.load.llama.vCacheQuantizationType/title": "V 缓存量化类型",
- "llm.load.llama.vCacheQuantizationType/subTitle": "较低的量化类型可降低内存占用,但可能影响模型质量,不同模型效果差异大。",
- "llm.load.llama.vCacheQuantizationType/turnedOnWarning": "⚠️ 如未启用Flash Attention,请务必关闭该选项",
- "llm.load.llama.vCacheQuantizationType/disabledMessage": "仅在启用Flash Attention时可用",
- "llm.load.llama.vCacheQuantizationType/invalidF32MetalState": "⚠️ 使用 F32 时请禁用Flash Attention",
- "llm.load.mlx.kvCacheBits/title": "KV 缓存量化位数",
- "llm.load.mlx.kvCacheBits/subTitle": "KV 缓存量化使用的位数",
- "llm.load.mlx.kvCacheBits/info": "设置 KV 缓存需要量化成的位数",
- "llm.load.mlx.kvCacheBits/turnedOnWarning": "启用 KV 缓存量化时,上下文长度设置将被忽略",
- "llm.load.mlx.kvCacheGroupSize/title": "KV 缓存量化分组大小",
- "llm.load.mlx.kvCacheGroupSize/subTitle": "量化操作时分组的大小,组越大内存占用越低,但模型质量可能下降",
- "llm.load.mlx.kvCacheGroupSize/info": "KV 缓存量化时使用的分组位数",
- "llm.load.mlx.kvCacheQuantizationStart/title": "KV 缓存量化:开始量化的上下文长度",
- "llm.load.mlx.kvCacheQuantizationStart/subTitle": "达到此上下文长度后开始对 KV 缓存进行量化",
- "llm.load.mlx.kvCacheQuantizationStart/info": "达到此上下文长度后开始对 KV 缓存进行量化",
+ "llm.load.llama.vCacheQuantizationType/subTitle": "较低值减少内存使用但可能降低质量。效果因模型而异。",
+ "llm.load.llama.vCacheQuantizationType/turnedOnWarning": "⚠️ 如果未启用 Flash Attention,必须禁用此选项",
+ "llm.load.llama.vCacheQuantizationType/disabledMessage": "仅在启用 Flash Attention 时才能开启",
+ "llm.load.llama.vCacheQuantizationType/invalidF32MetalState": "⚠️ 使用 F32 时必须禁用 Flash Attention",
+ "llm.load.mlx.kvCacheBits/title": "KV 缓存量化",
+ "llm.load.mlx.kvCacheBits/subTitle": "KV 缓存应量化到的位数",
+ "llm.load.mlx.kvCacheBits/info": "KV 缓存应量化到的位数",
+ "llm.load.mlx.kvCacheBits/turnedOnWarning": "使用 KV 缓存量化时忽略上下文长度设置",
+ "llm.load.mlx.kvCacheGroupSize/title": "KV 缓存量化:组大小",
+ "llm.load.mlx.kvCacheGroupSize/subTitle": "KV 缓存量化操作期间的组大小。较高组大小减少内存使用但可能降低质量",
+ "llm.load.mlx.kvCacheGroupSize/info": "KV 缓存量化操作期间使用的组大小",
+ "llm.load.mlx.kvCacheQuantizationStart/title": "KV 缓存量化:当上下文超过此长度时开始量化",
+ "llm.load.mlx.kvCacheQuantizationStart/subTitle": "开始量化 KV 缓存的上下文长度阈值",
+ "llm.load.mlx.kvCacheQuantizationStart/info": "开始量化 KV 缓存的上下文长度阈值",
"llm.load.mlx.kvCacheQuantization/title": "KV 缓存量化",
- "llm.load.mlx.kvCacheQuantization/subTitle": "对模型的 KV 缓存进行量化,可加快生成速度并降低内存占用,但可能影响输出质量。",
+ "llm.load.mlx.kvCacheQuantization/subTitle": "量化模型的 KV 缓存。这可能会加快生成速度并降低内存占用,但会牺牲模型输出质量。",
"llm.load.mlx.kvCacheQuantization/bits/title": "KV 缓存量化位数",
- "llm.load.mlx.kvCacheQuantization/bits/tooltip": "KV 缓存量化所用的位数",
- "llm.load.mlx.kvCacheQuantization/bits/bits": "位数",
- "llm.load.mlx.kvCacheQuantization/groupSize/title": "分组策略",
- "llm.load.mlx.kvCacheQuantization/groupSize/accuracy": "高精度",
- "llm.load.mlx.kvCacheQuantization/groupSize/balanced": "均衡",
- "llm.load.mlx.kvCacheQuantization/groupSize/speedy": "极速",
- "llm.load.mlx.kvCacheQuantization/groupSize/tooltip": "高级:量化乘法的分组大小配置\n\n• 高精度 = 分组 32\n• 均衡 = 分组 64\n• 极速 = 分组 128\n",
- "llm.load.mlx.kvCacheQuantization/quantizedStart/title": "达到此上下文长度后开始量化",
- "llm.load.mlx.kvCacheQuantization/quantizedStart/tooltip": "当上下文长度达到该值时,开始对 KV 缓存进行量化",
+ "llm.load.mlx.kvCacheQuantization/bits/tooltip": "KV 缓存量化的位数",
+ "llm.load.mlx.kvCacheQuantization/bits/bits": "位",
+ "llm.load.mlx.kvCacheQuantization/groupSize/title": "组大小策略",
+ "llm.load.mlx.kvCacheQuantization/groupSize/accuracy": "精度",
+ "llm.load.mlx.kvCacheQuantization/groupSize/balanced": "平衡",
+ "llm.load.mlx.kvCacheQuantization/groupSize/speedy": "快速",
+ "llm.load.mlx.kvCacheQuantization/groupSize/tooltip": "高级:量化 '矩阵乘法组大小' 配置\n\n• 精度 = 组大小 32\n• 平衡 = 组大小 64\n• 快速 = 组大小 128\n",
+ "llm.load.mlx.kvCacheQuantization/quantizedStart/title": "当上下文达到此长度时开始量化",
+ "llm.load.mlx.kvCacheQuantization/quantizedStart/tooltip": "当上下文达到此 Token 数量时,开始量化 KV 缓存",
"embedding.load.contextLength/title": "上下文长度",
- "embedding.load.contextLength/subTitle": "模型可以一次性关注的token最大数量。请参阅“推理参数”下的“对话溢出”选项以获取更多管理方式",
- "embedding.load.contextLength/info": "指定模型一次可以考虑的最大token数量,影响其处理过程中保留的上下文量",
- "embedding.load.llama.ropeFrequencyBase/title": "RoPE 频率基",
- "embedding.load.llama.ropeFrequencyBase/subTitle": "旋转位置嵌入(RoPE)的自定义基频。增加此值可能在高上下文长度下提高性能",
- "embedding.load.llama.ropeFrequencyBase/info": "[高级] 调整旋转位置编码的基频,影响位置信息的嵌入方式",
- "embedding.load.llama.evalBatchSize/title": "评估批处理大小",
- "embedding.load.llama.evalBatchSize/subTitle": "每次处理的输入token数量。增加此值会提高性能,但会增加内存使用量",
- "embedding.load.llama.evalBatchSize/info": "设置评估期间一起处理的token数量",
- "embedding.load.llama.ropeFrequencyScale/title": "RoPE 频率比例",
- "embedding.load.llama.ropeFrequencyScale/subTitle": "上下文长度按此因子缩放,以使用 RoPE 扩展有效上下文",
- "embedding.load.llama.ropeFrequencyScale/info": "[高级] 修改旋转位置编码的频率缩放,以控制位置编码的粒度",
+ "embedding.load.contextLength/subTitle": "模型在一次提示中可以关注到的最大 Token 数。查看\"推理参数\"下的对话溢出选项以了解更多管理方式",
+ "embedding.load.contextLength/info": "指定模型一次可以考虑的最大 Token 数量,影响其在处理期间保留的上下文量",
+ "embedding.load.llama.ropeFrequencyBase/title": "RoPE 频率基数",
+ "embedding.load.llama.ropeFrequencyBase/subTitle": "旋转位置编码(RoPE)的自定义基础频率。增加此值可能在高上下文长度下获得更好的性能",
+ "embedding.load.llama.ropeFrequencyBase/info": "[高级] 调整旋转位置编码的基础频率,影响位置信息的嵌入方式",
+ "embedding.load.llama.evalBatchSize/title": "评估批次大小",
+ "embedding.load.llama.evalBatchSize/subTitle": "一次处理的输入 Token 数量。增加此值会提高性能但会增加内存使用",
+ "embedding.load.llama.evalBatchSize/info": "设置评估期间一起处理的 Token 数量",
+ "embedding.load.llama.ropeFrequencyScale/title": "RoPE 频率缩放",
+ "embedding.load.llama.ropeFrequencyScale/subTitle": "上下文长度按此因子缩放以使用 RoPE 扩展有效上下文",
+ "embedding.load.llama.ropeFrequencyScale/info": "[高级] 修改旋转位置编码的频率缩放以控制位置编码粒度",
"embedding.load.llama.acceleration.offloadRatio/title": "GPU 卸载",
- "embedding.load.llama.acceleration.offloadRatio/subTitle": "用于 GPU 加速的离散模型层数",
- "embedding.load.llama.acceleration.offloadRatio/info": "设置卸载到 GPU 的层数。",
- "embedding.load.llama.keepModelInMemory/title": "保持模型在内存中",
- "embedding.load.llama.keepModelInMemory/subTitle": "即使模型卸载到 GPU 也预留系统内存。提高性能但需要更多的系统 RAM",
- "embedding.load.llama.keepModelInMemory/info": "防止模型交换到磁盘,确保更快的访问,但以更高的 RAM 使用率为代价",
+ "embedding.load.llama.acceleration.offloadRatio/subTitle": "用于 GPU 加速、在 GPU 上计算的离散模型层数",
+ "embedding.load.llama.acceleration.offloadRatio/info": "设置要卸载到 GPU 的层数。",
+ "embedding.load.llama.keepModelInMemory/title": "将模型保留在内存中",
+ "embedding.load.llama.keepModelInMemory/subTitle": "即使卸载到 GPU 也为模型保留系统内存。提升性能但需要更多系统内存",
+ "embedding.load.llama.keepModelInMemory/info": "防止模型被交换到磁盘,确保更快访问但代价是更高的内存使用",
"embedding.load.llama.tryMmap/title": "尝试 mmap()",
- "embedding.load.llama.tryMmap/subTitle": "提高模型的加载时间。禁用此功能可能在模型大于可用系统 RAM 时提高性能",
+ "embedding.load.llama.tryMmap/subTitle": "改善模型加载时间。禁用此选项可能在模型大于可用系统内存时提升性能",
"embedding.load.llama.tryMmap/info": "直接从磁盘加载模型文件到内存",
- "embedding.load.seed/title": "种子",
- "embedding.load.seed/subTitle": "用于文本生成的随机数生成器的种子。-1 表示随机种子",
+ "embedding.load.seed/title": "随机种子",
+ "embedding.load.seed/subTitle": "文本生成中使用的随机数生成器种子。-1 为随机种子",
- "embedding.load.seed/info": "随机种子:设置随机数生成的种子以确保可重复的结果",
+ "embedding.load.seed/info": "随机种子:设置随机数生成器的种子以确保可复现的结果",
"presetTooltip": {
"included/title": "预设值",
- "included/description": "以下字段将会被应用",
- "included/empty": "在此上下文中,此预设没有适用的字段。",
- "included/conflict": "您将被要求选择是否应用此值",
+ "included/description": "以下字段将被应用",
+ "included/empty": "此预设在此上下文中没有适用的字段。",
+ "included/conflict": "系统将询问你是否应用此值",
"separateLoad/title": "加载时配置",
- "separateLoad/description.1": "预设还包含以下加载时配置。加载时配置是全模型范围的,并且需要重新加载模型才能生效。按住",
+ "separateLoad/description.1": "预设还包含以下加载时配置。加载时配置是模型范围的,需要重新加载模型才能生效。按住",
"separateLoad/description.2": "应用到",
"separateLoad/description.3": "。",
"excluded/title": "可能不适用",
- "excluded/description": "以下字段包含在预设中,但在当前上下文中不适用。",
+ "excluded/description": "以下字段包含在预设中,但在当前上下文中不适用。",
"legacy/title": "旧版预设",
- "legacy/description": "这是一个旧版预设。它包括以下字段,这些字段现在要么自动处理,要么不再适用。",
+ "legacy/description": "这是一个旧版预设。它包含以下字段,这些字段现在要么自动处理,要么不再适用。",
"button/publish": "发布到 Hub",
"button/pushUpdate": "推送更改到 Hub",
- "button/noChangesToPush": "没有可推送的更改",
+ "button/noChangesToPush": "没有要推送的更改",
"button/export": "导出",
- "hubLabel": "来自 {{user}} 的 Hub 预设",
- "ownHubLabel": "您的 Hub 预设"
+ "hubLabel": "来自 Hub 的预设,作者 {{user}}",
+ "ownHubLabel": "你的 Hub 预设"
},
"customInputs": {
@@ -264,13 +271,13 @@
},
"llmPromptTemplate": {
"type": "类型",
- "types.jinja/label": "模板 (Jinja)",
- "jinja.bosToken/label": "开始token (BOS Token)",
- "jinja.eosToken/label": "结束token (EOS Token)",
+ "types.jinja/label": "模板(Jinja)",
+ "jinja.bosToken/label": "BOS Token",
+ "jinja.eosToken/label": "EOS Token",
"jinja.template/label": "模板",
- "jinja/error": "解析 Jinja 模板失败: {{error}}",
- "jinja/empty": "请在上方输入一个 Jinja 模板。",
- "jinja/unlikelyToWork": "您提供的 Jinja 模板很可能无法正常工作,因为它没有引用变量 \"messages\"。请检查您输入的模板是否正确。",
+ "jinja/error": "解析 Jinja 模板失败:{{error}}",
+ "jinja/empty": "请在上方输入 Jinja 模板。",
+ "jinja/unlikelyToWork": "你提供的 Jinja 模板不太可能正常工作,因为它没有引用变量 \"messages\"。请仔细检查是否输入了正确的模板。",
"types.manual/label": "手动",
"manual.subfield.beforeSystem/label": "系统前缀",
"manual.subfield.beforeSystem/placeholder": "输入系统前缀...",
@@ -285,21 +292,21 @@
"manual.subfield.afterAssistant/label": "助手后缀",
"manual.subfield.afterAssistant/placeholder": "输入助手后缀...",
"stopStrings/label": "额外停止字符串",
- "stopStrings/subTitle": "除了用户指定的停止字符串之外,还将使用特定于模板的停止字符串。"
+ "stopStrings/subTitle": "模板特定的停止字符串,将附加在用户指定的停止字符串之外。"
},
"contextLength": {
- "maxValueTooltip": "这是模型训练所能处理的最大token数量。点击以将上下文设置为此值",
+ "maxValueTooltip": "这是模型训练时能够处理的最大 Token 数。点击以将上下文设置为此值",
"maxValueTextStart": "模型支持最多",
- "maxValueTextEnd": "个token",
- "tooltipHint": "尽管模型可能支持一定数量的token,但如果您的机器资源无法处理负载,性能可能会下降 - 增加此值时请谨慎"
+ "maxValueTextEnd": "个 Token",
+ "tooltipHint": "虽然模型可能支持一定数量的 Token,但如果你的机器资源无法处理负载,性能可能会下降 - 增加此值时请谨慎"
},
"contextOverflowPolicy": {
- "stopAtLimit": "到达限制时停止",
- "stopAtLimitSub": "一旦模型的内存满载即停止生成",
+ "stopAtLimit": "达到限制时停止",
+ "stopAtLimitSub": "一旦模型内存满就停止生成",
"truncateMiddle": "截断中间",
- "truncateMiddleSub": "从对话中间移除消息以为新消息腾出空间。模型仍然会记住对话的开头",
+ "truncateMiddleSub": "从对话中间移除消息以为新消息腾出空间。模型仍会记住对话的开头",
"rollingWindow": "滚动窗口",
- "rollingWindowSub": "模型将始终接收最近的几条消息,但可能会忘记对话的开头"
+ "rollingWindowSub": "模型将始终获取最新的几条消息,但可能会忘记对话的开头"
},
"llamaAccelerationOffloadRatio": {
"max": "最大",
@@ -310,151 +317,151 @@
"favorMainGpu": "优先主 GPU"
},
"speculativeDecodingDraftModel": {
- "readMore": "了解工作原理",
+ "readMore": "阅读工作原理",
"placeholder": "选择兼容的草稿模型",
- "noCompatible": "当前模型选择下未找到兼容的草稿模型",
+ "noCompatible": "当前模型选择没有找到兼容的草稿模型",
"stillLoading": "正在识别兼容的草稿模型...",
- "notCompatible": "所选草稿模型()与当前模型选择()不兼容。",
+ "notCompatible": "选定的草稿模型()与当前模型选择()不兼容。",
"off": "关闭",
"loadModelToSeeOptions": "加载模型 以查看兼容选项",
- "compatibleWithNumberOfModels": "推荐用于至少 {{dynamicValue}} 个模型",
+ "compatibleWithNumberOfModels": "推荐用于你的至少 {{dynamicValue}} 个模型",
"recommendedForSomeModels": "推荐用于部分模型",
"recommendedForLlamaModels": "推荐用于 Llama 模型",
"recommendedForQwenModels": "推荐用于 Qwen 模型",
"onboardingModal": {
- "introducing": "新功能介绍",
- "speculativeDecoding": "投机解码",
- "firstStepBody": "llama.cpp 和 MLX 模型推理加速",
- "secondStepTitle": "投机解码能够加速推理",
- "secondStepBody": "投机解码是一种让两个模型协作的技术:\n - 一个规模较大的“主”模型\n - 一个较小的“草稿”模型\n\n生成过程中,草稿模型会快速提出token,由主模型进行验证。验证的过程比实际生成更快。\n**通常,主模型与草稿模型的体积差距越大,加速效果越明显。**\n\n为了保证质量,主模型只会接受与自身结果一致的token,从而实现大模型的响应质量与更快的推理速度。两个模型必须使用相同的词表。",
+ "introducing": "介绍",
+ "speculativeDecoding": "推测解码",
+ "firstStepBody": "llama.cpp 和 MLX 模型的推理加速",
+ "secondStepTitle": "使用推测解码加速推理",
+ "secondStepBody": "推测解码是一种涉及两个模型协作的技术:\n - 一个较大的\"主\"模型\n - 一个较小的\"草稿\"模型\n\n在生成过程中,草稿模型快速提议 Token 供较大的主模型验证。验证 Token 比实际生成它们快得多,这就是速度提升的来源。**一般来说,主模型和草稿模型之间的尺寸差异越大,加速效果越明显**。\n\n为了保持质量,主模型只接受与其自身会生成的 Token 一致的 Token,从而以更快的推理速度实现较大模型的回复质量。两个模型必须共享相同的词汇表。",
"draftModelRecommendationsTitle": "草稿模型推荐",
- "basedOnCurrentModels": "基于您当前的模型",
+ "basedOnCurrentModels": "基于你当前的模型",
"close": "关闭",
"next": "下一步",
"done": "完成"
},
"speculativeDecodingLoadModelToSeeOptions": "请先加载模型 ",
- "errorEngineNotSupported": "投机解码需引擎 {{engineName}} 至少版本 {{minVersion}}。请更新引擎()并重新加载模型以使用此功能。",
- "errorEngineNotSupported/noKey": "投机解码需引擎 {{engineName}} 至少版本 {{minVersion}}。请更新引擎并重新加载模型以使用此功能。"
+ "errorEngineNotSupported": "推测解码需要引擎 {{engineName}} 至少版本 {{minVersion}}。请更新引擎()并重新加载模型以使用此功能。",
+ "errorEngineNotSupported/noKey": "推测解码需要引擎 {{engineName}} 至少版本 {{minVersion}}。请更新引擎并重新加载模型以使用此功能。"
},
"llmReasoningParsing": {
"startString/label": "起始字符串",
- "startString/placeholder": "请输入起始字符串...",
+ "startString/placeholder": "输入起始字符串...",
"endString/label": "结束字符串",
- "endString/placeholder": "请输入结束字符串..."
+ "endString/placeholder": "输入结束字符串..."
}
},
"saveConflictResolution": {
"title": "选择要包含在预设中的值",
- "description": "挑选并选择要保留的值",
- "instructions": "点击一个值以包含它",
- "userValues": "先前的值",
+ "description": "挑选要保留的值",
+ "instructions": "点击值以包含它",
+ "userValues": "之前的值",
"presetValues": "新值",
"confirm": "确认",
"cancel": "取消"
},
"applyConflictResolution": {
"title": "保留哪些值?",
- "description": "您有未提交的更改与即将应用的预设有重叠",
- "instructions": "点击一个值以保留它",
+ "description": "你有未提交的更改与传入的预设重叠",
+ "instructions": "点击值以保留它",
"userValues": "当前值",
- "presetValues": "即将应用的预设值",
+ "presetValues": "传入的预设值",
"confirm": "确认",
"cancel": "取消"
},
"empty": "<空>",
"noModelSelected": "未选择模型",
"apiIdentifier.label": "API 标识符",
- "apiIdentifier.hint": "可选,为此模型提供一个标识符。该标识符将在 API 请求中使用。留空则使用默认标识符。",
- "idleTTL.label": "空闲时自动卸载",
- "idleTTL.hint": "如设置,模型在空闲指定时间后将自动卸载。",
+ "apiIdentifier.hint": "可选地为此模型提供标识符。这将用于 API 请求。留空以使用默认标识符。",
+ "idleTTL.label": "空闲时自动卸载(TTL)",
+ "idleTTL.hint": "如果设置,模型将在空闲指定时间后自动卸载。",
"idleTTL.mins": "分钟",
"presets": {
"title": "预设",
- "commitChanges": "提交更改",
- "commitChanges/description": "将您的更改提交给预设。",
- "commitChanges.manual": "检测到新的字段。您将能够选择要包含在预设中的更改。",
- "commitChanges.manual.hold.0": "按住",
- "commitChanges.manual.hold.1": "选择要提交给预设的更改。",
- "commitChanges.saveAll.hold.0": "按住",
- "commitChanges.saveAll.hold.1": "保存所有更改。",
- "commitChanges.saveInPreset.hold.0": "按住",
- "commitChanges.saveInPreset.hold.1": "仅保存已经包含在预设中的字段的更改。",
- "commitChanges/error": "未能将更改提交给预设。",
- "commitChanges.manual/description": "选择要包含在预设中的更改。",
+ "saveChanges": "保存",
+ "saveChanges/description": "保存你对预设的更改。",
+ "saveChanges.manual": "检测到新字段。你将能够选择要包含在预设中的更改。",
+ "saveChanges.manual.hold.0": "按住",
+ "saveChanges.manual.hold.1": "以选择要保存到预设的更改。",
+ "saveChanges.saveAll.hold.0": "按住",
+ "saveChanges.saveAll.hold.1": "以保存所有更改。",
+ "saveChanges.saveInPreset.hold.0": "按住",
+ "saveChanges.saveInPreset.hold.1": "以仅保存已包含在预设中的字段的更改。",
+ "saveChanges/error": "保存预设更改失败。",
+ "saveChanges.manual/description": "选择要包含在预设中的更改。",
"saveAs": "另存为新预设...",
- "presetNamePlaceholder": "为预设输入一个名称...",
- "cannotCommitChangesLegacy": "这是一个旧版预设,无法修改。您可以使用“另存为新预设...”创建一个副本。",
- "cannotCommitChangesNoChanges": "没有更改可以提交。",
- "emptyNoUnsaved": "选择一个预设...",
+ "presetNamePlaceholder": "输入预设名称...",
+ "cannotCommitChangesLegacy": "这是旧版预设,无法修改。你可以使用\"另存为新预设...\"创建副本。",
+ "cannotSaveChangesNoChanges": "没有要保存的更改。",
+ "emptyNoUnsaved": "选择预设...",
"emptyWithUnsaved": "未保存的预设",
- "saveEmptyWithUnsaved": "保存预设为...",
+ "saveEmptyWithUnsaved": "另存为预设...",
"saveConfirm": "保存",
"saveCancel": "取消",
- "saving": "正在保存...",
- "save/error": "未能保存预设。",
+ "saving": "保存中...",
+ "save/error": "保存预设失败。",
"deselect": "取消选择预设",
"deselect/error": "取消选择预设失败。",
"select/error": "选择预设失败。",
"delete/error": "删除预设失败。",
- "discardChanges": "丢弃未保存的更改",
- "discardChanges/info": "丢弃所有未提交的更改并恢复预设至原始状态",
- "newEmptyPreset": "创建新的空预设...",
+ "discardChanges": "放弃未保存的更改",
+ "discardChanges/info": "放弃所有未保存的更改并将预设恢复到原始状态",
+ "newEmptyPreset": "+ 新建预设",
"importPreset": "导入",
"contextMenuCopyIdentifier": "复制预设标识符",
- "contextMenuSelect": "选择预设",
- "contextMenuDelete": "删除",
- "contextMenuShare": "发布中...",
- "contextMenuOpenInHub": "在 Hub 上查看",
- "contextMenuPullFromHub": "拉取最新版本",
+ "contextMenuSelect": "应用预设",
+ "contextMenuDelete": "删除...",
+ "contextMenuShare": "发布...",
+ "contextMenuOpenInHub": "在网页上查看",
+ "contextMenuPullFromHub": "拉取最新",
"contextMenuPushChanges": "推送更改到 Hub",
- "contextMenuPushingChanges": "正在推送...",
+ "contextMenuPushingChanges": "推送中...",
"contextMenuPushedChanges": "更改已推送",
"contextMenuExport": "导出文件",
"contextMenuRevealInExplorer": "在文件资源管理器中显示",
"contextMenuRevealInFinder": "在 Finder 中显示",
"share": {
"title": "发布预设",
- "action": "分享你的预设,让他人下载、点赞和fork",
+ "action": "分享你的预设供他人下载、点赞和分叉",
"presetOwnerLabel": "所有者",
- "uploadAs": "你的预设将以 {{name}} 创建",
+ "uploadAs": "你的预设将作为 {{name}} 创建",
"presetNameLabel": "预设名称",
"descriptionLabel": "描述(可选)",
- "loading": "正在发布...",
- "success": "预设已成功发布",
- "presetIsLive": " 已在 Hub 上发布!",
+ "loading": "发布中...",
+ "success": "预设已成功推送",
+ "presetIsLive": " 现已在 Hub 上线!",
"close": "关闭",
"confirmViewOnWeb": "在网页上查看",
- "confirmCopy": "复制链接",
+ "confirmCopy": "复制 URL",
"confirmCopied": "已复制!",
"pushedToHub": "你的预设已推送到 Hub",
- "descriptionPlaceholder": "请输入描述...",
- "willBePublic": "发布你的预设将使其公开",
- "willBePrivate": "仅您可见",
- "willBeOrgVisible": "组织内成员均可见",
- "publicSubtitle": "你的预设现在为 公开。其他人可以在 lmstudio.ai 下载和 fork 它",
- "privateUsageReached": "私有预设的数量已达上限",
- "continueInBrowser": "在浏览器继续",
+ "descriptionPlaceholder": "输入描述...",
+ "willBePublic": "此预设将是公开的。互联网上的任何人都能看到它。",
+ "willBePrivate": "只有你能看到此预设",
+ "willBeOrgVisible": "此预设将对组织中的所有人可见。",
+ "publicSubtitle": "你的预设是公开的。其他人可以在 lmstudio.ai 上下载和分叉它",
+ "privateUsageReached": "已达到私有预设数量限制。",
+ "continueInBrowser": "在浏览器中继续",
"confirmShareButton": "发布",
- "error": "预设发布失败",
- "createFreeAccount": "请在 Hub 创建免费账号以发布预设"
+ "error": "发布预设失败",
+ "createFreeAccount": "在 Hub 创建免费账户以发布预设"
},
"update": {
"title": "推送更改到 Hub",
"title/success": "预设已成功更新",
- "subtitle": "修改 并推送到 Hub",
+ "subtitle": "对 进行更改并推送到 Hub",
"descriptionLabel": "描述",
- "descriptionPlaceholder": "请输入描述...",
- "loading": "正在推送...",
+ "descriptionPlaceholder": "输入描述...",
+ "loading": "推送中...",
"cancel": "取消",
- "createFreeAccount": "请在 Hub 创建免费账号以发布预设",
+ "createFreeAccount": "在 Hub 创建免费账户以发布预设",
"error": "推送更新失败",
"confirmUpdateButton": "推送"
},
"resolve": {
"title": "解决冲突...",
- "tooltip": "打开窗口以解决与 Hub 版本的差异"
+ "tooltip": "打开模态框以解决与 Hub 版本的差异"
},
"loginToManage": {
"title": "登录以管理..."
@@ -462,44 +469,44 @@
"downloadFromHub": {
"title": "下载",
"downloading": "下载中...",
- "success": "下载完成!",
+ "success": "已下载!",
"error": "下载失败"
},
"push": {
"title": "推送更改",
"pushing": "推送中...",
- "success": "推送成功!",
- "tooltip": "将本地更改推送到 Hub 上托管的远程版本",
+ "success": "已推送",
+ "tooltip": "将你的本地更改推送到 Hub 上托管的远程版本",
"error": "推送失败"
},
"saveAsNewModal": {
"title": "哎呀!在 Hub 上未找到预设",
- "confirmSaveAsNewDescription": "您是否希望将此预设作为新版本发布?",
- "confirmButton": "作为新版本发布"
+ "confirmSaveAsNewDescription": "你想将此预设作为新预设发布吗?",
+ "confirmButton": "作为新预设发布"
},
"pull": {
- "title": "拉取最新版本",
+ "title": "拉取最新",
"error": "拉取失败",
"contextMenuErrorMessage": "拉取失败",
"success": "已拉取",
"pulling": "拉取中...",
- "upToDate": "已是最新版本!",
+ "upToDate": "已是最新!",
"unsavedChangesModal": {
"title": "你有未保存的更改。",
- "bodyContent": "从远程拉取的内容将覆盖您的未保存更改,是否继续?",
+ "bodyContent": "从远程拉取将覆盖你未保存的更改。继续?",
"confirmButton": "覆盖未保存的更改"
}
},
"import": {
"title": "从文件导入预设",
- "dragPrompt": "拖拽预设 JSON 文件或从电脑选择",
+ "dragPrompt": "拖放预设文件(.tar.gz 或 preset.json)或 从计算机选择",
"remove": "移除",
"cancel": "取消",
"importPreset_zero": "导入预设",
"importPreset_one": "导入预设",
"importPreset_other": "导入 {{count}} 个预设",
"selectDialog": {
- "title": "选择预设文件(.json 或者 .tar.gz)",
+ "title": "选择预设文件(preset.json 或 .tar.gz)",
"button": "导入"
},
"error": "导入预设失败",
@@ -509,8 +516,8 @@
"titleFailSection_zero": "",
"titleFailSection_one": "({{count}} 个失败)",
"titleFailSection_other": "({{count}} 个失败)",
- "titleAllFailed": "预设导入失败",
- "importMore": "继续导入",
+ "titleAllFailed": "导入预设失败",
+ "importMore": "导入更多",
"close": "完成",
"successBadge": "成功",
"alreadyExistsBadge": "预设已存在",
@@ -520,26 +527,26 @@
"errorViewDetailsButton": "查看详情",
"seeError": "查看错误",
"noName": "无预设名称",
- "useInChat": "在聊天中使用"
+ "useInChat": "在对话中使用"
},
"importFromUrl": {
"button": "从 URL 导入...",
"title": "从 URL 导入",
"back": "从文件导入...",
- "action": "请在下方粘贴你要导入的 LM Studio Hub 预设链接",
- "invalidUrl": "无效的 URL,请确保输入的是有效的 LM Studio Hub 预设链接。",
- "tip": "你也可以在 LM Studio Hub 直接点击 {{buttonName}} 按钮安装该预设",
+ "action": "在下面粘贴你要导入的预设的 LM Studio Hub URL",
+ "invalidUrl": "无效的 URL。请确保你粘贴的是正确的 LM Studio Hub URL。",
+ "tip": "你可以直接在 LM Studio Hub 中使用 {{buttonName}} 按钮安装预设",
"confirm": "导入",
"cancel": "取消",
- "loading": "正在导入...",
+ "loading": "导入中...",
"error": "下载预设失败。"
}
},
"download": {
"title": "从 LM Studio Hub 拉取 ",
- "subtitle": "保存 到你的预设。保存后你可以在应用中使用此预设",
+ "subtitle": "将 保存到你的预设。这样做将允许你在应用中使用此预设",
"button": "拉取",
- "button/loading": "正在拉取...",
+ "button/loading": "拉取中...",
"cancel": "取消",
"error": "下载预设失败。"
},
@@ -548,8 +555,8 @@
}
},
- "flashAttentionWarning": "Flash Attention 是一项实验性功能,可能会导致某些模型出现问题。如果您遇到问题,请尝试禁用它。",
- "llamaKvCacheQuantizationWarning": "KV 缓存量化是一项实验性功能,可能会导致某些模型出现问题。V 缓存量化必须启用 Flash Attention。如果遇到问题,请将默认值重置为\"F16\"。",
+ "flashAttentionWarning": "Flash Attention 是一项实验性功能,可能在某些模型上导致问题。如果遇到问题,请尝试禁用它。",
+ "llamaKvCacheQuantizationWarning": "KV 缓存量化是一项实验性功能,可能在某些模型上导致问题。必须启用 Flash Attention 才能进行 V 缓存量化。如果遇到问题,请重置为默认的 \"F16\"。",
"seedUncheckedHint": "随机种子",
"ropeFrequencyBaseUncheckedHint": "自动",
@@ -557,26 +564,26 @@
"hardware": {
"environmentVariables": "环境变量",
- "environmentVariables.info": "如果不确定,请保留默认值",
+ "environmentVariables.info": "如果不确定,请保持这些为默认值",
"environmentVariables.reset": "重置为默认值",
-
- "gpus.information": "配置检测到的图形处理单元 (GPU)",
+
+ "gpus.information": "配置你机器上检测到的图形处理单元(GPU)",
"gpuSettings": {
"editMaxCapacity": "编辑最大容量",
- "hideEditMaxCapacity": "隐藏最大容量编辑",
- "allOffWarning": "所有 GPU 均已关闭或禁用,请确保分配了至少一个 GPU 以加载模型",
+ "hideEditMaxCapacity": "隐藏编辑最大容量",
+ "allOffWarning": "所有 GPU 都已关闭或禁用,请确保至少启用并分配一个 GPU 以便加载模型",
"split": {
- "title": "分配策略",
- "placeholder": "选择 GPU 内存分配方式",
+ "title": "策略",
+ "placeholder": "选择 GPU 内存分配",
"options": {
- "generalDescription": "配置模型将如何加载到您的 GPU 上",
+ "generalDescription": "配置模型将如何加载到你的 GPU 上",
"evenly": {
"title": "均匀分配",
- "description": "在多个 GPU 之间均匀分配内存"
+ "description": "在 GPU 之间均匀分配内存"
},
"priorityOrder": {
- "title": "按顺序填充",
- "description": "先在第一个 GPU 上分配内存,然后依次分配到后续 GPU"
+ "title": "优先顺序",
+ "description": "拖动以重新排序优先级。系统将尝试在列在前面的 GPU 上分配更多"
},
"custom": {
"title": "自定义",
@@ -586,7 +593,7 @@
}
},
"deviceId.info": "此设备的唯一标识符",
- "changesOnlyAffectNewlyLoadedModels": "更改仅影响新加载的模型",
+ "changesOnlyAffectNewlyLoadedModels": "更改仅会影响新加载的模型",
"toggleGpu": "启用/禁用 GPU"
}
},
@@ -596,9 +603,9 @@
"envVars": {
"select": {
"placeholder": "选择环境变量...",
- "noOptions": "无更多可用选项",
+ "noOptions": "没有更多可用选项",
"filter": {
- "placeholder": "过滤搜索结果",
+ "placeholder": "筛选搜索结果",
"resultsFound_zero": "未找到结果",
"resultsFound_one": "找到 1 个结果",
"resultsFound_other": "找到 {{count}} 个结果"
diff --git a/zh-CN/developer.json b/zh-CN/developer.json
index 9ed7ec42..a0c318e5 100644
--- a/zh-CN/developer.json
+++ b/zh-CN/developer.json
@@ -1,110 +1,119 @@
{
"tabs/server": "本地服务器",
- "tabs/extensions": "LM 运行环境",
+ "tabs/extensions": "LM 运行时",
"loadSettings/title": "加载设置",
- "modelSettings/placeholder": "选择一个模型进行配置",
+ "modelSettings/placeholder": "未选择模型",
- "loadedModels/noModels": "没有已加载的模型",
+ "loadedModels/noModels": "未加载模型",
"serverOptions/title": "服务器选项",
"serverOptions/configurableTitle": "可配置选项",
- "serverOptions/port/hint": "设置本地服务器将使用的网络端口。默认情况下,LM Studio 使用端口 1234。如果该端口已被占用,您可能需要更改此设置。",
+ "serverOptions/port/hint": "设置本地服务器将使用的网络端口。默认情况下,LM Studio 使用端口 1234。如果端口已被占用,你可能需要更改此设置。",
"serverOptions/port/subtitle": "监听的端口",
"serverOptions/autostart/title": "自动启动服务器",
- "serverOptions/autostart/hint": "当加载模型时自动启动本地服务器",
+ "serverOptions/autostart/hint": "在应用或服务启动时自动开启 LM Studio 的本地大语言模型服务器",
"serverOptions/port/integerWarning": "端口号必须是整数",
- "serverOptions/port/invalidPortWarning": "端口号必须介于 1 到 65535 之间",
+ "serverOptions/port/invalidPortWarning": "端口必须在 1 到 65535 之间",
"serverOptions/cors/title": "启用 CORS",
- "serverOptions/cors/hint1": "启用 CORS (跨源资源共享) 允许您访问的网站向 LM Studio 服务器发起请求。",
- "serverOptions/cors/hint2": "当从网页或 VS Code 或其他扩展发起请求时,可能需要启用 CORS。",
- "serverOptions/cors/subtitle": "允许跨源请求",
- "serverOptions/network/title": "在网络中提供服务",
- "serverOptions/network/subtitle": "向网络中的设备开放服务器",
- "serverOptions/network/hint1": "是否允许来自网络中其他设备的连接。",
- "serverOptions/network/hint2": "如果未选中,服务器将仅监听本地主机。",
- "serverOptions/verboseLogging/title": "详细日志记录",
- "serverOptions/verboseLogging/subtitle": "为本地服务器启用详细日志记录",
- "serverOptions/contentLogging/title": "记录提示和响应",
- "serverOptions/contentLogging/subtitle": "本地请求/响应日志记录设置",
- "serverOptions/contentLogging/hint": "是否在本地服务器日志文件中记录提示和/或响应。",
- "serverOptions/redactContent/title": "内容脱敏",
- "serverOptions/redactContent/hint": "启用后,可防止敏感数据(如请求和响应内容)被记录在日志中。",
- "serverOptions/logIncomingTokens/title": "记录传入的 Token",
- "serverOptions/logIncomingTokens/hint": "是否在生成过程中记录每个 Token",
+ "serverOptions/cors/hint1": "启用 CORS(跨域资源共享)将允许你访问的网站向 LM Studio 服务器发送请求。",
+ "serverOptions/cors/hint2": "从网页或 VS Code / 其他扩展发送请求时可能需要 CORS。",
+ "serverOptions/cors/subtitle": "允许跨域请求",
+ "serverOptions/network/title": "在本地网络上提供服务",
+ "serverOptions/network/subtitle": "向网络上的设备暴露服务器",
+ "serverOptions/network/hint1": "是否允许来自网络上的其他设备的连接。",
+ "serverOptions/network/hint2": "如果未勾选,服务器将仅监听 localhost。",
+ "serverOptions/verboseLogging/title": "详细日志",
+ "serverOptions/verboseLogging/subtitle": "为本地服务器启用详细日志",
+ "serverOptions/contentLogging/title": "记录提示词和响应",
+ "serverOptions/contentLogging/subtitle": "本地请求/响应日志设置",
+ "serverOptions/contentLogging/hint": "是否在本地服务器日志文件中记录提示词和/或响应。",
+ "serverOptions/redactContent/title": "编辑内容",
+ "serverOptions/redactContent/hint": "启用后,防止敏感数据(如请求和响应的内容)被记录。",
+ "serverOptions/logIncomingTokens/title": "记录传入 Token",
+ "serverOptions/logIncomingTokens/hint": "是否在生成过程中记录每个 Token。",
"serverOptions/fileLoggingMode/title": "文件日志模式",
"serverOptions/fileLoggingMode/off/title": "关闭",
"serverOptions/fileLoggingMode/off/hint": "不创建日志文件",
"serverOptions/fileLoggingMode/succinct/title": "简洁",
"serverOptions/fileLoggingMode/succinct/hint": "记录与控制台相同的内容。长请求将被截断。",
"serverOptions/fileLoggingMode/full/title": "完整",
- "serverOptions/fileLoggingMode/full/hint": "不对长请求进行截断。",
- "serverOptions/jitModelLoading/title": "JIT(即时)模型加载",
- "serverOptions/jitModelLoading/hint": "启用后,如果请求指定了一个未加载的模型,该模型将自动加载并使用。此外,\"/v1/models\" 端点还将包含尚未加载的模型。",
+ "serverOptions/fileLoggingMode/full/hint": "不截断长请求。",
+ "serverOptions/jitModelLoading/title": "即时模型加载",
+ "serverOptions/jitModelLoading/hint": "启用后,如果请求指定了未加载的模型,它将自动加载并使用。此外,\"/v1/models\" 端点也将包含尚未加载的模型。",
"serverOptions/loadModel/error": "加载模型失败",
"serverOptions/jitModelLoadingTTL/title": "自动卸载未使用的即时加载模型",
- "serverOptions/jitModelLoadingTTL/hint": "通过 API 请求即时加载的模型,若在一段时间内未被使用,将会被自动卸载(TTL)",
+ "serverOptions/jitModelLoadingTTL/hint": "为服务 API 请求而即时加载(JIT)的模型将在闲置一段时间后(TTL)自动卸载。",
"serverOptions/jitModelLoadingTTL/ttl/label": "最大空闲 TTL",
"serverOptions/jitModelLoadingTTL/ttl/unit": "分钟",
"serverOptions/unloadPreviousJITModelOnLoad/title": "仅保留最后一个即时加载的模型",
- "serverOptions/unloadPreviousJITModelOnLoad/hint": "确保在任意时刻最多只有一个即时加载的模型(会卸载之前的模型)",
+ "serverOptions/unloadPreviousJITModelOnLoad/hint": "确保在任何给定时间最多只有 1 个模型通过 JIT 加载(卸载之前的模型)",
+ "serverOptions/allowMcp/title": "允许远程 MCP",
+ "serverOptions/allowMcp/hint": "允许使用不在你的 mcp.json 中的 MCP。这些 MCP 连接是临时的,仅在请求存在期间存在。目前,仅支持远程 MCP。",
+ "serverOptions/allowMcp/mode/off": "关闭",
+ "serverOptions/allowMcp/mode/off/hint": "不允许服务器请求使用 MCP",
+ "serverOptions/allowMcp/mode/remote": "远程",
+ "serverOptions/allowMcp/mode/remote/hint": "允许连接到远程 MCP 服务器",
+
+ "serverOptions/start/error": "启动服务器失败",
+ "serverOptions/stop/error": "停止服务器失败",
"serverLogs/scrollToBottom": "跳转到底部",
- "serverLogs/clearLogs": "清除日志 ({{shortcut}})",
+ "serverLogs/clearLogs": "清除日志({{shortcut}})",
"serverLogs/openLogsFolder": "打开服务器日志文件夹",
- "runtimeSettings/title": "运行环境设置",
- "runtimeSettings/chooseRuntime/title": "配置运行环境",
- "runtimeSettings/chooseRuntime/description": "为每个模型格式选择一个运行环境",
- "runtimeSettings/chooseRuntime/showAllVersions/label": "显示所有运行环境",
- "runtimeSettings/chooseRuntime/showAllVersions/hint": "默认情况下,LM Studio 只显示每个兼容运行环境的最新版本。启用此选项可以查看所有可用的运行环境。",
- "runtimeSettings/chooseRuntime/select/placeholder": "选择一个运行环境",
+ "runtimeSettings/title": "运行时设置",
+ "runtimeSettings/chooseRuntime/title": "选择",
+ "runtimeSettings/chooseRuntime/description": "为每种模型格式选择要使用的引擎",
+ "runtimeSettings/chooseRuntime/showAllVersions/label": "显示所有扩展包",
+ "runtimeSettings/chooseRuntime/showAllVersions/hint": "默认情况下,LM Studio 仅显示每个扩展包的最新版本。启用此选项以查看所有可用的扩展包。",
+ "runtimeSettings/chooseRuntime/select/placeholder": "选择引擎",
"runtimeSettings/chooseFrameworks/title": "框架",
- "runtimeSettings/chooseFrameworks/description": "为每个功能选择要使用的框架",
+ "runtimeSettings/chooseFrameworks/description": "为每种功能选择要使用的框架",
"runtimeSettings/chooseFramework/documentParser/builtIn/label": "内置解析器",
"runtimeSettings/chooseFramework/documentParser/select/label": "文档解析器",
- "runtimeSettings/chooseFramework/documentParser/select/placeholder": "请选择文档解析器",
+ "runtimeSettings/chooseFramework/documentParser/select/placeholder": "选择文档解析器",
"runtimeOptions/uninstall": "卸载",
"runtimeOptions/uninstallDialog/title": "卸载 {{runtimeName}}?",
- "runtimeOptions/uninstallDialog/body": "卸载此运行环境将从系统中移除它。此操作不可逆。",
- "runtimeOptions/uninstallDialog/body/caveats": "某些文件可能需要在重启 LM Studio 后才能被移除。",
- "runtimeOptions/uninstallDialog/error": "卸载运行环境失败",
+ "runtimeOptions/uninstallDialog/body": "卸载此运行时会将其从系统中移除。此操作不可逆。",
+ "runtimeOptions/uninstallDialog/body/caveats": "某些文件可能仅在 LM Studio 重启后才能移除。",
+ "runtimeOptions/uninstallDialog/error": "卸载运行时失败",
"runtimeOptions/uninstallDialog/confirm": "继续并卸载",
"runtimeOptions/uninstallDialog/cancel": "取消",
- "runtimeOptions/noCompatibleRuntimes": "未找到兼容的运行环境",
- "runtimeOptions/downloadIncompatibleRuntime": "此运行环境被认为与您的机器不兼容。它很可能无法正常工作。",
- "runtimeOptions/noRuntimes": "未找到运行环境",
+ "runtimeOptions/noCompatibleRuntimes": "未找到兼容的运行时",
+ "runtimeOptions/downloadIncompatibleRuntime": "此运行时被确定为与你的机器不兼容。它很可能无法工作。",
+ "runtimeOptions/noRuntimes": "未找到运行时",
"runtimes": {
- "manageLMRuntimes": "管理 LM 运行环境",
+ "manageLMRuntimes": "管理 LM 运行时",
"includeOlderRuntimeVersions": "包含旧版本",
"dismiss": "关闭",
"updateAvailableToast": {
- "title": "LM 运行环境更新可用!"
+ "title": "LM 运行时有可用更新!"
},
"updatedToast": {
- "title": " ✅ LM 运行环境已更新:{{runtime}} → v{{version}}",
- "preferencesUpdated": "新加载的 {{compatibilityTypes}} 模型将使用更新后的运行环境。"
+ "title": " ✅ LM 运行时已更新:{{runtime}} → v{{version}}",
+ "preferencesUpdated": "新加载的 {{compatibilityTypes}} 模型将使用更新的运行时。"
},
- "noAvx2ErrorMessage": "所有 LM 运行环境当前都需要支持 AVX2 指令集的 CPU",
+ "noAvx2ErrorMessage": "所有 LM 运行时目前都需要支持 AVX2 的 CPU",
"downloadableRuntimes": {
- "runtimeExtensionPacks": "运行环境扩展包",
+ "runtimeExtensionPacks": "运行时扩展包",
"refresh": "刷新",
"refreshing": "刷新中...",
"filterSegment": {
- "compatibleOnly": "仅兼容",
+ "compatibleOnly": "仅兼容的",
"all": "全部"
},
"card": {
- "releaseNotes": "版本说明",
+ "releaseNotes": "发布说明",
"latestVersionInstalled": "已安装最新版本",
- "updateAvailable": "更新可用"
+ "updateAvailable": "有可用更新"
}
},
"installedRuntimes": {
"manage": {
- "title": "管理可用的运行环境"
+ "title": "管理活动运行时"
},
"dropdownOptions": {
"installedVersions": "管理版本",
@@ -116,10 +125,10 @@
"engines": "我的引擎"
},
"detailsModal": {
- "installedVersions": "{{runtimeName}}的已安装版本",
- "manifestJsonTitle": "清单 JSON(高级)",
- "releaseNotesTitle": "版本说明",
- "noReleaseNotes": "该版本无可用的版本说明",
+ "installedVersions": "{{runtimeName}} 的已安装版本",
+ "manifestJsonTitle": "Manifest JSON(高级)",
+ "releaseNotesTitle": "发布说明",
+ "noReleaseNotes": "此版本没有可用的发布说明",
"back": "返回",
"close": "关闭"
},
@@ -128,15 +137,15 @@
}
},
- "inferenceParams/noParams": "此模型类型无可用配置的推理参数",
+ "inferenceParams/noParams": "此模型类型没有可配置的推理参数",
"quickDocs": {
- "tabChipTitle": "快速文档",
- "newToolUsePopover": "代码片段现已在“快速文档”中提供。点击此处开始使用工具!",
- "newToolUsePopoverTitle": "📚 快速文档",
- "learnMore": "ℹ️ 👾 要了解有关 LM Studio 本地服务器端的更多信息,请访问[文档](https://lmstudio.ai/docs)。",
+ "tabChipTitle": "开发者文档",
+ "newToolUsePopover": "代码片段现已在\"开发者文档\"中提供。点击此处开始使用工具使用功能!",
+ "newToolUsePopoverTitle": "📚 开发者文档",
+ "learnMore": "ℹ️ 👾 要了解更多关于 LM Studio 本地服务器端点的信息,请访问[文档](https://lmstudio.ai/docs)。",
"helloWorld": {
- "title": "你好,世界!"
+ "title": "Hello, World!"
},
"chat": {
"title": "聊天"
@@ -145,34 +154,43 @@
"title": "结构化输出"
},
"imageInput": {
- "title": "图像输入"
+ "title": "图片输入"
},
"embeddings": {
- "title": "文本嵌入"
+ "title": "嵌入"
},
"toolUse": {
"title": "工具使用",
"tab": {
- "saveAsPythonFile": "保存为Python文件",
+ "saveAsPythonFile": "保存为 Python 文件",
"runTheScript": "运行脚本:",
- "savePythonFileCopyPaste": "保存为Python文件以进行复制粘贴命令"
+ "savePythonFileCopyPaste": "保存为 Python 文件以复制粘贴命令"
}
},
- "newBadge": "新功能"
+ "newBadge": "新"
},
- "endpoints/openaiCompatRest/title": "支持的端点 (与 OpenAI 兼容的格式)",
- "endpoints/openaiCompatRest/getModels": "列出当前已加载的模型",
- "endpoints/openaiCompatRest/postCompletions": "文本补全模式。给定一个提示,预测下一个token。注意:OpenAI 认为此端点已'弃用'。",
- "endpoints/openaiCompatRest/postChatCompletions": "聊天补全。向模型发送聊天历史以预测下一个助手响应。",
+ "endpoints/openaiCompatRest/title": "支持的端点{{extra}}",
+ "endpoints/openaiCompatRest/segmentedLabel": "类 OpenAI",
+ "endpoints/openaiCompatRest/getModels": "列出当前加载的模型",
+ "endpoints/openaiCompatRest/postCompletions": "文本补全模式。根据提示预测下一个 Token。注意:OpenAI 认为此端点已\"弃用\"。",
+ "endpoints/openaiCompatRest/postChatCompletions": "聊天补全。将聊天历史发送给模型以预测下一个助手回复",
"endpoints/openaiCompatRest/postEmbeddings": "文本嵌入。为给定的文本输入生成文本嵌入。接受字符串或字符串数组。",
+ "endpoints/openaiCompatRest/postResponses": "生成模型响应的高级接口。通过将之前响应的 id 作为输入传递给下一个来创建有状态的交互。",
+ "endpoints/lmStudioRest/segmentedLabel": "LM Studio",
+ "endpoints/lmStudioRestV1/getModels": "列出可用模型",
+ "endpoints/lmStudioRestV1/postModelsLoad": "加载带选项的模型",
+ "endpoints/lmStudioRestV1/postModelsDownload": "下载模型",
+ "endpoints/lmStudioRestV1/postChat": "与模型聊天。支持有状态的多轮对话和 MCP",
+ "endpoints/lmStudioRestV1/getModelsDownloadStatus": "获取模型下载状态",
+ "endpoints/anthropicCompatRest/segmentedLabel": "兼容 Anthropic",
- "model.createVirtualModelFromInstance": "另存为新的虚拟模型",
- "model.createVirtualModelFromInstance/error": "另存为新的虚拟模型失败",
+ "model.createVirtualModelFromInstance": "将设置保存为新虚拟模型",
+ "model.createVirtualModelFromInstance/error": "将设置保存为新虚拟模型失败",
"model": {
"toolUseSectionTitle": "工具使用",
- "toolUseDescription": "检测到此模型经过工具使用的训练\n\n打开快速文档以了解更多信息。"
+ "toolUseDescription": "检测到该模型已针对工具使用进行训练\n\n打开开发者文档了解更多信息"
},
"apiConfigOptions/title": "API 配置"
diff --git a/zh-CN/discover.json b/zh-CN/discover.json
index 18e00a7c..6e614675 100644
--- a/zh-CN/discover.json
+++ b/zh-CN/discover.json
@@ -1,29 +1,29 @@
{
"collectionsColumn": "集合",
- "collectionsColumn/collectionError": "加载集合详情时出错,请尝试上方的刷新按钮",
+ "collectionsColumn/collectionError": "加载集合详情出错,请尝试上方刷新",
"bookmarksColumn": "书签",
- "searchBar/placeholder": "在 Hugging Face 上搜索模型...",
- "searchBar/huggingFaceError": "从 Hugging Face 获取结果时出现错误,请稍后再试",
- "sortBy": "排序依据",
+ "searchBar/placeholder": "按名称或作者搜索本地模型...",
+ "searchBar/huggingFaceError": "从 Hugging Face 获取结果时出错,请稍后重试",
+ "sortBy": "排序方式",
"searchSortKey.default/title": "最佳匹配",
"searchSortKey.likes/title": "最多点赞",
"searchSortKey.downloads/title": "最多下载",
"searchSortKey.lastModified/title": "最近更新",
"searchSortKey.createdAt/title": "最近创建",
- "download.option.willFitEstimation.caveat": "可能存在其他因素阻止其加载,例如模型架构、模型文件完整性或计算机上可用的内存量。",
- "download.option.willFitEstimation.fullGPUOffload/title": "可能能够完全加载进 GPU 显存",
- "download.option.willFitEstimation.fullGPUOffload/description": "此模型可能能够完全运行在您的 GPU 的显存中。这通常会显著加快推理速度。",
- "download.option.willFitEstimation.partialGPUOffload/title": "可能能够部分加载进 GPU 显存",
- "download.option.willFitEstimation.partialGPUOffload/description": "此模型可能能够部分运行在您的 GPU 的显存中。这通常会显著加快推理速度。",
+ "download.option.willFitEstimation.caveat": "可能还有其他因素阻止其加载,例如模型架构、模型文件完整性或计算机可用内存量。",
+ "download.option.willFitEstimation.fullGPUOffload/title": "可完全 GPU 卸载",
+ "download.option.willFitEstimation.fullGPUOffload/description": "此模型可能完全适合你的 GPU 内存。这可能会显著加快推理速度。",
+ "download.option.willFitEstimation.partialGPUOffload/title": "可部分 GPU 卸载",
+ "download.option.willFitEstimation.partialGPUOffload/description": "此模型可能部分适合你的 GPU 内存。这通常可能会显著加快推理速度。",
"download.option.willFitEstimation.fitWithoutGPU/title": "可能适合",
- "download.option.willFitEstimation.fitWithoutGPU/description": "此模型可能能够加载进您的机器的内存中。",
- "download.option.willFitEstimation.willNotFit/title": "对于此设备可能过大",
- "download.option.willFitEstimation.willNotFit/description": "加载此模型文件所需的内存可能超过您的设备上的可用内存。不推荐下载此文件。",
+ "download.option.willFitEstimation.fitWithoutGPU/description": "此模型可能适合你的机器内存。",
+ "download.option.willFitEstimation.willNotFit/title": "可能太大",
+ "download.option.willFitEstimation.willNotFit/description": "成功使用此模型文件的内存需求可能超过你机器上的可用资源。不建议下载此文件。",
"download.option.recommended/title": "推荐",
- "download.option.recommended/description": "基于您的硬件,推荐选择这个选项。",
+ "download.option.recommended/description": "根据你的硬件,推荐使用此选项。",
"download.option.downloaded/title": "已下载",
- "download.option.downloading/title": "正在下载 ({{progressPercentile}}%)",
+ "download.option.downloading/title": "下载中({{progressPercentile}}%)",
"failedToStartDownload": "开始下载失败",
-
- "feed.action.refresh": "刷新动态"
+
+ "feed.action.refresh": "刷新信息流"
}
diff --git a/zh-CN/download.json b/zh-CN/download.json
index dfd07f59..a11d0ebf 100644
--- a/zh-CN/download.json
+++ b/zh-CN/download.json
@@ -1,34 +1,34 @@
{
- "postDownloadActionExecutor.zipExtraction/status": "解压中...",
- "postDownloadActionExecutor.tarGzExtraction/status": "解压中...",
- "postDownloadActionExecutor.runtimeIndexerTarGzExtraction/status": "解压中...",
- "postDownloadActionExecutor.modifyModelData/status": "更新模型文件中...",
- "postDownloadActionExecutor.notification/status": "通知用户中...",
- "postDownloadActionExecutor.writeString/status": "写入元数据中...",
- "postDownloadActionExecutor.updateSelectedBackendVersions/status": "更新选中版本中...",
- "postDownloadActionExecutor.extensionPackAutoDeletion/status": "删除未使用的扩展中...",
- "postDownloadActionExecutor.pluginInstall/status": "安装插件中...",
- "postDownloadActionExecutor.pluginUninstall/status": "卸载插件中...",
+ "postDownloadActionExecutor.zipExtraction/status": "正在解压文件...",
+ "postDownloadActionExecutor.tarGzExtraction/status": "正在解压文件...",
+ "postDownloadActionExecutor.runtimeIndexerTarGzExtraction/status": "正在解压文件...",
+ "postDownloadActionExecutor.modifyModelData/status": "正在更新模型数据...",
+ "postDownloadActionExecutor.notification/status": "正在通知用户...",
+ "postDownloadActionExecutor.writeString/status": "正在写入元数据...",
+ "postDownloadActionExecutor.updateSelectedBackendVersions/status": "正在更新选定的版本...",
+ "postDownloadActionExecutor.extensionPackAutoDeletion/status": "正在删除未使用的扩展...",
+ "postDownloadActionExecutor.pluginInstall/status": "正在安装插件...",
+ "postDownloadActionExecutor.pluginUninstall/status": "正在卸载插件...",
- "finalizing": "完成下载...(这可能需要几分钟)",
+ "finalizing": "正在完成下载...(可能需要片刻)",
"noOptions": "没有可用的兼容下载选项",
-
+
"deeplink/confirmation/title": "从 Hugging Face 下载模型 🤗",
"deeplink/confirmation/subtitle": "{{modelName}}",
"deeplink/confirmation/selectRecommended": "选择推荐项",
"deeplink/confirmation/selectOption": "选择下载选项",
- "deeplink/confirmation/recommendedOption": "对大多数用户来说可能是最佳选项",
+ "deeplink/confirmation/recommendedOption": "可能是大多数用户的最佳选择",
"deeplink/confirmation/downloadButton": "下载",
- "deeplink/confirmation/nevermindButton": "取消",
+ "deeplink/confirmation/nevermindButton": "算了",
"deeplink/confirmation/modelPresent/title": "找到 Hugging Face 模型 ✅",
- "deeplink/confirmation/modelPresent/body": "好消息!此模型文件已经在您的本地机器上可用。",
- "deeplink/confirmation/loadInChat": "在新聊天中加载 {{ modelName }}",
- "deeplink/error/modelNotFound/title": "未找到该模型",
- "deeplink/error/modelNotFound/body": "请再次检查模型名称,并考虑尝试不同的下载选项。",
+ "deeplink/confirmation/modelPresent/body": "好消息!此模型文件已在你的本地机器上可用。",
+ "deeplink/confirmation/loadInChat": "在新对话中加载 {{ modelName }}",
+ "deeplink/error/modelNotFound/title": "哎呀,我们无法找到此模型",
+ "deeplink/error/modelNotFound/body": "请仔细检查模型名称,并考虑尝试不同的下载选项。",
"deeplink/actions/trySearching": "尝试在 Hugging Face 上搜索 {{modelName}}",
-
+
"downloadsPanel/title": "下载",
- "downloadsPanel/sectionTitle/ongoing": "正在进行",
+ "downloadsPanel/sectionTitle/ongoing": "进行中",
"downloadsPanel/sectionTitle/completed": "已完成",
"downloadsPanel": {
"reveal": {
@@ -37,7 +37,7 @@
"nonMac": "在文件资源管理器中显示",
"nonMac/error": "在文件资源管理器中显示失败"
},
- "completed": "下载完成!",
+ "completed": "下载完成",
"loadModel": "加载模型"
}
}
diff --git a/zh-CN/models.json b/zh-CN/models.json
index da248fdf..0d32434b 100644
--- a/zh-CN/models.json
+++ b/zh-CN/models.json
@@ -1,115 +1,132 @@
{
"pageTitle": "我的模型",
"filterModels.placeholder": "筛选模型...",
- "aggregate_one": "您有 {{count}} 个本地模型,占用了 {{size}} 的磁盘空间。",
- "aggregate_other": "您有 {{count}} 个本地模型,占用了 {{size}} 的磁盘空间。",
+ "aggregate_one": "你有 {{count}} 个本地模型,占用 {{size}} 磁盘空间",
+ "aggregate_other": "你有 {{count}} 个本地模型,占用 {{size}} 磁盘空间",
- "noModels.title": "您的本地 LLM 将显示在这里。",
- "noModels.discoverButtonText.prefix": "点击左侧边栏的",
- "noModels.discoverButtonText.suffix": "按钮来发现有趣的 LLM。",
- "noModels.discoverModelsPrompt": "去探索一些本地 LLM 吧!",
+ "noModels.title": "你的模型将显示在这里。",
+ "noModels.discoverButtonText.prefix": "点击",
+ "noModels.discoverButtonText.suffix": "按钮查找要下载的模型。",
+ "noModels.discoverModelsPrompt": "去探索一些本地大语言模型吧!",
"modelsTable.arch/label": "架构",
"modelsTable.params/label": "参数量",
"modelsTable.publisher/label": "发布者",
- "modelsTable.displayName/label": "名字",
- "modelsTable.modelKey/label": "模型密钥",
- "modelsTable.size/label": "尺寸",
- "modelsTable.dateModified/label": "修改日期",
+ "modelsTable.displayName/label": "名称",
+ "modelsTable.modelKey/label": "模型键",
+ "modelsTable.size/label": "大小",
+ "modelsTable.dateModified/label": "修改时间",
"modelsTable.actions/label": "操作",
- "modelsTable.quant/label": "量化规格",
- "modelsTable.llms/label": "语言模型",
+ "modelsTable.quant/label": "量化",
+ "modelsTable.llms/label": "大语言模型",
"modelsTable.embeddingModels/label": "嵌入模型",
"action.model.delete": "删除",
"action.model.delete.full": "删除模型",
"action.model.delete.confirmation/title": "删除 {{name}}",
- "action.model.delete.confirmation/description": "您确定吗?这将永久删除与此模型相关的所有文件,此操作不可逆。",
+ "action.model.delete.confirmation/description": "确定吗?这将永久删除你机器上与此模型相关的所有文件。此操作不可逆。",
"action.model.delete.confirmation/confirm": "删除",
"action.model.delete/error": "删除模型失败",
"loader.model.bundled": "捆绑",
"action.cancel": "取消",
"indexingOngoing": "正在索引模型... 这可能需要几秒钟",
+ "indexingPageLoaderText": "正在索引模型...",
"index/error_one": "索引以下文件夹失败:",
"index/error_other": "索引以下文件夹失败:",
"badModels/title_one": "索引以下模型失败:",
"badModels/title_other": "索引以下模型失败:",
- "badModels.virtualModelIncorrectPlacement": "虚拟模型放置错误。预期位置为 {{expected}}。实际位置为 {{actual}}。",
- "badModels.virtualModelBadManifest": "无效的虚拟模型清单 (model.yaml):",
+ "badModels.virtualModelIncorrectPlacement": "虚拟模型放置位置不正确。期望位置:{{expected}}。实际位置:{{actual}}。",
+ "badModels.virtualModelBadManifest": "无效的虚拟模型清单(model.yaml):",
"unresolvedVirtualModels/title_one": "解析以下虚拟模型失败:",
"unresolvedVirtualModels/title_other": "解析以下虚拟模型失败:",
"unresolvedVirtualModels.missingModel": "缺少依赖模型:{{missing}}。依赖路径:\n{{chain}}",
"unresolvedVirtualModels.circular": "检测到循环依赖。",
"unresolvedVirtualModels.fix": "修复",
- "unresolvedVirtualModels.revealInExplorer": "在文件资源管理器中显示",
+ "unresolvedVirtualModels.revealInExplorer": "在资源管理器中显示",
"unresolvedVirtualModels.revealInFinder": "在 Finder 中显示",
"unresolvedVirtualModels.reveal/error": "显示失败",
"modelsDirectory": "模型目录",
"modelsDirectory.change": "更改...",
- "modelsDirectory.change/error": "修改模型路径失败",
+ "modelsDirectory.change/error": "更改模型目录失败",
"modelsDirectory.reset": "重置为默认路径",
"modelsDirectory.reveal.mac": "在 Finder 中显示",
"modelsDirectory.reveal.nonMac": "在文件资源管理器中打开",
- "modelsDirectory.reveal.mac/error": "在 Finder 中显示失败",
+ "modelsDirectory.reveal.mac/error": "在 Finder 中打开失败",
"modelsDirectory.reveal.nonMac/error": "在文件资源管理器中打开失败",
- "modelsDirectory.forceReindex": "刷新",
+ "modelsDirectory.forceReindex": "刷新模型列表",
"loadState/loaded": "已加载",
"loadState/loading": "加载中",
"loadState/unloaded": "未加载",
"loadState/unloading": "卸载中",
"loadState/idle": "空闲",
- "pinned": "此模型已被固定。右键点击取消固定。",
- "lastUsed": "最后使用的",
- "contextMenu/pin": "固定到顶部",
- "contextMenu/unpin": "取消固定",
+ "pinned": "此模型已置顶。右键点击取消置顶。",
+ "lastUsed": "上次使用",
+ "contextMenu/pin": "置顶",
+ "contextMenu/unpin": "取消置顶",
"contextMenu/copyAbsolutePath": "复制绝对路径",
"contextMenu/copyModelName": "复制模型路径",
- "contextMenu/copyModelDefaultIdentifier": "复制默认标识符",
- "contextMenu/showRawMetadata": "查看原始元数据",
+ "contextMenu/copyModelDefaultIdentifier": "复制默认标识符",
+ "contextMenu/showRawMetadata": "显示原始元数据",
"contextMenu/openOnHuggingFace": "在 Hugging Face 上打开",
"contextMenu": {
"showOnWeb": "在网页上显示",
"pullLatest": {
- "label": "拉取最新版本",
- "checking": "检查更新中...",
- "upToDate": "已是最新版本",
+ "label": "拉取最新",
+ "checking": "正在检查更新...",
+ "upToDate": "已是最新",
"error": "检查更新失败"
}
},
"tooltip/moreActions": "更多操作",
"tooltip/getInfo": "获取信息",
"tooltip/editModelDefaultConfig": "编辑模型默认配置",
- "tooltip/editModelDefaultConfig/override": "编辑模型默认配置(*当前有覆盖)",
- "tooltip/visionBadge": "此模型支持图像输入",
- "tooltip/toolUseBadge": "此模型经过工具使用的训练",
+ "tooltip/editModelDefaultConfig/override": "编辑模型默认配置(* 有覆盖)",
+ "tooltip/visionBadge": "此模型可以处理图片输入",
+ "tooltip/toolUseBadge": "此模型已针对工具使用进行训练",
- "visionBadge/label": "此模型支持图像输入",
- "toolUseBadge/label": "此模型经过工具使用的训练",
+ "visionBadge/label": "已启用视觉",
+ "toolUseBadge/label": "已针对工具使用训练",
"loader.action.load": "加载模型",
"loader.action.clearChanges": "清除更改",
"loader.action.cancel": "取消",
"loader.info.clickOnModelToLoad": "点击模型以加载",
"loader.info.configureLoadParameters": "配置模型加载参数",
- "loader.info.activeGeneratorWarning": "您正在使用带有自定义生成器的插件。当前加载的模型是否适用于该插件,取决于生成器的具体实现方式",
+ "loader.info.activeGeneratorWarning": "你正在使用带有自定义生成器的插件。当前加载的模型可能会或可能不会被应用于此插件,取决于生成器的实现",
+ "loader.guardrails.estimatedMemoryUsage": "预估内存使用",
+ "loader.guardrails.total": "总计",
+ "loader.guardrails.gpu": "GPU",
+ "loader.guardrails.unavailable": "此模型的内存预估不可用",
+ "loader.guardrails.notEnoughResources": "资源不足以使用当前设置加载模型",
+ "loader.guardrails.notEnoughResources/options": "选项",
+ "loader.guardrails.notEnoughResources.moreInfoSection.appearsNotEnoughMemory": "你的系统似乎没有足够的内存来加载此模型。",
+ "loader.guardrails.notEnoughResources.moreInfoSection.ifYouBelieveThisIsIncorrect": "你可以在设置中调整模型加载保护,或按住 强制加载。",
+ "loader.guardrails.notEnoughResources.moreInfoSection.warning": "加载过大的模型可能会使系统过载并导致冻结。",
+ "loader.guardrails.notEnoughResources.alwaysAllowLoadAnyway": "(不推荐)始终允许\"强制加载\"而无需按住 Alt/Option",
"virtual": {
"local": {
"create": "创建虚拟模型",
- "title": "创建一个本地虚拟模型",
- "description": "通过将模型与一组配置捆绑来创建虚拟模型,模型的底层权重文件不会被复制。",
- "modelKey.label": "模型密钥",
- "modelKey.placeholder": "输入唯一的模型密钥",
- "modelKey.normalized": "您的模型密钥将被规范化为:{{normalized}}",
+ "title": "创建本地虚拟模型",
+ "description": "通过将模型与一组配置捆绑来创建虚拟模型。底层权重不会被复制。",
+ "modelKey.label": "模型键",
+ "modelKey.placeholder": "输入唯一的模型键",
+ "modelKey.normalized": "你的模型键将被规范化为:{{normalized}}",
"baseModel.label": "基础模型",
"baseModel.placeholder": "选择基础模型",
- "baseModel.empty": "下载模型作为基础模型",
+ "baseModel.empty": "下载一个模型作为基础模型",
"next": "下一步",
"confirm": "创建",
"error": "创建虚拟模型失败"
+ },
+ "altsSelect": {
+ "title": "切换模型源",
+ "resetButton": "重置为默认",
+ "description": "此模型有多个可用的源文件。",
+ "trigger": "变体"
}
}
}
diff --git a/zh-CN/onboarding.json b/zh-CN/onboarding.json
index 5600f0f3..43ddb488 100644
--- a/zh-CN/onboarding.json
+++ b/zh-CN/onboarding.json
@@ -3,21 +3,21 @@
"action.next": "下一步",
"action.back": "上一步",
"action.finish": "完成",
-
+
"dismissable_rag_modal": {
- "description": "现在您可以使用检索增强生成 (RAG) 以使用自己的文档与模型进行聊天。以下是其工作原理:",
+ "description": "你现在可以使用检索增强生成(RAG)与自己的文档聊天。工作原理如下:",
"instructions": {
"attach_files": {
- "title": "上传文件",
- "description": "一次最多可上传5个文件,总大小不超过30MB。支持的格式包括PDF、DOCX、TXT和CSV。"
+ "title": "附加文件",
+ "description": "每次最多上传 5 个文件,最大总大小为 30MB。支持的格式包括 PDF、DOCX、TXT 和 CSV。"
},
"be_specific": {
"title": "具体明确",
- "description": "提问时,尽可能多地提及细节。这有助于系统从您的文档中检索最相关的信息。"
+ "description": "提问时尽可能提及更多细节。这有助于系统从你的文档中检索最相关的信息。"
},
"get_responses": {
- "title": "获取回应并实验",
- "description": "大语言模型将查看您的查询和从文档中检索到的摘录,并尝试生成回应。通过尝试不同的查询来找到最佳方法。"
+ "title": "获取回复并尝试",
+ "description": "大语言模型会查看你的查询和从文档中检索到的摘录,并尝试生成回复。尝试不同的查询以找到最佳效果。"
}
}
},
@@ -25,18 +25,18 @@
"toolUse": {
"step_0": {
"title": "Beta:工具使用 🛠️(函数调用)",
- "text_0": "某些模型(例如 Llama 3.1/3.2、Mistral、Qwen 等)经过工具使用的训练。",
- "text_1": "实际操作的含义是:您以特定格式向大语言模型提供一组「工具」(函数签名),模型会根据用户的提示决定是否调用这些工具。",
- "text_2": "您可以想象一些应用场景,例如调用 API、运行代码,或任何可通过函数调用表达的操作。"
+ "text_0": "某些模型(如 Llama 3.1/3.2、Mistral、Qwen 等)已针对工具使用进行训练。",
+ "text_1": "这在实践中意味着:你以特定格式向大语言模型提供一组\"工具\"(函数签名),大语言模型可以根据用户的提示决定是否\"调用\"它们。",
+ "text_2": "你可以想象诸如查询 API、运行代码或任何可以表示为函数调用的用例。"
},
"step_1": {
- "title": "开始使用工具功能",
- "toolUseCanWorkWithAnyModel": "经过工具使用训练的模型表现会更优,但您也可以尝试在任意模型上使用工具。阅读文档了解详情。\n经过工具使用训练的模型会带有新标识:",
- "hasCompatibleModel": "🎉 看来您已有支持工具使用的模型!",
- "downloadRecommendedModel": "下载经过工具使用训练的模型:"
+ "title": "开始使用工具使用功能",
+ "toolUseCanWorkWithAnyModel": "针对工具使用训练的模型会比其他模型表现更好,但你可以尝试在任何模型上使用工具。阅读文档了解更多。\n针对工具使用训练的模型将标有新徽章:",
+ "hasCompatibleModel": "🎉 看起来你已经有支持工具的模型了!",
+ "downloadRecommendedModel": "下载针对工具使用训练的模型:"
},
"nextButton": "下一步",
"letsGoButton": "加载模型并启动服务器",
- "doneButton": "忽略"
+ "doneButton": "关闭"
}
}
diff --git a/zh-CN/settings.json b/zh-CN/settings.json
index 3c0cfbe3..450ab2c2 100644
--- a/zh-CN/settings.json
+++ b/zh-CN/settings.json
@@ -4,192 +4,211 @@
"accountDialogButtonTooltip": "账户",
"settingsNewButtonPopover": {
- "primary": "应用设置现已移至右下角",
- "secondary": "点击⚙️按钮来打开",
- "tertiary": "或者按"
+ "primary": "应用设置现在位于右下角",
+ "secondary": "点击 ⚙️ 按钮打开设置。",
+ "tertiary": "或按"
},
"appUpdate": "应用更新",
"checkingAppUpdate": "正在检查更新...",
"checkForUpdates": "检查更新",
"failedCheckingAppUpdate": "检查更新失败",
- "newUpdateAvailable": "LM Studio 有新版本可用!🎉",
- "newBetaUpdateAvailable": "LM Studio 有新测试版可用!🛠️🎉",
+ "newUpdateAvailable": "有新版本的 LM Studio 可用!🎉",
+ "newBetaUpdateAvailable": "有新版本的 LM Studio Beta 可用!🛠️🎉",
"downloadingInProgress": "正在下载更新...",
- "downloadUpdate": "更新至 LM Studio {{version}}",
- "downloadBetaUpdate": "更新至 LM Studio 测试版 {{version}} (版本号 {{build}})",
+ "downloadUpdate": "更新到 LM Studio {{version}}",
+ "downloadBetaUpdate": "更新到 LM Studio Beta {{version}}(构建 {{build}})",
"downloadCompleted": "下载完成!",
- "updateDownloadComplete": "更新下载成功!",
+ "updateDownloadComplete": "LM Studio 更新已就绪",
"updateDownloadFailed": "更新失败!",
- "hasFinishedDownloading": "下载完毕",
- "yourCurrentVersion": "当前版本为:",
- "latestVersion": "最新版本为:",
+ "hasFinishedDownloading": "已完成下载。",
+ "yourCurrentVersion": "你当前版本:",
+ "latestVersion": "最新版本:",
"downloadLabel": "立即更新",
"downloadLabel/Linux": "下载更新",
"cancelDownloadLabel": "取消",
"downloadingUpdate": "正在下载 {{item}}...",
- "updateDownloaded": "新更新已成功下载。重启应用以应用更新",
- "restartAppToUpdate": "重新启动应用以应用更新",
- "appUpdatedToastTitle": "已更新至 {{title}}",
- "appUpdatedToastDescriptionPrefix": "查看",
- "AppUpdatedToastDescriptionReleaseNotes": "发行说明",
- "toolUseToastTitle": "测试新功能:工具调用与函数调用 API",
- "toolUseToastDescription": "支持 Llama 3.1/3.2、Mistral、Qwen 等部分模型,兼容 OpenAI 工具调用,快速上手。",
- "toolUseToastButtonText": "前往开发者页面体验",
- "doItLater": "稍后再说",
- "failedToUpdate": "应用更新失败。请检查您的网络连接或稍后再试。",
- "retryInBackground": "后台重试",
+ "updateDownloaded": "需要重启应用以应用更新",
+ "restartAppToUpdate": "重启应用以应用更新",
+ "appUpdatedToastTitle": "已更新到 {{title}}",
+ "appUpdatedToastDescriptionPrefix": "查看 ",
+ "AppUpdatedToastDescriptionReleaseNotes": "发布说明",
+ "toolUseToastTitle": "Beta 新功能:工具使用和函数调用 API",
+ "toolUseToastDescription": "对 OpenAI 工具使用的直接替代,支持 Llama 3.1/3.2、Mistral 和 Qwen 等模型。",
+ "toolUseToastButtonText": "前往开发者页面尝试",
+ "doItLater": "稍后处理",
+ "failedToUpdate": "应用更新失败。请仔细检查你的互联网连接或稍后重试。",
+ "retryInBackground": "在后台重试",
"laterLabel": "稍后",
- "releaseNotesLabel": "发行说明",
+ "releaseNotesLabel": "发布说明",
"remindMeLater": "稍后提醒我",
"failedDownloadUpdate": "下载更新失败",
- "installAndRelaunch": "安装并重新启动",
- "uptodate": "您的应用已是最新版本!当前版本为 {{version}}",
+ "installAndRelaunch": "安装并重启",
+ "uptodate": "你已是最新版本!当前版本为 {{version}}",
"preferences": "偏好设置",
- "general": "常规",
+ "general": "通用",
"sideButtonLabels": "显示侧边按钮标签",
- "showModelFileNames": "在“我的模型”中始终显示完整模型文件名",
+ "showModelFileNames": "我的模型:始终显示完整模型文件名",
"colorThemeLabel": "颜色主题",
+ "appNavigationBarPositionLabel": "导航栏位置",
+ "appNavigationBarPositionTop": "顶部",
+ "appNavigationBarPositionLeft": "左侧",
"complexityLevelLabel": "用户界面复杂度级别",
- "selectComplexityLevelPlaceholder": "选择默认的UI复杂度级别",
+ "selectComplexityLevelPlaceholder": "选择默认 UI 复杂度级别",
"userComplexityLevelLabel": "普通用户",
"powerUserComplexityLevelLabel": "高级用户",
"developerComplexityLevelLabel": "开发者",
- "chatSettingsLabel": "聊天设置",
- "chat/alwaysShowPromptTemplate": "始终在聊天侧栏显示提示模板",
- "chat/highlightChatMessageOnHover": "鼠标悬停时高亮显示聊天消息",
- "chat/doubleClickMessageToEdit": "双击聊天消息以编辑",
+ "chatSettingsLabel": "对话设置",
+ "chat/alwaysShowPromptTemplate": "始终在对话侧边栏显示提示词模板",
+ "chat/highlightChatMessageOnHover": "悬停时高亮对话消息",
+ "chat/doubleClickMessageToEdit": "双击对话消息进行编辑",
- "chat/aiNaming/label": "AI命名聊天",
- "chat/aiNaming/mode/label": "AI生成的聊天名称",
- "chat/aiNaming/mode/value/never": "关闭",
- "chat/aiNaming/mode/value/never/subTitle": "不使用AI生成聊天名称",
+ "chat/aiNaming/label": "对话 AI 命名",
+ "chat/aiNaming/mode/label": "AI 生成的对话名称",
+ "chat/aiNaming/mode/value/never": "从不",
+ "chat/aiNaming/mode/value/never/subTitle": "不创建 AI 生成的对话名称",
"chat/aiNaming/mode/value/auto": "自动",
- "chat/aiNaming/mode/value/auto/subTitle": "根据生成速度自动决定是否使用AI生成聊天名称",
- "chat/aiNaming/mode/value/always": "开启",
- "chat/aiNaming/mode/value/always/subTitle": "使用AI生成聊天名称",
- "chat/aiNaming/emoji": "在AI生成的聊天名称中使用表情符号",
+ "chat/aiNaming/mode/value/auto/subTitle": "根据生成速度决定是否创建名称",
+ "chat/aiNaming/mode/value/always": "始终",
+ "chat/aiNaming/mode/value/always/subTitle": "无论生成速度如何都创建 AI 生成的对话名称",
+ "chat/aiNaming/emoji": "在 AI 生成的对话名称中使用表情符号",
"chat/keyboardShortcuts/label": "键盘快捷键",
"chat/keyboardShortcuts/verbPrefix": "使用",
- "chat/keyboardShortcuts/regenerate": "重新生成聊天中的最后一条消息",
+ "chat/keyboardShortcuts/regenerate": "重新生成对话中的最后一条消息",
"chat/keyboardShortcuts/sendMessage": "发送消息",
- "onboarding/blockTitle": "新手引导",
- "onboarding/dismissedHints": "已关闭的新手引导",
- "onboarding/resetHintTooltip": "点击以重新启用新手引导",
- "onboarding/resetAllHints": "重置所有新手引导",
- "onboarding/noneDismissed": "没有已关闭的提示,目前所有提示项都会显示,直至下次关闭",
+ "onboarding/blockTitle": "引导提示",
+ "onboarding/dismissedHints": "已关闭的引导提示",
+ "onboarding/resetHintTooltip": "点击以重新启用此引导提示",
+ "onboarding/resetAllHints": "重置所有引导提示",
+ "onboarding/noneDismissed": "没有已关闭的提示,当前所有引导辅助提示都将显示,直到下次关闭",
- "firstTimeExperienceLabel": "首次聊天体验",
+ "firstTimeExperienceLabel": "首次对话体验",
"firstTimeExperienceMarkCompletedLabel": "标记为已完成",
"firstTimeExperienceResetLabel": "重置",
- "showPromptSuggestionsLabel": "创建新聊天时显示提示建议",
+ "showPromptSuggestionsLabel": "创建新对话时显示提示词建议",
"darkThemeLabel": "深色",
"lightThemeLabel": "浅色",
"systemThemeLabel": "自动",
"sepiaThemeLabel": "护眼",
- "unloadPreviousModelLabel": "选择要加载的模型时,先卸载所有当前已加载的模型",
- "languageLabel": "语言",
+ "unloadPreviousModelLabel": "选择要加载的模型时,先卸载当前加载的任何模型",
+ "languageLabel": "应用语言",
"changeLanguageLabel": "选择应用语言(仍在开发中)",
"developerLabel": "开发者",
- "localServiceLabel": "本地 LLM 服务(无界面)",
+ "localServiceLabel": "本地大语言模型服务(无界面)",
+ "modelDefaultsLabel": "模型默认设置",
"showExperimentalFeaturesLabel": "显示实验性功能",
"appFirstLoadLabel": "应用首次加载体验",
- "showDebugInfoBlocksInChatLabel": "在聊天中显示调试信息块",
+ "showDebugInfoBlocksInChatLabel": "在对话中显示调试信息块",
"autoLoadBundledLLMLabel": "启动时自动加载捆绑的大语言模型",
- "showReleaseNotes": "显示发行说明",
- "hideReleaseNotes": "隐藏发行说明",
+ "showReleaseNotes": "显示发布说明",
+ "hideReleaseNotes": "隐藏发布说明",
- "backendDownloadNewUpdate": "有新的后端可用!",
+ "backendDownloadNewUpdate": "有更新的后端可用!",
"backendDownloadNewUpdateAction": "前往开发者页面",
- "backendDownloadChannel.label": "LM Studio 扩展包下载频道",
+ "backendDownloadChannel.label": "LM Studio 扩展包下载通道",
"backendDownloadChannel.value.stable": "稳定版",
"backendDownloadChannel.value.beta": "测试版",
"backendDownloadChannel.value.latest": "开发版",
- "backendDownloadChannel.shortLabel": "运行环境下载频道",
- "backendDownloadChannel.hint": "选择从哪个频道下载 LM Studio 扩展包。\"{{stableName}}\" 是推荐给大多数用户的通道。",
+ "backendDownloadChannel.shortLabel": "运行时下载通道",
+ "backendDownloadChannel.hint": "选择下载 LM Studio 扩展包的通道。\"{{stableName}}\"是大多数用户推荐的通道。",
- "appUpdateChannel.label": "LM Studio 更新频道",
+ "appUpdateChannel.label": "更新通道",
"appUpdateChannel.value.stable": "稳定版",
- "appUpdateChannel.value.beta": "beta测试版",
- "appUpdateChannel.value.alpha": "alpha测试版",
- "appUpdateChannel.shortLabel": "应用更新频道",
- "appUpdateChannel.hint": "选择从哪个频道接收 LM Studio 应用更新。\"{{stableName}}\" 是推荐给大多数用户的通道。",
+ "appUpdateChannel.value.beta": "测试版",
+ "appUpdateChannel.value.alpha": "内测版",
+ "appUpdateChannel.shortLabel": "应用更新通道",
+ "appUpdateChannel.hint": "选择接收 LM Studio 应用更新的通道。\"{{stableName}}\"是大多数用户推荐的通道。",
"modelLoadingGuardrails.label": "模型加载保护",
- "modelLoadingGuardrails.description": "超出系统资源限制加载模型可能导致系统不稳定或卡死。保护措施可以防止意外过载。如果需要,可以在这里调整这些限制。但请注意,接近系统极限加载模型可能会降低稳定性。",
+ "modelLoadingGuardrails.description": "加载超出系统资源限制的模型可能导致系统不稳定或冻结。保护功能可防止意外过载。如有必要,可在此处调整这些限制,但请注意,加载接近系统极限的模型可能会降低稳定性。",
"modelLoadingGuardrails.value.off": "关闭(不推荐)",
"modelLoadingGuardrails.value.off/subTitle": "不对系统过载采取预防措施",
"modelLoadingGuardrails.value.off/detail": "关闭详情",
"modelLoadingGuardrails.value.low": "宽松",
- "modelLoadingGuardrails.value.low/subTitle": "轻微预防系统过载",
+ "modelLoadingGuardrails.value.low/subTitle": "对系统过载采取轻微预防措施",
"modelLoadingGuardrails.value.low/detail": "宽松详情",
"modelLoadingGuardrails.value.medium": "平衡",
- "modelLoadingGuardrails.value.medium/subTitle": "适度预防系统过载",
+ "modelLoadingGuardrails.value.medium/subTitle": "对系统过载采取适度预防措施",
"modelLoadingGuardrails.value.medium/detail": "平衡详情",
"modelLoadingGuardrails.value.high": "严格",
- "modelLoadingGuardrails.value.high/subTitle": "强烈预防系统过载",
+ "modelLoadingGuardrails.value.high/subTitle": "对系统过载采取强预防措施",
"modelLoadingGuardrails.value.high/detail": "严格详情",
"modelLoadingGuardrails.value.custom": "自定义",
- "modelLoadingGuardrails.value.custom/subTitle": "设置最大可加载模型大小的自定义限制",
+ "modelLoadingGuardrails.value.custom/subTitle": "设置可加载的最大模型大小的自定义限制",
"modelLoadingGuardrails.value.custom/detail": "自定义详情",
"modelLoadingGuardrails.custom.label": "内存限制:",
"modelLoadingGuardrails.custom.unitGB": "GB",
- "modelLoadingGuardrails.custom.description": "为模型加载设置自定义内存限制。如果加载的模型会超过此限制,则不会加载模型。",
+ "modelLoadingGuardrails.custom.description": "设置模型加载的自定义内存限制。如果加载模型会超过此限制,则不会加载。",
+ "modelLoadingGuardrails.alwaysAllowLoadAnyway": "(不推荐)始终允许\"强制加载\"而无需按住 Alt/Option",
"experimentalLoadPresets": "在预设中启用模型加载配置支持",
- "experimentalLoadPresets.description": "是否允许预设包含模型加载配置。此功能尚处于试验阶段,我们欢迎反馈。",
+ "experimentalLoadPresets.description": "是否允许预设包含模型加载配置。此功能处于实验阶段,我们欢迎反馈。",
- "unloadPreviousJITModelOnLoad": "模型自动卸载:始终仅允许一个JIT模型加载(加载新模型时卸载上一个)",
- "autoDeleteExtensionPacks": "自动删除最近最少使用的运行环境扩展包",
- "autoUpdateExtensionPacks": "自动更新选中运行环境扩展包",
+ "unloadPreviousJITModelOnLoad": "JIT 模型自动驱逐:确保在任何给定时间最多只有 1 个模型通过 JIT 加载(卸载之前的模型)",
+ "autoDeleteExtensionPacks": "自动删除最近最少使用的运行时扩展包",
+ "autoUpdateExtensionPacks": "自动更新选定的运行时扩展包",
"useHFProxy.label": "使用 LM Studio 的 Hugging Face 代理",
- "useHFProxy.hint": "使用 LM Studio 提供的 Hugging Face 代理进行模型搜索和下载。适用于无法直接访问 Hugging Face 的用户。",
- "separateReasoningContentInResponses": "在API响应中区分 `reasoning_content` 和 `content`(如适用)",
- "separateReasoningContentInResponses/hint": "该设置仅适用于像 DeepSeek R1 及其蒸馏模型等输出带有 和 标记的推理模型。",
+ "useHFProxy.hint": "使用 LM Studio 的 Hugging Face 代理来搜索和下载模型。这可以帮助无法直接访问 Hugging Face 的用户。",
+ "separateReasoningContentInResponses": "在适用时,在 API 响应中分离 `reasoning_content` 和 `content`",
+ "separateReasoningContentInResponses/hint": "此设置仅适用于\"推理\"模型,如 DeepSeek R1、其蒸馏变体和其他在 `` 和 `` 标签中产生思维链的模型。",
- "promptWhenCommittingUnsavedChangesWithNewFields": "提交新字段到预设时显示确认对话框",
- "promptWhenCommittingUnsavedChangesWithNewFields.description": "如果您想避免意外向预设添加新字段,这将非常有用",
+ "promptWhenCommittingUnsavedChangesWithNewFields": "预设:提交新字段到预设时显示确认对话框",
+ "promptWhenCommittingUnsavedChangesWithNewFields.description": "如果你想防止意外向预设添加新字段,这很有用",
- "enableLocalService": "启用本地 LLM 服务",
- "enableLocalService.subtitle": "使用 LM Studio 的 LLM 服务器,而无需保持 LM Studio 应用程序打开",
- "enableLocalService.description": "启用时,LM Studio 本地 LLM 服务将自动启动。关闭 LM Studio 时,本地 LLM 服务也将在系统托盘中继续运行。",
+ "enableLocalService": "启用本地大语言模型服务",
+ "enableLocalService.subtitle": "无需保持 LM Studio 应用程序打开即可使用 LM Studio 的大语言模型服务器",
+ "enableLocalService.description": "启用后,LM Studio 本地大语言模型服务将在启动时启动。关闭 LM Studio 也会使本地大语言模型服务在系统托盘中继续运行。",
- "expandConfigsOnClick": "点击而非悬停时展开配置",
+ "expandConfigsOnClick": "点击展开配置而非悬停",
"migrateChats": {
- "label": "迁移 0.3.0 之前的聊天记录",
- "hasBetterLabel": "重新迁移 0.3.0 之前的聊天记录",
- "action_one": "迁移 1 条聊天记录",
- "action_other": "迁移 {{count}} 条聊天记录",
- "inProgress": "正在迁移聊天记录...",
+ "label": "迁移 0.3.0 之前的对话",
+ "hasBetterLabel": "重新迁移 0.3.0 之前的对话",
+ "action_one": "迁移 1 个对话",
+ "action_other": "迁移 {{count}} 个对话",
+ "inProgress": "正在迁移对话...",
"hint": {
- "primary": "我们对 v0.3.0+ 版本的聊天记录内部数据结构进行了改造,以支持多版本聊天消息等功能。为了让旧聊天记录出现在应用中,需要将其迁移到新格式。",
- "details": "迁移过程不会删除您的旧聊天记录,而是会创建一个新格式的副本。",
- "footer": "您仍然可以通过旧版本的 LM Studio 访问您的旧聊天记录。目前图片不会自动迁移。"
+ "primary": "我们为 v0.3.0+ 的对话重构了内部数据结构以支持多版本对话消息等功能。要让旧对话出现在应用中,需要将它们迁移到新格式。",
+ "details": "迁移过程不会删除你的旧对话,而是复制一份到新格式。",
+ "footer": "你仍然可以通过旧版本的 LM Studio 访问你的旧对话。目前,图片不会自动迁移。"
},
"hasBetterHint": {
- "primary": "自从您上次迁移旧聊天记录以来,我们已经改进了聊天记录迁移器。您想要再次运行它吗?",
- "details": "迁移过程将创建一个包含新迁移聊天记录的新文件夹。您的旧聊天记录将保持不变。",
- "footer": "您仍然可以通过旧版本的 LM Studio 访问您的旧聊天记录。目前图片不会自动迁移。"
+ "primary": "自从你迁移旧对话以来,我们改进了对话迁移器。你想再次运行它吗?",
+ "details": "迁移过程将创建一个新文件夹来存放新迁移的对话。你的旧对话将保持不变。",
+ "footer": "你仍然可以通过旧版本的 LM Studio 访问你的旧对话。目前,图片不会自动迁移。"
},
- "success": "聊天记录迁移成功!",
- "success_one": "1 条聊天记录迁移成功",
- "success_other": "{{count}} 条聊天记录迁移成功",
- "showInstructionsButton": "显示指南",
- "footerCardText": "来自 LM Studio 早期版本的聊天记录需要迁移才能在此版本中使用。",
- "hasBetterFooterCardText": "自从您上次迁移旧聊天记录以来,我们已经改进了聊天记录迁移器。您可以重新运行迁移。(我们将创建一个包含新迁移聊天记录的新文件夹。)",
+ "success": "成功迁移对话!",
+ "success_one": "成功迁移 1 个对话",
+ "success_other": "成功迁移 {{count}} 个对话",
+ "showInstructionsButton": "显示说明",
+ "footerCardText": "需要迁移以前版本的 LM Studio 的对话才能在此版本中使用。",
+ "hasBetterFooterCardText": "自从你迁移旧对话以来,我们改进了对话迁移器。你可以重新运行迁移过程。(我们将创建一个新文件夹来存放新迁移的对话。)",
"dismissConfirm": "关闭",
- "dismissConfirmDescription": "您随时可以在设置中处理聊天记录迁移"
+ "dismissConfirmDescription": "你随时可以在设置中处理对话迁移"
},
"toolConfirmation": {
"label": "工具调用确认",
"neverAsk": {
- "label": "运行工具前不再询问确认(不建议)",
- "hint": "禁用运行工具前的确认提示。不建议此操作。",
+ "label": "运行工具前从不请求确认(不推荐)",
+ "hint": "禁用运行工具前的确认。不推荐。",
"warnTitle": "确定吗?",
- "warnDescription": "禁用工具调用确认非常危险。如果您的插件中包含可能执行破坏性操作的工具(例如运行命令、删除文件、覆盖文件、上传文件等),模型将无需确认即可执行这些操作。您可以通过逐个工具或逐个插件的方式禁用确认提示。强烈不建议启用此选项。请谨慎操作。",
+ "warnDescription": "禁用工具调用确认是危险的。如果你的任何插件贡献了一个可能执行破坏性操作的工具(如运行命令、删除文件、覆盖文件、上传文件等),模型将能够在没有任何确认的情况下执行。你可以随时按工具或插件禁用确认。启用此选项不推荐。请谨慎。",
"warnButton": "我了解风险"
}
+ },
+
+ "defaultContextLength": {
+ "label": "默认上下文长度",
+ "maxTitle": "模型最大值",
+ "customTitle": "自定义值",
+ "maxSubtitle": "使用每个模型支持的最大上下文长度。",
+ "customSubtitle": "设置加载新模型的默认上下文长度。如果模型支持的最大上下文长度较低,则使用该值。",
+ "invalidNaNError": "无效的上下文长度值。使用 {{value}}",
+ "invalidRangeError": "无效的上下文长度值。应在 1 到 2^30 范围内。使用 {{value}}",
+ "largeContextWarning": "上下文长度越高,模型占用的内存越多。如果不确定,请不要更改默认值"
+ },
+ "jitTTL": {
+ "subtitle": "JIT 加载的模型将在闲置指定时间后自动卸载。"
}
}
diff --git a/zh-CN/shared.json b/zh-CN/shared.json
index 24fc7431..87377cb6 100644
--- a/zh-CN/shared.json
+++ b/zh-CN/shared.json
@@ -1,22 +1,22 @@
{
"copyLmStudioLinkButton/toolTip": "复制模型下载链接",
- "filter.noMatches": "没有匹配项",
+ "filter.noMatches": "无匹配项",
"longRunningTask": {
"unbundlingDependencies": {
- "badge": "解包资源"
+ "badge": "正在提取资源"
},
"performingBackendHardwareSurvey": {
- "badge": "检测运行环境兼容性"
+ "badge": "正在检查运行时兼容性"
},
"indexingRuntimes": {
- "badge": "索引运行环境"
+ "badge": "正在索引运行时"
},
"indexingModels": {
- "badge": "索引模型"
+ "badge": "正在索引模型"
},
"authenticating": {
- "badge": "身份验证中"
+ "badge": "正在认证"
},
"autoUpdatingExtensionPack": {
"badge": "正在更新扩展包({{name}} v{{version}})"
@@ -24,14 +24,14 @@
},
"auth": {
"prompt": "登录 LM Studio Hub",
- "authError": "身份验证失败",
- "noAccount": "还没有账号?",
+ "authError": "认证失败",
+ "noAccount": "没有账户?",
"signUp": "注册",
"havingTrouble": "遇到问题?",
"retry": "重试"
},
"artifacts": {
- "fetchError": "获取工件失败",
+ "fetchError": "获取构件失败",
"organizationVisible": "组织可见"
},
@@ -41,21 +41,22 @@
"private": "私有",
"yes": "是",
"no": "否",
- "go": "开始",
+ "go": "前往",
"proceedWithEllipsis": "继续...",
"proceed": "继续",
"inProgress": "进行中...",
"failed": "失败",
- "pending": "待处理",
+ "pending": "待定",
"doneWithExclamation": "完成!",
"done": "完成",
+ "beta": "测试版",
"complete": {
"completeWithEllipsis": "完成...",
"complete": "完成",
- "completingWithEllipsis": "完成中...",
- "completing": "完成中",
+ "completingWithEllipsis": "正在完成...",
+ "completing": "正在完成",
"completedWithExclamation": "已完成!",
"completed": "已完成"
},
@@ -63,8 +64,8 @@
"cancel": {
"cancelWithEllipsis": "取消...",
"cancel": "取消",
- "cancelingWithEllipsis": "取消中...",
- "canceling": "取消中",
+ "cancelingWithEllipsis": "正在取消...",
+ "canceling": "正在取消",
"canceled": "已取消"
},
@@ -81,8 +82,8 @@
"close": {
"closeWithEllipsis": "关闭...",
"close": "关闭",
- "closingWithEllipsis": "关闭中...",
- "closing": "关闭中",
+ "closingWithEllipsis": "正在关闭...",
+ "closing": "正在关闭",
"closedWithExclamation": "已关闭!",
"closed": "已关闭"
},
@@ -90,8 +91,8 @@
"delete": {
"deleteWithEllipsis": "删除...",
"delete": "删除",
- "deletingWithEllipsis": "删除中...",
- "deleting": "删除中",
+ "deletingWithEllipsis": "正在删除...",
+ "deleting": "正在删除",
"deletedWithExclamation": "已删除!",
"deleted": "已删除"
},
@@ -99,23 +100,23 @@
"retry": {
"retryWithEllipsis": "重试...",
"retry": "重试",
- "retryingWithEllipsis": "重试中...",
- "retrying": "重试中"
+ "retryingWithEllipsis": "正在重试...",
+ "retrying": "正在重试"
},
"refresh": {
"refreshWithEllipsis": "刷新...",
"refresh": "刷新",
- "refreshingWithEllipsis": "刷新中...",
- "refreshing": "刷新中",
+ "refreshingWithEllipsis": "正在刷新...",
+ "refreshing": "正在刷新",
"refreshedWithExclamation": "已刷新!",
"refreshed": "已刷新"
},
"confirm": {
"confirm": "确认",
- "confirmingWithEllipsis": "确认中...",
- "confirming": "确认中",
+ "confirmingWithEllipsis": "正在确认...",
+ "confirming": "正在确认",
"confirmedWithExclamation": "已确认!",
"confirmed": "已确认"
},
@@ -123,8 +124,8 @@
"copy": {
"copyWithEllipsis": "复制...",
"copy": "复制",
- "copyingWithEllipsis": "复制中...",
- "copying": "复制中",
+ "copyingWithEllipsis": "正在复制...",
+ "copying": "正在复制",
"copiedWithExclamation": "已复制!",
"copied": "已复制"
},
@@ -132,8 +133,8 @@
"edit": {
"editWithEllipsis": "编辑...",
"edit": "编辑",
- "editingWithEllipsis": "编辑中...",
- "editing": "编辑中",
+ "editingWithEllipsis": "正在编辑...",
+ "editing": "正在编辑",
"editedWithExclamation": "已编辑!",
"edited": "已编辑"
},
@@ -141,8 +142,8 @@
"load": {
"loadWithEllipsis": "加载...",
"load": "加载",
- "loadingWithEllipsis": "加载中...",
- "loading": "加载中",
+ "loadingWithEllipsis": "正在加载...",
+ "loading": "正在加载",
"loadedWithExclamation": "已加载!",
"loaded": "已加载"
},
@@ -150,8 +151,8 @@
"save": {
"saveWithEllipsis": "保存...",
"save": "保存",
- "savingWithEllipsis": "保存中...",
- "saving": "保存中",
+ "savingWithEllipsis": "正在保存...",
+ "saving": "正在保存",
"savedWithExclamation": "已保存!",
"saved": "已保存"
},
@@ -162,22 +163,22 @@
},
"saveAsNew": {
- "saveAsNewWithEllipsis": "保存为新文件...",
- "saveAsNew": "保存为新文件"
+ "saveAsNewWithEllipsis": "另存为新...",
+ "saveAsNew": "另存为新"
},
"search": {
"searchWithEllipsis": "搜索...",
"search": "搜索",
- "searchingWithEllipsis": "搜索中...",
- "searching": "搜索中"
+ "searchingWithEllipsis": "正在搜索...",
+ "searching": "正在搜索"
},
"update": {
"updateWithEllipsis": "更新...",
"update": "更新",
- "updatingWithEllipsis": "更新中...",
- "updating": "更新中",
+ "updatingWithEllipsis": "正在更新...",
+ "updating": "正在更新",
"updatedWithExclamation": "已更新!",
"updated": "已更新"
},
@@ -185,8 +186,8 @@
"create": {
"createWithEllipsis": "创建...",
"create": "创建",
- "creatingWithEllipsis": "创建中...",
- "creating": "创建中",
+ "creatingWithEllipsis": "正在创建",
+ "creating": "正在创建",
"createdWithExclamation": "已创建!",
"created": "已创建"
},
@@ -194,21 +195,21 @@
"reset": {
"resetWithEllipsis": "重置...",
"reset": "重置",
- "resettingWithEllipsis": "重置中...",
- "resetting": "重置中"
+ "resettingWithEllipsis": "正在重置...",
+ "resetting": "正在重置"
},
"pause": {
"pause": "暂停",
- "pausingWithEllipsis": "暂停中...",
- "pausing": "暂停中",
+ "pausingWithEllipsis": "正在暂停...",
+ "pausing": "正在暂停",
"paused": "已暂停"
},
"download": {
"download": "下载",
- "downloadingWithEllipsis": "下载中...",
- "downloading": "下载中",
+ "downloadingWithEllipsis": "正在下载...",
+ "downloading": "正在下载",
"downloadedWithExclamation": "已下载!",
"downloaded": "已下载"
},
@@ -216,8 +217,8 @@
"upload": {
"uploadWithEllipsis": "上传...",
"upload": "上传",
- "uploadingWithEllipsis": "上传中...",
- "uploading": "上传中",
+ "uploadingWithEllipsis": "正在上传...",
+ "uploading": "正在上传",
"uploadedWithExclamation": "已上传!",
"uploaded": "已上传"
},
@@ -225,8 +226,8 @@
"remove": {
"removeWithEllipsis": "移除...",
"remove": "移除",
- "removingWithEllipsis": "移除中...",
- "removing": "移除中",
+ "removingWithEllipsis": "正在移除...",
+ "removing": "正在移除",
"removedWithExclamation": "已移除!",
"removed": "已移除"
},
@@ -234,32 +235,32 @@
"uninstall": {
"uninstallWithEllipsis": "卸载...",
"uninstall": "卸载",
- "uninstallingWithEllipsis": "卸载中...",
- "uninstalling": "卸载中",
+ "uninstallingWithEllipsis": "正在卸载...",
+ "uninstalling": "正在卸载",
"uninstalledWithExclamation": "已卸载!",
"uninstalled": "已卸载"
},
"resume": {
- "resumeWithEllipsis": "继续...",
- "resume": "继续",
- "resumingWithEllipsis": "继续中...",
- "resuming": "继续中"
+ "resumeWithEllipsis": "恢复...",
+ "resume": "恢复",
+ "resumingWithEllipsis": "正在恢复...",
+ "resuming": "正在恢复"
},
"start": {
- "startWithEllipsis": "启动...",
- "start": "启动",
- "startingWithEllipsis": "启动中...",
- "starting": "启动中",
- "started": "已启动"
+ "startWithEllipsis": "开始...",
+ "start": "开始",
+ "startingWithEllipsis": "正在开始...",
+ "starting": "正在开始",
+ "started": "已开始"
},
"stop": {
"stopWithEllipsis": "停止...",
"stop": "停止",
- "stoppingWithEllipsis": "停止中...",
- "stopping": "停止中",
+ "stoppingWithEllipsis": "正在停止...",
+ "stopping": "正在停止",
"stoppedWithExclamation": "已停止!",
"stopped": "已停止"
},
@@ -267,8 +268,8 @@
"import": {
"importWithEllipsis": "导入...",
"import": "导入",
- "importingWithEllipsis": "导入中...",
- "importing": "导入中",
+ "importingWithEllipsis": "正在导入...",
+ "importing": "正在导入",
"importedWithExclamation": "已导入!",
"imported": "已导入"
},
@@ -282,22 +283,22 @@
"run": {
"runWithEllipsis": "运行...",
"run": "运行",
- "runningWithEllipsis": "运行中...",
- "running": "运行中"
+ "runningWithEllipsis": "正在运行...",
+ "running": "正在运行"
},
"configure": {
"configureWithEllipsis": "配置...",
"configure": "配置",
- "configuringWithEllipsis": "配置中...",
+ "configuringWithEllipsis": "正在配置...",
"configured": "已配置"
},
"publish": {
"publishWithEllipsis": "发布...",
"publish": "发布",
- "publishingWithEllipsis": "发布中...",
- "publishing": "发布中",
+ "publishingWithEllipsis": "正在发布...",
+ "publishing": "正在发布",
"publishedWithExclamation": "已发布!",
"published": "已发布"
}
diff --git a/zh-CN/sidebar.json b/zh-CN/sidebar.json
index ff7f6346..729d3ce9 100644
--- a/zh-CN/sidebar.json
+++ b/zh-CN/sidebar.json
@@ -1,9 +1,9 @@
{
- "chat": "聊天",
+ "chat": "对话",
"discover": "发现",
"myModels": "我的模型",
"developer": "开发者",
- "runtimes": "运行时间",
+ "runtimes": "运行时",
"settings": "设置",
"download": "下载"
}