From 47b8412695d0bc07705bda54ad07cd9145d09d66 Mon Sep 17 00:00:00 2001 From: jvinolus Date: Wed, 15 Jan 2025 17:05:04 -0800 Subject: [PATCH 001/623] Initialize support for prefixing embeddings --- backend/open_webui/config.py | 12 ++++++++ backend/open_webui/retrieval/utils.py | 40 +++++++++++++------------ backend/open_webui/routers/retrieval.py | 3 +- 3 files changed, 35 insertions(+), 20 deletions(-) diff --git a/backend/open_webui/config.py b/backend/open_webui/config.py index a48b2db0559..ac121672e4d 100644 --- a/backend/open_webui/config.py +++ b/backend/open_webui/config.py @@ -1330,6 +1330,18 @@ class BannerModel(BaseModel): ), ) +RAG_EMBEDDING_PASSAGE_PREFIX = PersistentConfig( + "RAG_EMBEDDING_PASSAGE_PREFIX", + "rag.embedding_passage_prefix", + os.environ.get("RAG_EMBEDDING_PASSAGE_PREFIX", False), +) + +RAG_EMBEDDING_QUERY_PREFIX = PersistentConfig( + "RAG_EMBEDDING_QUERY_PREFIX", + "rag.embedding_query_prefix", + os.environ.get("RAG_EMBEDDING_QUERY_PREFIX", False), +) + RAG_RERANKING_MODEL = PersistentConfig( "RAG_RERANKING_MODEL", "rag.reranking_model", diff --git a/backend/open_webui/retrieval/utils.py b/backend/open_webui/retrieval/utils.py index c95367e6c3a..e420814d807 100644 --- a/backend/open_webui/retrieval/utils.py +++ b/backend/open_webui/retrieval/utils.py @@ -15,7 +15,7 @@ from open_webui.utils.misc import get_last_user_message from open_webui.env import SRC_LOG_LEVELS, OFFLINE_MODE - +from open_webui.config import RAG_EMBEDDING_QUERY_PREFIX, RAG_EMBEDDING_PASSAGE_PREFIX log = logging.getLogger(__name__) log.setLevel(SRC_LOG_LEVELS["RAG"]) @@ -39,7 +39,7 @@ def _get_relevant_documents( ) -> list[Document]: result = VECTOR_DB_CLIENT.search( collection_name=self.collection_name, - vectors=[self.embedding_function(query)], + vectors=[self.embedding_function(query,RAG_EMBEDDING_QUERY_PREFIX)], limit=self.top_k, ) @@ -183,7 +183,7 @@ def query_collection( ) -> dict: results = [] for query in queries: - query_embedding = embedding_function(query) 
+ query_embedding = embedding_function(query, RAG_EMBEDDING_QUERY_PREFIX) for collection_name in collection_names: if collection_name: try: @@ -247,26 +247,27 @@ def get_embedding_function( embedding_batch_size, ): if embedding_engine == "": - return lambda query: embedding_function.encode(query).tolist() + return lambda query, prefix: embedding_function.encode(query, prompt = prefix if prefix else None).tolist() elif embedding_engine in ["ollama", "openai"]: - func = lambda query: generate_embeddings( + func = lambda query, prefix: generate_embeddings( engine=embedding_engine, model=embedding_model, text=query, + prefix=prefix, url=url, key=key, ) - def generate_multiple(query, func): + def generate_multiple(query, prefix, func): if isinstance(query, list): embeddings = [] for i in range(0, len(query), embedding_batch_size): - embeddings.extend(func(query[i : i + embedding_batch_size])) + embeddings.extend(func(query[i : i + embedding_batch_size], prefix)) return embeddings else: return func(query) - return lambda query: generate_multiple(query, func) + return lambda query, prefix: generate_multiple(query, prefix, func) def get_sources_from_files( @@ -411,7 +412,7 @@ def get_model_path(model: str, update_model: bool = False): def generate_openai_batch_embeddings( - model: str, texts: list[str], url: str = "https://api.openai.com/v1", key: str = "" + model: str, texts: list[str], url: str = "https://api.openai.com/v1", key: str = "", prefix: str = None ) -> Optional[list[list[float]]]: try: r = requests.post( @@ -420,7 +421,7 @@ def generate_openai_batch_embeddings( "Content-Type": "application/json", "Authorization": f"Bearer {key}", }, - json={"input": texts, "model": model}, + json={"input": texts, "model": model} if not prefix else {"input": texts, "model": model, "prefix": prefix}, ) r.raise_for_status() data = r.json() @@ -434,7 +435,7 @@ def generate_openai_batch_embeddings( def generate_ollama_batch_embeddings( - model: str, texts: list[str], url: str, key: 
str = "" + model: str, texts: list[str], url: str, key: str = "", prefix: str = None ) -> Optional[list[list[float]]]: try: r = requests.post( @@ -443,7 +444,7 @@ def generate_ollama_batch_embeddings( "Content-Type": "application/json", "Authorization": f"Bearer {key}", }, - json={"input": texts, "model": model}, + json={"input": texts, "model": model} if not prefix else {"input": texts, "model": model, "prefix": prefix}, ) r.raise_for_status() data = r.json() @@ -457,25 +458,25 @@ def generate_ollama_batch_embeddings( return None -def generate_embeddings(engine: str, model: str, text: Union[str, list[str]], **kwargs): +def generate_embeddings(engine: str, model: str, text: Union[str, list[str]], prefix: Union[str , None] = None, **kwargs): url = kwargs.get("url", "") key = kwargs.get("key", "") if engine == "ollama": if isinstance(text, list): embeddings = generate_ollama_batch_embeddings( - **{"model": model, "texts": text, "url": url, "key": key} + **{"model": model, "texts": text, "url": url, "key": key, "prefix": prefix} ) else: embeddings = generate_ollama_batch_embeddings( - **{"model": model, "texts": [text], "url": url, "key": key} + **{"model": model, "texts": [text], "url": url, "key": key, "prefix": prefix} ) return embeddings[0] if isinstance(text, str) else embeddings elif engine == "openai": if isinstance(text, list): - embeddings = generate_openai_batch_embeddings(model, text, url, key) + embeddings = generate_openai_batch_embeddings(model, text, url, key, prefix) else: - embeddings = generate_openai_batch_embeddings(model, [text], url, key) + embeddings = generate_openai_batch_embeddings(model, [text], url, key, prefix) return embeddings[0] if isinstance(text, str) else embeddings @@ -512,9 +513,10 @@ def compress_documents( else: from sentence_transformers import util - query_embedding = self.embedding_function(query) + query_embedding = self.embedding_function(query, RAG_EMBEDDING_QUERY_PREFIX) document_embedding = self.embedding_function( - 
[doc.page_content for doc in documents] + [doc.page_content for doc in documents], + RAG_EMBEDDING_PASSAGE_PREFIX ) scores = util.cos_sim(query_embedding, document_embedding)[0] diff --git a/backend/open_webui/routers/retrieval.py b/backend/open_webui/routers/retrieval.py index c791bde8420..b0c3f8e0429 100644 --- a/backend/open_webui/routers/retrieval.py +++ b/backend/open_webui/routers/retrieval.py @@ -79,6 +79,7 @@ RAG_RERANKING_MODEL_TRUST_REMOTE_CODE, UPLOAD_DIR, DEFAULT_LOCALE, + RAG_EMBEDDING_PASSAGE_PREFIX ) from open_webui.env import ( SRC_LOG_LEVELS, @@ -775,7 +776,7 @@ def _get_docs_info(docs: list[Document]) -> str: ) embeddings = embedding_function( - list(map(lambda x: x.replace("\n", " "), texts)) + list(map(lambda x: x.replace("\n", " "), texts)), RAG_EMBEDDING_PASSAGE_PREFIX ) items = [ From 65443a3a66e2150ef4937e802f22609a23684812 Mon Sep 17 00:00:00 2001 From: Matteo Sirri Date: Mon, 3 Feb 2025 16:35:46 +0000 Subject: [PATCH 002/623] feat: initial commit --- README.md | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 0fb03537df1..78d3267ad28 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,17 @@ -# Open WebUI 👋 +# Open WebUI 👋 (FORK FOR E4) + +git remote add upstream https://github.com/open-webui/open-webui.git + +# Fetch changes from upstream +git fetch upstream + +# Merge changes into your main branch +git checkout main +git merge upstream/main + +# Push changes to GitLab +git push origin main + ![GitHub stars](https://img.shields.io/github/stars/open-webui/open-webui?style=social) ![GitHub forks](https://img.shields.io/github/forks/open-webui/open-webui?style=social) From e6715ce8b835052d3e7868178d67e5b4da1bd5d9 Mon Sep 17 00:00:00 2001 From: Matteo Sirri Date: Mon, 3 Feb 2025 16:43:59 +0000 Subject: [PATCH 003/623] docs: fix readme --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 78d3267ad28..99840d30697 100644 --- a/README.md +++ 
b/README.md @@ -1,5 +1,6 @@ # Open WebUI 👋 (FORK FOR E4) +# First time git remote add upstream https://github.com/open-webui/open-webui.git # Fetch changes from upstream From 22c100bb6b99e11506b1a0bf8bcbd4c1269e488a Mon Sep 17 00:00:00 2001 From: Matteo Sirri Date: Mon, 3 Feb 2025 16:45:25 +0000 Subject: [PATCH 004/623] feat: add contributing guide --- CONTRIBUTING.md | 196 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 196 insertions(+) create mode 100644 CONTRIBUTING.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000000..1a2ccc10171 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,196 @@ + +# Contributing Guide + +## Development Guidelines + +### Code Quality Tools + +1. Pre-commit setup: + ```bash + pre-commit install + ``` + +2. Configured hooks: + - YAML checking + - End-of-file fixer + - Trailing whitespace removal + - Ruff (linting + formatting) + - MyPy (type checking) + +### Coding Standards +- Follow PEP 8 guidelines. +- Use type hints consistently. +- Maximum line length: 130 characters. +- Use single quotes for strings. + +### Commit Guidelines +Use Commitizen for standardized commits: +```bash +git cz +``` + +## Git Strategy: Feature branch + +The **Git Feature Branch Workflow** is a way to work on new features in a project without messing up the main code. Instead of working directly on the `main` branch (the "official" code), you create a separate branch for each feature. This keeps the `main` branch clean and stable. 
+ +--- + +## How It Works (Diagram) + + +**Example:** +```bash +git branch -d add-login-button +git push origin --delete add-login-button +``` + + +**Example Workflow (Diagram)** + +Here’s an example of how Mary uses this workflow: + +```mermaid +sequenceDiagram + participant Mary + participant GitHub + participant Bill + + Mary->>GitHub: Create a new branch (add-login-button) + Mary->>Mary: Make changes and commit + Mary->>GitHub: Push branch to remote + Mary->>GitHub: Open a pull request + Bill->>GitHub: Review pull request + Bill->>Mary: Request changes + Mary->>Mary: Fix feedback and commit + Mary->>GitHub: Push updates + Bill->>GitHub: Approve pull request + Mary->>GitHub: Merge branch into main + Mary->>GitHub: Delete feature branch +``` + +--- + +## General Step-by-Step Instructions + +### 1. Start with the main branch +Make sure your local main branch is up-to-date with the latest code from the central repository. + +```bash +git checkout main +git fetch origin +git reset --hard origin/main +``` + +### 2. Create a new branch for your feature +Create a branch for your feature. Use a clear name that describes what you’re working on, like `add-login-button` or `fix-bug-123`. + +```bash +git checkout -b your-branch-name +``` + +**Example:** +```bash +git checkout -b add-login-button +``` + +### 3. Work on your feature +Make changes to the code. After making changes, save your work by following these steps: + +- Check what files you’ve changed: + ```bash + git status + ``` + +- Add the files you want to save: + ```bash + git add + ``` + + **Example:** + ```bash + git add index.html + ``` + +- Save your changes with a message: + ```bash + git commit -m "Describe what you changed" + ``` + + **Example:** + ```bash + git commit -m "Added login button to homepage" + ``` + +### 4. Push your branch to the remote repository +To back up your work and share it with others, push your branch to the central repository. 
+ +```bash +git push -u origin your-branch-name +``` + +**Example:** +```bash +git push -u origin add-login-button +``` + +### 5. Open a pull request +Go to your Git hosting platform (like GitLab) and open a pull request. This is how you ask your team to review your changes and approve them before adding them to the main branch. + +### 6. Fix feedback from reviewers +If your teammates suggest changes, follow these steps to update your branch: + +- Make the changes locally. +- Save the changes: + ```bash + git add + git commit -m "Fixed feedback" + git push + ``` + +### 7. Merge your branch into main +Once your pull request is approved, it’s time to merge your branch into the main branch. + +- Switch to the main branch: + ```bash + git checkout main + ``` + +- Update your local main branch: + ```bash + git pull + ``` + +- Merge your feature branch into main: + ```bash + git merge your-branch-name + ``` + +- Push the updated main branch to the remote repository: + ```bash + git push + ``` + +### 8. Delete your feature branch +After merging, delete your feature branch to keep things clean. + +- Delete the branch locally: + ```bash + git branch -d your-branch-name + ``` + +- Delete the branch from the remote repository: + ```bash + git push origin --delete your-branch-name + ``` + + +## Summary + +- Create a branch for each feature. +- Work on your branch without touching `main`. +- Push your branch to back up your work. +- Open a pull request to get feedback and approval. +- Merge your branch into `main` when it’s ready. +- Delete your branch after merging. + +By following these steps, you’ll keep the `main` branch clean and make it easy for your team to collaborate. 
From 7b8e5d4e7cb03d79ee832dc1107b8d74a405ae2e Mon Sep 17 00:00:00 2001 From: jvinolus Date: Tue, 4 Feb 2025 13:04:36 -0800 Subject: [PATCH 005/623] Fixed errors and added more support --- backend/open_webui/config.py | 16 ++++++++-------- backend/open_webui/retrieval/utils.py | 12 ++++++++---- backend/open_webui/routers/retrieval.py | 8 ++++---- 3 files changed, 20 insertions(+), 16 deletions(-) diff --git a/backend/open_webui/config.py b/backend/open_webui/config.py index ac121672e4d..f1b1c14a5c4 100644 --- a/backend/open_webui/config.py +++ b/backend/open_webui/config.py @@ -1330,16 +1330,16 @@ class BannerModel(BaseModel): ), ) -RAG_EMBEDDING_PASSAGE_PREFIX = PersistentConfig( - "RAG_EMBEDDING_PASSAGE_PREFIX", - "rag.embedding_passage_prefix", - os.environ.get("RAG_EMBEDDING_PASSAGE_PREFIX", False), +RAG_EMBEDDING_QUERY_PREFIX = ( + os.environ.get("RAG_EMBEDDING_QUERY_PREFIX", None) ) -RAG_EMBEDDING_QUERY_PREFIX = PersistentConfig( - "RAG_EMBEDDING_QUERY_PREFIX", - "rag.embedding_query_prefix", - os.environ.get("RAG_EMBEDDING_QUERY_PREFIX", False), +RAG_EMBEDDING_PASSAGE_PREFIX = ( + os.environ.get("RAG_EMBEDDING_PASSAGE_PREFIX", None) +) + +RAG_EMBEDDING_PREFIX_FIELD_NAME = ( + os.environ.get("RAG_EMBEDDING_PREFIX_FIELD_NAME", "input_type") ) RAG_RERANKING_MODEL = PersistentConfig( diff --git a/backend/open_webui/retrieval/utils.py b/backend/open_webui/retrieval/utils.py index e420814d807..544a65a89d9 100644 --- a/backend/open_webui/retrieval/utils.py +++ b/backend/open_webui/retrieval/utils.py @@ -15,7 +15,11 @@ from open_webui.utils.misc import get_last_user_message from open_webui.env import SRC_LOG_LEVELS, OFFLINE_MODE -from open_webui.config import RAG_EMBEDDING_QUERY_PREFIX, RAG_EMBEDDING_PASSAGE_PREFIX +from open_webui.config import ( + RAG_EMBEDDING_QUERY_PREFIX, + RAG_EMBEDDING_PASSAGE_PREFIX, + RAG_EMBEDDING_PREFIX_FIELD_NAME +) log = logging.getLogger(__name__) log.setLevel(SRC_LOG_LEVELS["RAG"]) @@ -265,7 +269,7 @@ def generate_multiple(query, 
prefix, func): embeddings.extend(func(query[i : i + embedding_batch_size], prefix)) return embeddings else: - return func(query) + return func(query, prefix) return lambda query, prefix: generate_multiple(query, prefix, func) @@ -421,7 +425,7 @@ def generate_openai_batch_embeddings( "Content-Type": "application/json", "Authorization": f"Bearer {key}", }, - json={"input": texts, "model": model} if not prefix else {"input": texts, "model": model, "prefix": prefix}, + json={"input": texts, "model": model} if not prefix else {"input": texts, "model": model, RAG_EMBEDDING_PREFIX_FIELD_NAME: prefix}, ) r.raise_for_status() data = r.json() @@ -444,7 +448,7 @@ def generate_ollama_batch_embeddings( "Content-Type": "application/json", "Authorization": f"Bearer {key}", }, - json={"input": texts, "model": model} if not prefix else {"input": texts, "model": model, "prefix": prefix}, + json={"input": texts, "model": model} if not prefix else {"input": texts, "model": model, RAG_EMBEDDING_PREFIX_FIELD_NAME: prefix}, ) r.raise_for_status() data = r.json() diff --git a/backend/open_webui/routers/retrieval.py b/backend/open_webui/routers/retrieval.py index b0c3f8e0429..255cff11276 100644 --- a/backend/open_webui/routers/retrieval.py +++ b/backend/open_webui/routers/retrieval.py @@ -70,7 +70,6 @@ ) from open_webui.utils.auth import get_admin_user, get_verified_user - from open_webui.config import ( ENV, RAG_EMBEDDING_MODEL_AUTO_UPDATE, @@ -79,7 +78,8 @@ RAG_RERANKING_MODEL_TRUST_REMOTE_CODE, UPLOAD_DIR, DEFAULT_LOCALE, - RAG_EMBEDDING_PASSAGE_PREFIX + RAG_EMBEDDING_PASSAGE_PREFIX, + RAG_EMBEDDING_QUERY_PREFIX ) from open_webui.env import ( SRC_LOG_LEVELS, @@ -1319,7 +1319,7 @@ def query_doc_handler( else: return query_doc( collection_name=form_data.collection_name, - query_embedding=request.app.state.EMBEDDING_FUNCTION(form_data.query), + query_embedding=request.app.state.EMBEDDING_FUNCTION(form_data.query, RAG_EMBEDDING_QUERY_PREFIX), k=form_data.k if form_data.k else 
request.app.state.config.TOP_K, ) except Exception as e: @@ -1438,7 +1438,7 @@ def reset_upload_dir(user=Depends(get_admin_user)) -> bool: @router.get("/ef/{text}") async def get_embeddings(request: Request, text: Optional[str] = "Hello World!"): - return {"result": request.app.state.EMBEDDING_FUNCTION(text)} + return {"result": request.app.state.EMBEDDING_FUNCTION(text, RAG_EMBEDDING_QUERY_PREFIX)} class BatchProcessFilesForm(BaseModel): From 6d2f87e9044800320656c98a501302f2f6a3f56a Mon Sep 17 00:00:00 2001 From: jayteaftw Date: Wed, 5 Feb 2025 14:03:16 -0800 Subject: [PATCH 006/623] Added server side Prefixing --- backend/open_webui/config.py | 2 +- backend/open_webui/retrieval/utils.py | 25 +++++++++++++++++++++++-- 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/backend/open_webui/config.py b/backend/open_webui/config.py index f1b1c14a5c4..5635b70a67e 100644 --- a/backend/open_webui/config.py +++ b/backend/open_webui/config.py @@ -1339,7 +1339,7 @@ class BannerModel(BaseModel): ) RAG_EMBEDDING_PREFIX_FIELD_NAME = ( - os.environ.get("RAG_EMBEDDING_PREFIX_FIELD_NAME", "input_type") + os.environ.get("RAG_EMBEDDING_PREFIX_FIELD_NAME", None) ) RAG_RERANKING_MODEL = PersistentConfig( diff --git a/backend/open_webui/retrieval/utils.py b/backend/open_webui/retrieval/utils.py index 544a65a89d9..7a9be9ea941 100644 --- a/backend/open_webui/retrieval/utils.py +++ b/backend/open_webui/retrieval/utils.py @@ -418,14 +418,22 @@ def get_model_path(model: str, update_model: bool = False): def generate_openai_batch_embeddings( model: str, texts: list[str], url: str = "https://api.openai.com/v1", key: str = "", prefix: str = None ) -> Optional[list[list[float]]]: + try: + json_data = { + "input": texts, + "model": model + } + if isinstance(RAG_EMBEDDING_PREFIX_FIELD_NAME,str) and isinstance(prefix,str): + json_data[RAG_EMBEDDING_PREFIX_FIELD_NAME] = prefix + r = requests.post( f"{url}/embeddings", headers={ "Content-Type": "application/json", "Authorization": 
f"Bearer {key}", }, - json={"input": texts, "model": model} if not prefix else {"input": texts, "model": model, RAG_EMBEDDING_PREFIX_FIELD_NAME: prefix}, + json=json_data, ) r.raise_for_status() data = r.json() @@ -442,13 +450,20 @@ def generate_ollama_batch_embeddings( model: str, texts: list[str], url: str, key: str = "", prefix: str = None ) -> Optional[list[list[float]]]: try: + json_data = { + "input": texts, + "model": model + } + if isinstance(RAG_EMBEDDING_PREFIX_FIELD_NAME,str) and isinstance(prefix,str): + json_data[RAG_EMBEDDING_PREFIX_FIELD_NAME] = prefix + r = requests.post( f"{url}/api/embed", headers={ "Content-Type": "application/json", "Authorization": f"Bearer {key}", }, - json={"input": texts, "model": model} if not prefix else {"input": texts, "model": model, RAG_EMBEDDING_PREFIX_FIELD_NAME: prefix}, + json=json_data, ) r.raise_for_status() data = r.json() @@ -466,6 +481,12 @@ def generate_embeddings(engine: str, model: str, text: Union[str, list[str]], pr url = kwargs.get("url", "") key = kwargs.get("key", "") + if prefix is not None and RAG_EMBEDDING_PREFIX_FIELD_NAME is None: + if isinstance(text, list): + text = [f'{prefix}{text_element}' for text_element in text] + else: + text = f'{prefix}{text}' + if engine == "ollama": if isinstance(text, list): embeddings = generate_ollama_batch_embeddings( From 35f3824932833fe77ef3bce54b86803cda4838a6 Mon Sep 17 00:00:00 2001 From: Mazurek Michal Date: Fri, 7 Feb 2025 13:44:47 +0100 Subject: [PATCH 007/623] feat: Implement Document Intelligence as Content Extraction Engine --- backend/open_webui/config.py | 12 +++++++ backend/open_webui/main.py | 4 +++ backend/open_webui/retrieval/loaders/main.py | 22 ++++++++++++ backend/open_webui/routers/retrieval.py | 27 +++++++++++++- backend/requirements.txt | 1 + pyproject.toml | 1 + src/lib/apis/retrieval/index.ts | 6 ++++ .../admin/Settings/Documents.svelte | 36 ++++++++++++++++++- 8 files changed, 107 insertions(+), 2 deletions(-) diff --git 
a/backend/open_webui/config.py b/backend/open_webui/config.py index bf6f1d02568..e46a87cd5ed 100644 --- a/backend/open_webui/config.py +++ b/backend/open_webui/config.py @@ -1431,6 +1431,18 @@ class BannerModel(BaseModel): os.getenv("TIKA_SERVER_URL", "http://tika:9998"), # Default for sidecar deployment ) +DOCUMENT_INTELLIGENCE_ENDPOINT = PersistentConfig( + "DOCUMENT_INTELLIGENCE_ENDPOINT", + "rag.document_intelligence_endpoint", + os.getenv("DOCUMENT_INTELLIGENCE_ENDPOINT", ""), +) + +DOCUMENT_INTELLIGENCE_KEY = PersistentConfig( + "DOCUMENT_INTELLIGENCE_KEY", + "rag.document_intelligence_key", + os.getenv("DOCUMENT_INTELLIGENCE_KEY", ""), +) + RAG_TOP_K = PersistentConfig( "RAG_TOP_K", "rag.top_k", int(os.environ.get("RAG_TOP_K", "3")) ) diff --git a/backend/open_webui/main.py b/backend/open_webui/main.py index 863f58dea5e..2f1b92b1d52 100644 --- a/backend/open_webui/main.py +++ b/backend/open_webui/main.py @@ -154,6 +154,8 @@ CHUNK_SIZE, CONTENT_EXTRACTION_ENGINE, TIKA_SERVER_URL, + DOCUMENT_INTELLIGENCE_ENDPOINT, + DOCUMENT_INTELLIGENCE_KEY, RAG_TOP_K, RAG_TEXT_SPLITTER, TIKTOKEN_ENCODING_NAME, @@ -478,6 +480,8 @@ async def lifespan(app: FastAPI): app.state.config.CONTENT_EXTRACTION_ENGINE = CONTENT_EXTRACTION_ENGINE app.state.config.TIKA_SERVER_URL = TIKA_SERVER_URL +app.state.config.DOCUMENT_INTELLIGENCE_ENDPOINT = DOCUMENT_INTELLIGENCE_ENDPOINT +app.state.config.DOCUMENT_INTELLIGENCE_KEY = DOCUMENT_INTELLIGENCE_KEY app.state.config.TEXT_SPLITTER = RAG_TEXT_SPLITTER app.state.config.TIKTOKEN_ENCODING_NAME = TIKTOKEN_ENCODING_NAME diff --git a/backend/open_webui/retrieval/loaders/main.py b/backend/open_webui/retrieval/loaders/main.py index a9372f65a60..19d590f5cb6 100644 --- a/backend/open_webui/retrieval/loaders/main.py +++ b/backend/open_webui/retrieval/loaders/main.py @@ -4,6 +4,7 @@ import sys from langchain_community.document_loaders import ( + AzureAIDocumentIntelligenceLoader, BSHTMLLoader, CSVLoader, Docx2txtLoader, @@ -147,6 +148,27 @@ def 
_get_loader(self, filename: str, file_content_type: str, file_path: str): file_path=file_path, mime_type=file_content_type, ) + elif ( + self.engine == "document_intelligence" + and self.kwargs.get("DOCUMENT_INTELLIGENCE_ENDPOINT") != "" + and self.kwargs.get("DOCUMENT_INTELLIGENCE_KEY") != "" + and ( + file_ext in ["pdf", "xls", "xlsx", "docx", "ppt", "pptx"] + or file_content_type + in [ + "application/vnd.ms-excel", + "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", + "application/vnd.openxmlformats-officedocument.wordprocessingml.document", + "application/vnd.ms-powerpoint", + "application/vnd.openxmlformats-officedocument.presentationml.presentation", + ] + ) + ): + loader = AzureAIDocumentIntelligenceLoader( + file_path=file_path, + api_endpoint=self.kwargs.get("DOCUMENT_INTELLIGENCE_ENDPOINT"), + api_key=self.kwargs.get("DOCUMENT_INTELLIGENCE_KEY"), + ) else: if file_ext == "pdf": loader = PyPDFLoader( diff --git a/backend/open_webui/routers/retrieval.py b/backend/open_webui/routers/retrieval.py index 77f04a4be53..4cfcd490d10 100644 --- a/backend/open_webui/routers/retrieval.py +++ b/backend/open_webui/routers/retrieval.py @@ -352,6 +352,10 @@ async def get_rag_config(request: Request, user=Depends(get_admin_user)): "content_extraction": { "engine": request.app.state.config.CONTENT_EXTRACTION_ENGINE, "tika_server_url": request.app.state.config.TIKA_SERVER_URL, + "document_intelligence_config": { + "endpoint": request.app.state.config.DOCUMENT_INTELLIGENCE_ENDPOINT, + "key": request.app.state.config.DOCUMENT_INTELLIGENCE_KEY, + }, }, "chunk": { "text_splitter": request.app.state.config.TEXT_SPLITTER, @@ -402,9 +406,15 @@ class FileConfig(BaseModel): max_count: Optional[int] = None +class DocumentIntelligenceConfigForm(BaseModel): + endpoint: str + key: str + + class ContentExtractionConfig(BaseModel): engine: str = "" tika_server_url: Optional[str] = None + document_intelligence_config: Optional[DocumentIntelligenceConfigForm] = None class 
ChunkParamUpdateForm(BaseModel): @@ -479,13 +489,22 @@ async def update_rag_config( request.app.state.config.FILE_MAX_COUNT = form_data.file.max_count if form_data.content_extraction is not None: - log.info(f"Updating text settings: {form_data.content_extraction}") + log.info( + f"Updating content extraction: {request.app.state.config.CONTENT_EXTRACTION_ENGINE} to {form_data.content_extraction.engine}" + ) request.app.state.config.CONTENT_EXTRACTION_ENGINE = ( form_data.content_extraction.engine ) request.app.state.config.TIKA_SERVER_URL = ( form_data.content_extraction.tika_server_url ) + if form_data.content_extraction.document_intelligence_config is not None: + request.app.state.config.DOCUMENT_INTELLIGENCE_ENDPOINT = ( + form_data.content_extraction.document_intelligence_config.endpoint + ) + request.app.state.config.DOCUMENT_INTELLIGENCE_KEY = ( + form_data.content_extraction.document_intelligence_config.key + ) if form_data.chunk is not None: request.app.state.config.TEXT_SPLITTER = form_data.chunk.text_splitter @@ -564,6 +583,10 @@ async def update_rag_config( "content_extraction": { "engine": request.app.state.config.CONTENT_EXTRACTION_ENGINE, "tika_server_url": request.app.state.config.TIKA_SERVER_URL, + "document_intelligence_config": { + "endpoint": request.app.state.config.DOCUMENT_INTELLIGENCE_ENDPOINT, + "key": request.app.state.config.DOCUMENT_INTELLIGENCE_KEY, + }, }, "chunk": { "text_splitter": request.app.state.config.TEXT_SPLITTER, @@ -887,6 +910,8 @@ def process_file( engine=request.app.state.config.CONTENT_EXTRACTION_ENGINE, TIKA_SERVER_URL=request.app.state.config.TIKA_SERVER_URL, PDF_EXTRACT_IMAGES=request.app.state.config.PDF_EXTRACT_IMAGES, + DOCUMENT_INTELLIGENCE_ENDPOINT=request.app.state.config.DOCUMENT_INTELLIGENCE_ENDPOINT, + DOCUMENT_INTELLIGENCE_KEY=request.app.state.config.DOCUMENT_INTELLIGENCE_KEY, ) docs = loader.load( file.filename, file.meta.get("content_type"), file_path diff --git a/backend/requirements.txt 
b/backend/requirements.txt index 14ad4b9cdff..4a39e77b540 100644 --- a/backend/requirements.txt +++ b/backend/requirements.txt @@ -72,6 +72,7 @@ validators==0.34.0 psutil sentencepiece soundfile==0.13.1 +azure-ai-documentintelligence==1.0.0 opencv-python-headless==4.11.0.86 rapidocr-onnxruntime==1.3.24 diff --git a/pyproject.toml b/pyproject.toml index f121089e8f5..60d54afd6ae 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -77,6 +77,7 @@ dependencies = [ "psutil", "sentencepiece", "soundfile==0.13.1", + "azure-ai-documentintelligence==1.0.0", "opencv-python-headless==4.11.0.86", "rapidocr-onnxruntime==1.3.24", diff --git a/src/lib/apis/retrieval/index.ts b/src/lib/apis/retrieval/index.ts index c35c37847b5..ed07ab5d0dd 100644 --- a/src/lib/apis/retrieval/index.ts +++ b/src/lib/apis/retrieval/index.ts @@ -32,9 +32,15 @@ type ChunkConfigForm = { chunk_overlap: number; }; +type DocumentIntelligenceConfigForm = { + key: string; + endpoint: string; +}; + type ContentExtractConfigForm = { engine: string; tika_server_url: string | null; + document_intelligence_config: DocumentIntelligenceConfigForm | null; }; type YoutubeConfigForm = { diff --git a/src/lib/components/admin/Settings/Documents.svelte b/src/lib/components/admin/Settings/Documents.svelte index d3b7cfa01aa..e624a51b3a0 100644 --- a/src/lib/components/admin/Settings/Documents.svelte +++ b/src/lib/components/admin/Settings/Documents.svelte @@ -50,6 +50,9 @@ let contentExtractionEngine = 'default'; let tikaServerUrl = ''; let showTikaServerUrl = false; + let documentIntelligenceEndpoint = ''; + let documentIntelligenceKey = ''; + let showDocumentIntelligenceConfig = false; let textSplitter = ''; let chunkSize = 0; @@ -175,6 +178,13 @@ toast.error($i18n.t('Tika Server URL required.')); return; } + if ( + contentExtractionEngine === 'document_intelligence' && + (documentIntelligenceEndpoint === '' || documentIntelligenceKey === '') + ) { + toast.error($i18n.t('Document Intelligence endpoint and key 
required.')); + return; + } const res = await updateRAGConfig(localStorage.token, { pdf_extract_images: pdfExtractImages, enable_google_drive_integration: enableGoogleDriveIntegration, @@ -189,7 +199,11 @@ }, content_extraction: { engine: contentExtractionEngine, - tika_server_url: tikaServerUrl + tika_server_url: tikaServerUrl, + document_intelligence_config: { + key: documentIntelligenceKey, + endpoint: documentIntelligenceEndpoint + } } }); @@ -245,6 +259,9 @@ contentExtractionEngine = res.content_extraction.engine; tikaServerUrl = res.content_extraction.tika_server_url; showTikaServerUrl = contentExtractionEngine === 'tika'; + documentIntelligenceEndpoint = res.content_extraction.document_intelligence_config.endpoint; + documentIntelligenceKey = res.content_extraction.document_intelligence_config.key; + showDocumentIntelligenceConfig = contentExtractionEngine === 'document_intelligence'; fileMaxSize = res?.file.max_size ?? ''; fileMaxCount = res?.file.max_count ?? ''; @@ -568,10 +585,12 @@ bind:value={contentExtractionEngine} on:change={(e) => { showTikaServerUrl = e.target.value === 'tika'; + showDocumentIntelligenceConfig = e.target.value === 'document_intelligence'; }} > + @@ -587,6 +606,21 @@ {/if} + + {#if showDocumentIntelligenceConfig} +
+ + + +
+ {/if}
From 6d62e71c3431f3305a3802970bf5a18055a7fe39 Mon Sep 17 00:00:00 2001 From: Didier FOURNOUT Date: Thu, 13 Feb 2025 15:29:26 +0000 Subject: [PATCH 008/623] Add x-Open-Webui headers for ollama + more for openai --- backend/open_webui/main.py | 4 +- backend/open_webui/routers/ollama.py | 144 ++++++++++++++++++++++++--- backend/open_webui/routers/openai.py | 43 ++++++-- backend/open_webui/utils/chat.py | 4 +- backend/open_webui/utils/models.py | 11 +- 5 files changed, 173 insertions(+), 33 deletions(-) diff --git a/backend/open_webui/main.py b/backend/open_webui/main.py index 88b5b3f6925..3e5f20ceee7 100644 --- a/backend/open_webui/main.py +++ b/backend/open_webui/main.py @@ -858,7 +858,7 @@ def get_filtered_models(models, user): return filtered_models - models = await get_all_models(request) + models = await get_all_models(request, user=user) # Filter out filter pipelines models = [ @@ -898,7 +898,7 @@ async def chat_completion( user=Depends(get_verified_user), ): if not request.app.state.MODELS: - await get_all_models(request) + await get_all_models(request, user=user) model_item = form_data.pop("model_item", {}) tasks = form_data.pop("background_tasks", None) diff --git a/backend/open_webui/routers/ollama.py b/backend/open_webui/routers/ollama.py index 64373c616ca..e825848d47b 100644 --- a/backend/open_webui/routers/ollama.py +++ b/backend/open_webui/routers/ollama.py @@ -14,6 +14,11 @@ import aiohttp from aiocache import cached import requests +from open_webui.models.users import UserModel + +from open_webui.env import ( + ENABLE_FORWARD_USER_INFO_HEADERS, +) from fastapi import ( Depends, @@ -66,12 +71,26 @@ ########################################## -async def send_get_request(url, key=None): +async def send_get_request(url, key=None, user: UserModel = None): timeout = aiohttp.ClientTimeout(total=AIOHTTP_CLIENT_TIMEOUT_OPENAI_MODEL_LIST) try: async with aiohttp.ClientSession(timeout=timeout, trust_env=True) as session: async with session.get( - url, 
headers={**({"Authorization": f"Bearer {key}"} if key else {})} + url, + headers={ + "Content-Type": "application/json", + **({"Authorization": f"Bearer {key}"} if key else {}), + **( + { + "X-OpenWebUI-User-Name": user.name, + "X-OpenWebUI-User-Id": user.id, + "X-OpenWebUI-User-Email": user.email, + "X-OpenWebUI-User-Role": user.role, + } + if ENABLE_FORWARD_USER_INFO_HEADERS and user + else {} + ), + }, ) as response: return await response.json() except Exception as e: @@ -96,6 +115,7 @@ async def send_post_request( stream: bool = True, key: Optional[str] = None, content_type: Optional[str] = None, + user: UserModel = None ): r = None @@ -110,6 +130,16 @@ async def send_post_request( headers={ "Content-Type": "application/json", **({"Authorization": f"Bearer {key}"} if key else {}), + **( + { + "X-OpenWebUI-User-Name": user.name, + "X-OpenWebUI-User-Id": user.id, + "X-OpenWebUI-User-Email": user.email, + "X-OpenWebUI-User-Role": user.role, + } + if ENABLE_FORWARD_USER_INFO_HEADERS and user + else {} + ), }, ) r.raise_for_status() @@ -191,7 +221,19 @@ async def verify_connection( try: async with session.get( f"{url}/api/version", - headers={**({"Authorization": f"Bearer {key}"} if key else {})}, + headers={ + **({"Authorization": f"Bearer {key}"} if key else {}), + **( + { + "X-OpenWebUI-User-Name": user.name, + "X-OpenWebUI-User-Id": user.id, + "X-OpenWebUI-User-Email": user.email, + "X-OpenWebUI-User-Role": user.role, + } + if ENABLE_FORWARD_USER_INFO_HEADERS and user + else {} + ), + }, ) as r: if r.status != 200: detail = f"HTTP Error: {r.status}" @@ -254,7 +296,7 @@ async def update_config( @cached(ttl=3) -async def get_all_models(request: Request): +async def get_all_models(request: Request, user: UserModel=None): log.info("get_all_models()") if request.app.state.config.ENABLE_OLLAMA_API: request_tasks = [] @@ -262,7 +304,7 @@ async def get_all_models(request: Request): if (str(idx) not in request.app.state.config.OLLAMA_API_CONFIGS) and ( url not in 
request.app.state.config.OLLAMA_API_CONFIGS # Legacy support ): - request_tasks.append(send_get_request(f"{url}/api/tags")) + request_tasks.append(send_get_request(f"{url}/api/tags", user=user)) else: api_config = request.app.state.config.OLLAMA_API_CONFIGS.get( str(idx), @@ -275,7 +317,7 @@ async def get_all_models(request: Request): key = api_config.get("key", None) if enable: - request_tasks.append(send_get_request(f"{url}/api/tags", key)) + request_tasks.append(send_get_request(f"{url}/api/tags", key, user=user)) else: request_tasks.append(asyncio.ensure_future(asyncio.sleep(0, None))) @@ -360,7 +402,7 @@ async def get_ollama_tags( models = [] if url_idx is None: - models = await get_all_models(request) + models = await get_all_models(request, user=user) else: url = request.app.state.config.OLLAMA_BASE_URLS[url_idx] key = get_api_key(url_idx, url, request.app.state.config.OLLAMA_API_CONFIGS) @@ -370,7 +412,19 @@ async def get_ollama_tags( r = requests.request( method="GET", url=f"{url}/api/tags", - headers={**({"Authorization": f"Bearer {key}"} if key else {})}, + headers={ + **({"Authorization": f"Bearer {key}"} if key else {}), + **( + { + "X-OpenWebUI-User-Name": user.name, + "X-OpenWebUI-User-Id": user.id, + "X-OpenWebUI-User-Email": user.email, + "X-OpenWebUI-User-Role": user.role, + } + if ENABLE_FORWARD_USER_INFO_HEADERS and user + else {} + ), + }, ) r.raise_for_status() @@ -477,6 +531,7 @@ async def get_ollama_loaded_models(request: Request, user=Depends(get_verified_u url, {} ), # Legacy support ).get("key", None), + user=user ) for idx, url in enumerate(request.app.state.config.OLLAMA_BASE_URLS) ] @@ -509,6 +564,7 @@ async def pull_model( url=f"{url}/api/pull", payload=json.dumps(payload), key=get_api_key(url_idx, url, request.app.state.config.OLLAMA_API_CONFIGS), + user=user, ) @@ -527,7 +583,7 @@ async def push_model( user=Depends(get_admin_user), ): if url_idx is None: - await get_all_models(request) + await get_all_models(request, user=user) 
models = request.app.state.OLLAMA_MODELS if form_data.name in models: @@ -545,6 +601,7 @@ async def push_model( url=f"{url}/api/push", payload=form_data.model_dump_json(exclude_none=True).encode(), key=get_api_key(url_idx, url, request.app.state.config.OLLAMA_API_CONFIGS), + user=user, ) @@ -571,6 +628,7 @@ async def create_model( url=f"{url}/api/create", payload=form_data.model_dump_json(exclude_none=True).encode(), key=get_api_key(url_idx, url, request.app.state.config.OLLAMA_API_CONFIGS), + user=user, ) @@ -588,7 +646,7 @@ async def copy_model( user=Depends(get_admin_user), ): if url_idx is None: - await get_all_models(request) + await get_all_models(request, user=user) models = request.app.state.OLLAMA_MODELS if form_data.source in models: @@ -609,6 +667,16 @@ async def copy_model( headers={ "Content-Type": "application/json", **({"Authorization": f"Bearer {key}"} if key else {}), + **( + { + "X-OpenWebUI-User-Name": user.name, + "X-OpenWebUI-User-Id": user.id, + "X-OpenWebUI-User-Email": user.email, + "X-OpenWebUI-User-Role": user.role, + } + if ENABLE_FORWARD_USER_INFO_HEADERS and user + else {} + ), }, data=form_data.model_dump_json(exclude_none=True).encode(), ) @@ -643,7 +711,7 @@ async def delete_model( user=Depends(get_admin_user), ): if url_idx is None: - await get_all_models(request) + await get_all_models(request, user=user) models = request.app.state.OLLAMA_MODELS if form_data.name in models: @@ -665,6 +733,16 @@ async def delete_model( headers={ "Content-Type": "application/json", **({"Authorization": f"Bearer {key}"} if key else {}), + **( + { + "X-OpenWebUI-User-Name": user.name, + "X-OpenWebUI-User-Id": user.id, + "X-OpenWebUI-User-Email": user.email, + "X-OpenWebUI-User-Role": user.role, + } + if ENABLE_FORWARD_USER_INFO_HEADERS and user + else {} + ), }, ) r.raise_for_status() @@ -693,7 +771,7 @@ async def delete_model( async def show_model_info( request: Request, form_data: ModelNameForm, user=Depends(get_verified_user) ): - await 
get_all_models(request) + await get_all_models(request, user=user) models = request.app.state.OLLAMA_MODELS if form_data.name not in models: @@ -714,6 +792,16 @@ async def show_model_info( headers={ "Content-Type": "application/json", **({"Authorization": f"Bearer {key}"} if key else {}), + **( + { + "X-OpenWebUI-User-Name": user.name, + "X-OpenWebUI-User-Id": user.id, + "X-OpenWebUI-User-Email": user.email, + "X-OpenWebUI-User-Role": user.role, + } + if ENABLE_FORWARD_USER_INFO_HEADERS and user + else {} + ), }, data=form_data.model_dump_json(exclude_none=True).encode(), ) @@ -757,7 +845,7 @@ async def embed( log.info(f"generate_ollama_batch_embeddings {form_data}") if url_idx is None: - await get_all_models(request) + await get_all_models(request, user=user) models = request.app.state.OLLAMA_MODELS model = form_data.model @@ -783,6 +871,16 @@ async def embed( headers={ "Content-Type": "application/json", **({"Authorization": f"Bearer {key}"} if key else {}), + **( + { + "X-OpenWebUI-User-Name": user.name, + "X-OpenWebUI-User-Id": user.id, + "X-OpenWebUI-User-Email": user.email, + "X-OpenWebUI-User-Role": user.role, + } + if ENABLE_FORWARD_USER_INFO_HEADERS and user + else {} + ), }, data=form_data.model_dump_json(exclude_none=True).encode(), ) @@ -826,7 +924,7 @@ async def embeddings( log.info(f"generate_ollama_embeddings {form_data}") if url_idx is None: - await get_all_models(request) + await get_all_models(request, user=user) models = request.app.state.OLLAMA_MODELS model = form_data.model @@ -852,6 +950,16 @@ async def embeddings( headers={ "Content-Type": "application/json", **({"Authorization": f"Bearer {key}"} if key else {}), + **( + { + "X-OpenWebUI-User-Name": user.name, + "X-OpenWebUI-User-Id": user.id, + "X-OpenWebUI-User-Email": user.email, + "X-OpenWebUI-User-Role": user.role, + } + if ENABLE_FORWARD_USER_INFO_HEADERS and user + else {} + ), }, data=form_data.model_dump_json(exclude_none=True).encode(), ) @@ -901,7 +1009,7 @@ async def 
generate_completion( user=Depends(get_verified_user), ): if url_idx is None: - await get_all_models(request) + await get_all_models(request, user=user) models = request.app.state.OLLAMA_MODELS model = form_data.model @@ -931,6 +1039,7 @@ async def generate_completion( url=f"{url}/api/generate", payload=form_data.model_dump_json(exclude_none=True).encode(), key=get_api_key(url_idx, url, request.app.state.config.OLLAMA_API_CONFIGS), + user=user, ) @@ -1047,6 +1156,7 @@ async def generate_chat_completion( stream=form_data.stream, key=get_api_key(url_idx, url, request.app.state.config.OLLAMA_API_CONFIGS), content_type="application/x-ndjson", + user=user, ) @@ -1149,6 +1259,7 @@ async def generate_openai_completion( payload=json.dumps(payload), stream=payload.get("stream", False), key=get_api_key(url_idx, url, request.app.state.config.OLLAMA_API_CONFIGS), + user=user, ) @@ -1227,6 +1338,7 @@ async def generate_openai_chat_completion( payload=json.dumps(payload), stream=payload.get("stream", False), key=get_api_key(url_idx, url, request.app.state.config.OLLAMA_API_CONFIGS), + user=user, ) @@ -1240,7 +1352,7 @@ async def get_openai_models( models = [] if url_idx is None: - model_list = await get_all_models(request) + model_list = await get_all_models(request, user=user) models = [ { "id": model["model"], diff --git a/backend/open_webui/routers/openai.py b/backend/open_webui/routers/openai.py index afda362373f..f0d5d81dd6a 100644 --- a/backend/open_webui/routers/openai.py +++ b/backend/open_webui/routers/openai.py @@ -26,6 +26,7 @@ ENABLE_FORWARD_USER_INFO_HEADERS, BYPASS_MODEL_ACCESS_CONTROL, ) +from open_webui.models.users import UserModel from open_webui.constants import ERROR_MESSAGES from open_webui.env import ENV, SRC_LOG_LEVELS @@ -51,12 +52,25 @@ ########################################## -async def send_get_request(url, key=None): +async def send_get_request(url, key=None, user: UserModel=None): timeout = 
aiohttp.ClientTimeout(total=AIOHTTP_CLIENT_TIMEOUT_OPENAI_MODEL_LIST) try: async with aiohttp.ClientSession(timeout=timeout, trust_env=True) as session: async with session.get( - url, headers={**({"Authorization": f"Bearer {key}"} if key else {})} + url, + headers={ + **({"Authorization": f"Bearer {key}"} if key else {}), + **( + { + "X-OpenWebUI-User-Name": user.name, + "X-OpenWebUI-User-Id": user.id, + "X-OpenWebUI-User-Email": user.email, + "X-OpenWebUI-User-Role": user.role, + } + if ENABLE_FORWARD_USER_INFO_HEADERS + else {} + ), + } ) as response: return await response.json() except Exception as e: @@ -247,7 +261,7 @@ async def speech(request: Request, user=Depends(get_verified_user)): raise HTTPException(status_code=401, detail=ERROR_MESSAGES.OPENAI_NOT_FOUND) -async def get_all_models_responses(request: Request) -> list: +async def get_all_models_responses(request: Request, user: UserModel) -> list: if not request.app.state.config.ENABLE_OPENAI_API: return [] @@ -271,7 +285,9 @@ async def get_all_models_responses(request: Request) -> list: ): request_tasks.append( send_get_request( - f"{url}/models", request.app.state.config.OPENAI_API_KEYS[idx] + f"{url}/models", + request.app.state.config.OPENAI_API_KEYS[idx], + user=user, ) ) else: @@ -291,6 +307,7 @@ async def get_all_models_responses(request: Request) -> list: send_get_request( f"{url}/models", request.app.state.config.OPENAI_API_KEYS[idx], + user=user, ) ) else: @@ -352,13 +369,13 @@ async def get_filtered_models(models, user): @cached(ttl=3) -async def get_all_models(request: Request) -> dict[str, list]: +async def get_all_models(request: Request, user: UserModel) -> dict[str, list]: log.info("get_all_models()") if not request.app.state.config.ENABLE_OPENAI_API: return {"data": []} - responses = await get_all_models_responses(request) + responses = await get_all_models_responses(request, user=user) def extract_data(response): if response and "data" in response: @@ -418,7 +435,7 @@ async def 
get_models( } if url_idx is None: - models = await get_all_models(request) + models = await get_all_models(request, user=user) else: url = request.app.state.config.OPENAI_API_BASE_URLS[url_idx] key = request.app.state.config.OPENAI_API_KEYS[url_idx] @@ -515,6 +532,16 @@ async def verify_connection( headers={ "Authorization": f"Bearer {key}", "Content-Type": "application/json", + **( + { + "X-OpenWebUI-User-Name": user.name, + "X-OpenWebUI-User-Id": user.id, + "X-OpenWebUI-User-Email": user.email, + "X-OpenWebUI-User-Role": user.role, + } + if ENABLE_FORWARD_USER_INFO_HEADERS + else {} + ), }, ) as r: if r.status != 200: @@ -587,7 +614,7 @@ async def generate_chat_completion( detail="Model not found", ) - await get_all_models(request) + await get_all_models(request, user=user) model = request.app.state.OPENAI_MODELS.get(model_id) if model: idx = model["urlIdx"] diff --git a/backend/open_webui/utils/chat.py b/backend/open_webui/utils/chat.py index 253eaedfb91..d8f44590b96 100644 --- a/backend/open_webui/utils/chat.py +++ b/backend/open_webui/utils/chat.py @@ -285,7 +285,7 @@ async def stream_wrapper(stream): async def chat_completed(request: Request, form_data: dict, user: Any): if not request.app.state.MODELS: - await get_all_models(request) + await get_all_models(request, user=user) if getattr(request.state, "direct", False) and hasattr(request.state, "model"): models = { @@ -351,7 +351,7 @@ async def chat_action(request: Request, action_id: str, form_data: dict, user: A raise Exception(f"Action not found: {action_id}") if not request.app.state.MODELS: - await get_all_models(request) + await get_all_models(request, user=user) if getattr(request.state, "direct", False) and hasattr(request.state, "model"): models = { diff --git a/backend/open_webui/utils/models.py b/backend/open_webui/utils/models.py index 975f8cb0955..00f8fd6661a 100644 --- a/backend/open_webui/utils/models.py +++ b/backend/open_webui/utils/models.py @@ -22,6 +22,7 @@ ) from open_webui.env import 
SRC_LOG_LEVELS, GLOBAL_LOG_LEVEL +from open_webui.models.users import UserModel logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL) @@ -29,17 +30,17 @@ log.setLevel(SRC_LOG_LEVELS["MAIN"]) -async def get_all_base_models(request: Request): +async def get_all_base_models(request: Request, user: UserModel=None): function_models = [] openai_models = [] ollama_models = [] if request.app.state.config.ENABLE_OPENAI_API: - openai_models = await openai.get_all_models(request) + openai_models = await openai.get_all_models(request, user=user) openai_models = openai_models["data"] if request.app.state.config.ENABLE_OLLAMA_API: - ollama_models = await ollama.get_all_models(request) + ollama_models = await ollama.get_all_models(request, user=user) ollama_models = [ { "id": model["model"], @@ -58,8 +59,8 @@ async def get_all_base_models(request: Request): return models -async def get_all_models(request): - models = await get_all_base_models(request) +async def get_all_models(request, user: UserModel=None): + models = await get_all_base_models(request, user=user) # If there are no models, return an empty list if len(models) == 0: From 06062568c7f24a6c3a7e76c9070c11941b4018d5 Mon Sep 17 00:00:00 2001 From: Didier FOURNOUT Date: Thu, 13 Feb 2025 16:12:46 +0000 Subject: [PATCH 009/623] black formatting --- backend/open_webui/routers/ollama.py | 10 ++++++---- backend/open_webui/routers/openai.py | 4 ++-- backend/open_webui/utils/models.py | 4 ++-- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/backend/open_webui/routers/ollama.py b/backend/open_webui/routers/ollama.py index e825848d47b..a3d5064497f 100644 --- a/backend/open_webui/routers/ollama.py +++ b/backend/open_webui/routers/ollama.py @@ -115,7 +115,7 @@ async def send_post_request( stream: bool = True, key: Optional[str] = None, content_type: Optional[str] = None, - user: UserModel = None + user: UserModel = None, ): r = None @@ -296,7 +296,7 @@ async def update_config( @cached(ttl=3) -async def 
get_all_models(request: Request, user: UserModel=None): +async def get_all_models(request: Request, user: UserModel = None): log.info("get_all_models()") if request.app.state.config.ENABLE_OLLAMA_API: request_tasks = [] @@ -317,7 +317,9 @@ async def get_all_models(request: Request, user: UserModel=None): key = api_config.get("key", None) if enable: - request_tasks.append(send_get_request(f"{url}/api/tags", key, user=user)) + request_tasks.append( + send_get_request(f"{url}/api/tags", key, user=user) + ) else: request_tasks.append(asyncio.ensure_future(asyncio.sleep(0, None))) @@ -531,7 +533,7 @@ async def get_ollama_loaded_models(request: Request, user=Depends(get_verified_u url, {} ), # Legacy support ).get("key", None), - user=user + user=user, ) for idx, url in enumerate(request.app.state.config.OLLAMA_BASE_URLS) ] diff --git a/backend/open_webui/routers/openai.py b/backend/open_webui/routers/openai.py index f0d5d81dd6a..1ef913df470 100644 --- a/backend/open_webui/routers/openai.py +++ b/backend/open_webui/routers/openai.py @@ -52,7 +52,7 @@ ########################################## -async def send_get_request(url, key=None, user: UserModel=None): +async def send_get_request(url, key=None, user: UserModel = None): timeout = aiohttp.ClientTimeout(total=AIOHTTP_CLIENT_TIMEOUT_OPENAI_MODEL_LIST) try: async with aiohttp.ClientSession(timeout=timeout, trust_env=True) as session: @@ -70,7 +70,7 @@ async def send_get_request(url, key=None, user: UserModel=None): if ENABLE_FORWARD_USER_INFO_HEADERS else {} ), - } + }, ) as response: return await response.json() except Exception as e: diff --git a/backend/open_webui/utils/models.py b/backend/open_webui/utils/models.py index 00f8fd6661a..872049f0f85 100644 --- a/backend/open_webui/utils/models.py +++ b/backend/open_webui/utils/models.py @@ -30,7 +30,7 @@ log.setLevel(SRC_LOG_LEVELS["MAIN"]) -async def get_all_base_models(request: Request, user: UserModel=None): +async def get_all_base_models(request: Request, user: 
UserModel = None): function_models = [] openai_models = [] ollama_models = [] @@ -59,7 +59,7 @@ async def get_all_base_models(request: Request, user: UserModel=None): return models -async def get_all_models(request, user: UserModel=None): +async def get_all_models(request, user: UserModel = None): models = await get_all_base_models(request, user=user) # If there are no models, return an empty list From 2419ef06a0f58f543e3d3ab3d5700d8906c6979f Mon Sep 17 00:00:00 2001 From: Fabio Polito Date: Fri, 14 Feb 2025 12:08:03 +0000 Subject: [PATCH 010/623] feat: docling support for document preprocessing --- backend/open_webui/config.py | 6 + backend/open_webui/main.py | 2 + backend/open_webui/retrieval/loaders/main.py | 61 ++ backend/open_webui/routers/retrieval.py | 7 + .../admin/Settings/Documents.svelte | 31 +- uv.lock | 571 ++---------------- 6 files changed, 163 insertions(+), 515 deletions(-) diff --git a/backend/open_webui/config.py b/backend/open_webui/config.py index c37b831dec3..9b5bbaa941e 100644 --- a/backend/open_webui/config.py +++ b/backend/open_webui/config.py @@ -1378,6 +1378,12 @@ class BannerModel(BaseModel): os.getenv("TIKA_SERVER_URL", "http://tika:9998"), # Default for sidecar deployment ) +DOCLING_SERVER_URL = PersistentConfig( + "DOCLING_SERVER_URL", + "rag.docling_server_url", + os.getenv("DOCLING_SERVER_URL", "http://docling:5001"), +) + RAG_TOP_K = PersistentConfig( "RAG_TOP_K", "rag.top_k", int(os.environ.get("RAG_TOP_K", "3")) ) diff --git a/backend/open_webui/main.py b/backend/open_webui/main.py index 00270aabc46..09f268d5938 100644 --- a/backend/open_webui/main.py +++ b/backend/open_webui/main.py @@ -154,6 +154,7 @@ CHUNK_SIZE, CONTENT_EXTRACTION_ENGINE, TIKA_SERVER_URL, + DOCLING_SERVER_URL, RAG_TOP_K, RAG_TEXT_SPLITTER, TIKTOKEN_ENCODING_NAME, @@ -477,6 +478,7 @@ async def lifespan(app: FastAPI): app.state.config.CONTENT_EXTRACTION_ENGINE = CONTENT_EXTRACTION_ENGINE app.state.config.TIKA_SERVER_URL = TIKA_SERVER_URL 
+app.state.config.DOCLING_SERVER_URL = DOCLING_SERVER_URL app.state.config.TEXT_SPLITTER = RAG_TEXT_SPLITTER app.state.config.TIKTOKEN_ENCODING_NAME = TIKTOKEN_ENCODING_NAME diff --git a/backend/open_webui/retrieval/loaders/main.py b/backend/open_webui/retrieval/loaders/main.py index a9372f65a60..e305b59b8d8 100644 --- a/backend/open_webui/retrieval/loaders/main.py +++ b/backend/open_webui/retrieval/loaders/main.py @@ -115,6 +115,61 @@ def load(self) -> list[Document]: raise Exception(f"Error calling Tika: {r.reason}") +class DoclingLoader: + def __init__(self, url, file_path=None, mime_type=None): + self.url = url.rstrip("/") # Ensure no trailing slash + self.file_path = file_path + self.mime_type = mime_type + + def load(self) -> list[Document]: + if self.file_path is None: + raise ValueError("File path is required for DoclingLoader") + + with open(self.file_path, "rb") as f: + files = {"files": (self.file_path, f, self.mime_type or "application/octet-stream")} + + params = { + "from_formats": ["docx", "pptx", "html", "xml_pubmed", "image", "pdf", "asciidoc", "md", "xlsx", "xml_uspto", "json_docling"], + "to_formats": ["md"], + "image_export_mode": "placeholder", + "do_ocr": True, + "force_ocr": False, + "ocr_engine": "easyocr", + "ocr_lang": None, + "pdf_backend": "dlparse_v2", + "table_mode": "fast", + "abort_on_error": False, + "return_as_file": False, + "do_table_structure": True, + "include_images": True, + "images_scale": 2.0, + } + + endpoint = f"{self.url}/v1alpha/convert/file" + response = requests.post(endpoint, files=files, data=params) + + if response.ok: + result = response.json() + document_data = result.get("document", {}) + text = document_data.get("md_content", "") + + metadata = {"Content-Type": self.mime_type} if self.mime_type else {} + + log.debug("Docling extracted text: %s", text) + + return [Document(page_content=text, metadata=metadata)] + else: + error_msg = f"Error calling Docling API: {response.status_code}" + if response.text: + try: 
+ error_data = response.json() + if "detail" in error_data: + error_msg += f" - {error_data['detail']}" + except: + error_msg += f" - {response.text}" + raise Exception(f"Error calling Docling: {error_msg}") + + class Loader: def __init__(self, engine: str = "", **kwargs): self.engine = engine @@ -147,6 +202,12 @@ def _get_loader(self, filename: str, file_content_type: str, file_path: str): file_path=file_path, mime_type=file_content_type, ) + elif self.engine == "docling": + loader = DoclingLoader( + url=self.kwargs.get("DOCLING_SERVER_URL"), + file_path=file_path, + mime_type=file_content_type, + ) else: if file_ext == "pdf": loader = PyPDFLoader( diff --git a/backend/open_webui/routers/retrieval.py b/backend/open_webui/routers/retrieval.py index 2cffd9ead4c..e09611548d6 100644 --- a/backend/open_webui/routers/retrieval.py +++ b/backend/open_webui/routers/retrieval.py @@ -351,6 +351,7 @@ async def get_rag_config(request: Request, user=Depends(get_admin_user)): "content_extraction": { "engine": request.app.state.config.CONTENT_EXTRACTION_ENGINE, "tika_server_url": request.app.state.config.TIKA_SERVER_URL, + "docling_server_url": request.app.state.config.DOCLING_SERVER_URL, }, "chunk": { "text_splitter": request.app.state.config.TEXT_SPLITTER, @@ -403,6 +404,7 @@ class FileConfig(BaseModel): class ContentExtractionConfig(BaseModel): engine: str = "" tika_server_url: Optional[str] = None + docling_server_url: Optional[str] = None class ChunkParamUpdateForm(BaseModel): @@ -483,6 +485,9 @@ async def update_rag_config( request.app.state.config.TIKA_SERVER_URL = ( form_data.content_extraction.tika_server_url ) + request.app.state.config.DOCLING_SERVER_URL = ( + form_data.content_extraction.docling_server_url + ) if form_data.chunk is not None: request.app.state.config.TEXT_SPLITTER = form_data.chunk.text_splitter @@ -559,6 +564,7 @@ async def update_rag_config( "content_extraction": { "engine": request.app.state.config.CONTENT_EXTRACTION_ENGINE, "tika_server_url": 
request.app.state.config.TIKA_SERVER_URL, + "docling_server_url": request.app.state.config.DOCLING_SERVER_URL, }, "chunk": { "text_splitter": request.app.state.config.TEXT_SPLITTER, @@ -879,6 +885,7 @@ def process_file( loader = Loader( engine=request.app.state.config.CONTENT_EXTRACTION_ENGINE, TIKA_SERVER_URL=request.app.state.config.TIKA_SERVER_URL, + DOCLING_SERVER_URL=request.app.state.config.DOCLING_SERVER_URL, PDF_EXTRACT_IMAGES=request.app.state.config.PDF_EXTRACT_IMAGES, ) docs = loader.load( diff --git a/src/lib/components/admin/Settings/Documents.svelte b/src/lib/components/admin/Settings/Documents.svelte index d3b7cfa01aa..db87dcfbf43 100644 --- a/src/lib/components/admin/Settings/Documents.svelte +++ b/src/lib/components/admin/Settings/Documents.svelte @@ -50,6 +50,8 @@ let contentExtractionEngine = 'default'; let tikaServerUrl = ''; let showTikaServerUrl = false; + let doclingServerUrl = ''; + let showDoclingServerUrl = false; let textSplitter = ''; let chunkSize = 0; @@ -175,6 +177,12 @@ toast.error($i18n.t('Tika Server URL required.')); return; } + + if (contentExtractionEngine === 'docling' && doclingServerUrl === '') { + toast.error($i18n.t('Docling Server URL required.')); + return; + } + const res = await updateRAGConfig(localStorage.token, { pdf_extract_images: pdfExtractImages, enable_google_drive_integration: enableGoogleDriveIntegration, @@ -189,7 +197,8 @@ }, content_extraction: { engine: contentExtractionEngine, - tika_server_url: tikaServerUrl + tika_server_url: contentExtractionEngine === 'tika' ? tikaServerUrl : undefined, + docling_server_url: contentExtractionEngine === 'docling' ? 
doclingServerUrl : undefined } }); @@ -231,7 +240,7 @@ await setEmbeddingConfig(); await setRerankingConfig(); - querySettings = await getQuerySettings(localStorage.token); + querySettings = await getQuerySettings(localStorage.token); const res = await getRAGConfig(localStorage.token); @@ -243,8 +252,11 @@ chunkOverlap = res.chunk.chunk_overlap; contentExtractionEngine = res.content_extraction.engine; - tikaServerUrl = res.content_extraction.tika_server_url; + tikaServerUrl = res.content_extraction.tika_server_url ?? ''; + doclingServerUrl = res.content_extraction.docling_server_url ?? ''; // Load doclingServerUrl + showTikaServerUrl = contentExtractionEngine === 'tika'; + showDoclingServerUrl = contentExtractionEngine === 'docling'; fileMaxSize = res?.file.max_size ?? ''; fileMaxCount = res?.file.max_count ?? ''; @@ -568,10 +580,12 @@ bind:value={contentExtractionEngine} on:change={(e) => { showTikaServerUrl = e.target.value === 'tika'; + showDoclingServerUrl = e.target.value === 'docling'; }} > + @@ -587,6 +601,17 @@ {/if} + {#if showDoclingServerUrl} +
+
+ +
+
+ {/if}
diff --git a/uv.lock b/uv.lock index 00b6c29b432..c5fce6d94c8 100644 --- a/uv.lock +++ b/uv.lock @@ -28,16 +28,17 @@ resolution-markers = [ "python_full_version < '3.12' and platform_system == 'Darwin'", "python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_system == 'Darwin'", "python_full_version < '3.12' and platform_system == 'Darwin'", - "python_full_version < '3.12.4' and platform_system == 'Darwin'", + "python_full_version < '3.12' and platform_system == 'Darwin'", + "python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_system == 'Darwin'", "python_full_version >= '3.12.4' and platform_system == 'Darwin'", - "python_full_version >= '3.13' and platform_system == 'Darwin'", - "python_full_version >= '3.13' and platform_system == 'Darwin'", - "python_full_version >= '3.13' and platform_system == 'Darwin'", - "python_full_version >= '3.13' and platform_system == 'Darwin'", - "python_full_version >= '3.13' and platform_system == 'Darwin'", - "python_full_version >= '3.13' and platform_system == 'Darwin'", - "python_full_version >= '3.13' and platform_system == 'Darwin'", - "python_full_version >= '3.13' and platform_system == 'Darwin'", + "python_version < '0'", + "python_version < '0'", + "python_version < '0'", + "python_version < '0'", + "python_version < '0'", + "python_version < '0'", + "python_version < '0'", + "python_version < '0'", "python_full_version < '3.12' and platform_machine == 'aarch64' and platform_system == 'Linux'", "python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_system == 'Linux'", "python_full_version < '3.12' and platform_machine == 'aarch64' and platform_system == 'Linux'", @@ -62,16 +63,17 @@ resolution-markers = [ "python_full_version < '3.12' and platform_machine == 'aarch64' and platform_system == 'Linux'", "python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and 
platform_system == 'Linux'", "python_full_version < '3.12' and platform_machine == 'aarch64' and platform_system == 'Linux'", - "python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_system == 'Linux'", + "python_full_version < '3.12' and platform_machine == 'aarch64' and platform_system == 'Linux'", + "python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_system == 'Linux'", "python_full_version >= '3.12.4' and platform_machine == 'aarch64' and platform_system == 'Linux'", - "python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_system == 'Linux'", - "python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_system == 'Linux'", - "python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_system == 'Linux'", - "python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_system == 'Linux'", - "python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_system == 'Linux'", - "python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_system == 'Linux'", - "python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_system == 'Linux'", - "python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_system == 'Linux'", + "python_version < '0'", + "python_version < '0'", + "python_version < '0'", + "python_version < '0'", + "python_version < '0'", + "python_version < '0'", + "python_version < '0'", + "python_version < '0'", "(python_full_version < '3.12' and platform_machine != 'aarch64' and platform_system != 'Darwin') or (python_full_version < '3.12' and platform_system != 'Darwin' and platform_system != 'Linux')", "(python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine != 'aarch64' and platform_system != 'Darwin') or (python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_system != 
'Darwin' and platform_system != 'Linux')", "(python_full_version < '3.12' and platform_machine != 'aarch64' and platform_system != 'Darwin') or (python_full_version < '3.12' and platform_system != 'Darwin' and platform_system != 'Linux')", @@ -96,16 +98,17 @@ resolution-markers = [ "(python_full_version < '3.12' and platform_machine != 'aarch64' and platform_system != 'Darwin') or (python_full_version < '3.12' and platform_system != 'Darwin' and platform_system != 'Linux')", "(python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine != 'aarch64' and platform_system != 'Darwin') or (python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_system != 'Darwin' and platform_system != 'Linux')", "(python_full_version < '3.12' and platform_machine != 'aarch64' and platform_system != 'Darwin') or (python_full_version < '3.12' and platform_system != 'Darwin' and platform_system != 'Linux')", - "(python_full_version < '3.12.4' and platform_machine != 'aarch64' and platform_system != 'Darwin') or (python_full_version < '3.12.4' and platform_system != 'Darwin' and platform_system != 'Linux')", + "(python_full_version < '3.12' and platform_machine != 'aarch64' and platform_system != 'Darwin') or (python_full_version < '3.12' and platform_system != 'Darwin' and platform_system != 'Linux')", + "(python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine != 'aarch64' and platform_system != 'Darwin') or (python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_system != 'Darwin' and platform_system != 'Linux')", "(python_full_version >= '3.12.4' and platform_machine != 'aarch64' and platform_system != 'Darwin') or (python_full_version >= '3.12.4' and platform_system != 'Darwin' and platform_system != 'Linux')", - "(python_full_version >= '3.13' and platform_machine != 'aarch64' and platform_system != 'Darwin') or (python_full_version >= '3.13' and platform_system != 'Darwin' and 
platform_system != 'Linux')", - "(python_full_version >= '3.13' and platform_machine != 'aarch64' and platform_system != 'Darwin') or (python_full_version >= '3.13' and platform_system != 'Darwin' and platform_system != 'Linux')", - "(python_full_version >= '3.13' and platform_machine != 'aarch64' and platform_system != 'Darwin') or (python_full_version >= '3.13' and platform_system != 'Darwin' and platform_system != 'Linux')", - "(python_full_version >= '3.13' and platform_machine != 'aarch64' and platform_system != 'Darwin') or (python_full_version >= '3.13' and platform_system != 'Darwin' and platform_system != 'Linux')", - "(python_full_version >= '3.13' and platform_machine != 'aarch64' and platform_system != 'Darwin') or (python_full_version >= '3.13' and platform_system != 'Darwin' and platform_system != 'Linux')", - "(python_full_version >= '3.13' and platform_machine != 'aarch64' and platform_system != 'Darwin') or (python_full_version >= '3.13' and platform_system != 'Darwin' and platform_system != 'Linux')", - "(python_full_version >= '3.13' and platform_machine != 'aarch64' and platform_system != 'Darwin') or (python_full_version >= '3.13' and platform_system != 'Darwin' and platform_system != 'Linux')", - "(python_full_version >= '3.13' and platform_machine != 'aarch64' and platform_system != 'Darwin') or (python_full_version >= '3.13' and platform_system != 'Darwin' and platform_system != 'Linux')", + "python_version < '0'", + "python_version < '0'", + "python_version < '0'", + "python_version < '0'", + "python_version < '0'", + "python_version < '0'", + "python_version < '0'", + "python_version < '0'", ] [[package]] @@ -180,21 +183,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/24/ce/74ed004d72a3d41933ac729765cd58aea8b61fd287fc870abc42f2d6b978/aiohttp-3.11.8-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:98f596cf59292e779bc387f22378a3d2c5e052c9fe2bf822ac4f547c6fe57758", size = 1696230 }, { url = 
"https://files.pythonhosted.org/packages/a5/22/fdba63fc388ec880e99868609761671598b01bb402e063d69c338eaf8a27/aiohttp-3.11.8-cp312-cp312-win32.whl", hash = "sha256:b64fa6b76b35b695cd3e5c42a4e568cbea8d41c9e59165e2a43da00976e2027e", size = 410669 }, { url = "https://files.pythonhosted.org/packages/7e/b8/37683614a4db2763b56376d4a532cceb0496b7984e1596e2da4b7c953166/aiohttp-3.11.8-cp312-cp312-win_amd64.whl", hash = "sha256:afba47981ff73b1794c00dce774334dcfe62664b3b4f78f278b77d21ce9daf43", size = 437086 }, - { url = "https://files.pythonhosted.org/packages/56/12/97a55a4fe36a68e6e51749c2edd546b4792bc47039d78b766273d91178af/aiohttp-3.11.8-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a81525430da5ca356fae6e889daeb6f5cc0d5f0cef88e59cdde48e2394ea1365", size = 696879 }, - { url = "https://files.pythonhosted.org/packages/da/4c/e84542b25315be8e4ec2fd06cfb31713d940fd94d378d7737f357ec7254c/aiohttp-3.11.8-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7565689e86a88c1d258351ebd14e343337b76a56ca5c0a2c1db96ec28149386f", size = 459325 }, - { url = "https://files.pythonhosted.org/packages/6b/b5/db278214e5f915c7b203ff66735d1a1e9bfc4e8f331ebe72e74e92cfab7c/aiohttp-3.11.8-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d0f9dbe9763c014c408ad51a027dc9582518e992dc63e2ffe359ac1b4840a560", size = 452061 }, - { url = "https://files.pythonhosted.org/packages/4a/64/00f313ef75b1ac3d3c0bc408da78ffa0e7698cfd9cd55ab1af3693af74ed/aiohttp-3.11.8-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8ca580edc3ccd7f6ea76ad9cf59f5a8756d338e770b5eda7be26bcda8fa7ef53", size = 1662840 }, - { url = "https://files.pythonhosted.org/packages/3b/9d/eaea2168b1bbe13c31c378e887d92802f352cf28ea09acbbffed84eb908e/aiohttp-3.11.8-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7d141631a7348038fc7b5d1a81b3c9afa9aa056188ded7902fe754028fdea5c5", size = 1716479 }, - { url = 
"https://files.pythonhosted.org/packages/f1/51/37f8e30e2053e472febe091006b0c763d02538acb1f52d6af2e5d0d7e656/aiohttp-3.11.8-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:64e6b14608a56a4c76c60daac730b0c0eeaf9d10dfc3231f7fc26521a0d628fd", size = 1772536 }, - { url = "https://files.pythonhosted.org/packages/6e/de/70b3caf16eb51cc92ba560800d52c2ce0bd71f0cb94eaa22ba0ba93dfe6a/aiohttp-3.11.8-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0983d0ce329f2f9dbeb355c3744bd6333f34e0dc56025b6b7d4f285b90acb51e", size = 1673785 }, - { url = "https://files.pythonhosted.org/packages/90/40/d9d6164452f05a5019394b0e76ff2068d5b0d85b0213f369c7435264fde0/aiohttp-3.11.8-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d96b93a46a3742880fa21bcb35c6c40cf27714ec0fb8ec85fe444d73b95131b9", size = 1601468 }, - { url = "https://files.pythonhosted.org/packages/7c/b0/e2b1964aed11246b4bdc35c0f04b4d353fd9826e33b86e382f05f338e51c/aiohttp-3.11.8-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f4f1779c3142d913c509c2ed1de8b8f920e07a5cd65ac1f57c61cfb6bfded5a4", size = 1614807 }, - { url = "https://files.pythonhosted.org/packages/22/74/f1bd4c746c74520af3fac8efc34f7191a2b07c32f595009e54049e8b3746/aiohttp-3.11.8-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:48be7cff468c9c0d86a02e6a826e1fe159094b16d5aa2c17703e7317f791b0f9", size = 1616589 }, - { url = "https://files.pythonhosted.org/packages/35/25/283d0da0573a0c32ae00b0d407e4219308c13b338b8f86e0b77339090349/aiohttp-3.11.8-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:daea456b79ca2bacc7f062845bbb1139c3b3231fc83169da5a682cf385416dd1", size = 1684232 }, - { url = "https://files.pythonhosted.org/packages/51/31/b7dd54d33dd604adb988e4fe4cd35b311f03efc4701743f307041b97e749/aiohttp-3.11.8-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:c92e763cf641e10ad9342597d20060ba23de5e411aada96660e679e3f9371189", size = 1714593 }, - { url = 
"https://files.pythonhosted.org/packages/bd/8e/76f7919864c755c90696df132686b2a9fd9725e7ad9073db4ac9b52e872f/aiohttp-3.11.8-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a750ee5a177e0f873d6b2d7d0fa6e1e7c658fc0ca8ea56438dcba2ac94bedb09", size = 1669610 }, - { url = "https://files.pythonhosted.org/packages/ec/93/bde417393de7545c194f0aefc9b4062a2b7d0e8ae8e7c85f5fa74971b433/aiohttp-3.11.8-cp313-cp313-win32.whl", hash = "sha256:4448c9c7f77bad48a6569062c0c16deb77fbb7363de1dc71ed087f66fb3b3c96", size = 409458 }, - { url = "https://files.pythonhosted.org/packages/da/e7/45d57621d9caba3c7d2687618c0e12025e477bd035834cf9ec3334e82810/aiohttp-3.11.8-cp313-cp313-win_amd64.whl", hash = "sha256:481075a1949de79a8a6841e0086f2f5f464785c592cf527ed0db2c0cbd0e1ba2", size = 435403 }, ] [[package]] @@ -475,21 +463,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/53/e1/5120fbb8438a0d718e063f70168a2975e03f00ce6b86e74b8eec079cb492/bitarray-3.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fcef31b062f756ba7eebcd7890c5d5de84b9d64ee877325257bcc9782288564a", size = 281535 }, { url = "https://files.pythonhosted.org/packages/73/75/8acebbbb4f85dcca73b8e91dde5d3e1e3e2317b36fae4f5b133c60720834/bitarray-3.0.0-cp312-cp312-win32.whl", hash = "sha256:656db7bdf1d81ec3b57b3cad7ec7276765964bcfd0eb81c5d1331f385298169c", size = 114423 }, { url = "https://files.pythonhosted.org/packages/ca/56/dadae4d4351b337de6e0269001fb40f3ebe9f72222190456713d2c1be53d/bitarray-3.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:f785af6b7cb07a9b1e5db0dea9ef9e3e8bb3d74874a0a61303eab9c16acc1999", size = 121680 }, - { url = "https://files.pythonhosted.org/packages/4f/30/07d7be4624981537d32b261dc48a16b03757cc9d88f66012d93acaf11663/bitarray-3.0.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7cb885c043000924554fe2124d13084c8fdae03aec52c4086915cd4cb87fe8be", size = 172147 }, - { url = 
"https://files.pythonhosted.org/packages/f0/e9/be1fa2828bad9cb32e1309e6dbd05adcc41679297d9e96bbb372be928e38/bitarray-3.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7814c9924a0b30ecd401f02f082d8697fc5a5be3f8d407efa6e34531ff3c306a", size = 123319 }, - { url = "https://files.pythonhosted.org/packages/22/28/33601d276a6eb76e40fe8a61c61f59cc9ff6d9ecf0b676235c02689475b8/bitarray-3.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:bcf524a087b143ba736aebbb054bb399d49e77cf7c04ed24c728e411adc82bfa", size = 121236 }, - { url = "https://files.pythonhosted.org/packages/85/d3/f36b213ffae8f9c8e4c6f12a91e18c06570a04f42d5a1bda4303380f2639/bitarray-3.0.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1d5abf1d6d910599ac16afdd9a0ed3e24f3b46af57f3070cf2792f236f36e0b", size = 287395 }, - { url = "https://files.pythonhosted.org/packages/b7/1a/2da3b00d876883b05ffd3be9b1311858b48d4a26579f8647860e271c5385/bitarray-3.0.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9929051feeaf8d948cc0b1c9ce57748079a941a1a15c89f6014edf18adaade84", size = 301501 }, - { url = "https://files.pythonhosted.org/packages/88/b9/c1b5af8d1c918f1ee98748f7f7270f932f531c2259dd578c0edcf16ec73e/bitarray-3.0.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96cf0898f8060b2d3ae491762ae871b071212ded97ff9e1e3a5229e9fefe544c", size = 304804 }, - { url = "https://files.pythonhosted.org/packages/92/24/81a10862856419638c0db13e04de7cbf19938353517a67e4848c691f0b7c/bitarray-3.0.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab37da66a8736ad5a75a58034180e92c41e864da0152b84e71fcc253a2f69cd4", size = 288507 }, - { url = "https://files.pythonhosted.org/packages/da/70/a093af92ef7b207a59087e3b5819e03767fbdda9dd56aada3a4ee25a1fbd/bitarray-3.0.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:beeb79e476d19b91fd6a3439853e4e5ba1b3b475920fa40d62bde719c8af786f", size = 278905 }, - { url = "https://files.pythonhosted.org/packages/fb/40/0925c6079c4b282b16eb9085f82df0cdf1f787fb4c67fd4baca3e37acf7f/bitarray-3.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f75fc0198c955d840b836059bd43e0993edbf119923029ca60c4fc017cefa54a", size = 281909 }, - { url = "https://files.pythonhosted.org/packages/61/4b/e11754a5d34cb997250d8019b1fe555d4c06fe2d2a68b0bf7c5580537046/bitarray-3.0.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:f12cc7c7638074918cdcc7491aff897df921b092ffd877227892d2686e98f876", size = 274711 }, - { url = "https://files.pythonhosted.org/packages/5b/78/39513f75423959ee2d82a82e10296b6a7bc7d880b16d714980a6752ef33b/bitarray-3.0.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:dbe1084935b942fab206e609fa1ed3f46ad1f2612fb4833e177e9b2a5e006c96", size = 297038 }, - { url = "https://files.pythonhosted.org/packages/af/a2/5cb81f8773a479de7c06cc1ada36d5cc5a8ebcd8715013e1c4e01a76e84a/bitarray-3.0.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:ac06dd72ee1e1b6e312504d06f75220b5894af1fb58f0c20643698f5122aea76", size = 309814 }, - { url = "https://files.pythonhosted.org/packages/03/3e/795b57c6f6eea61c47d0716e1d60219218028b1f260f7328802eac684964/bitarray-3.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:00f9a88c56e373009ac3c73c55205cfbd9683fbd247e2f9a64bae3da78795252", size = 281564 }, - { url = "https://files.pythonhosted.org/packages/f6/31/5914002ae4dd0e0079f8bccfd0647119cff364280d106108a19bd2511933/bitarray-3.0.0-cp313-cp313-win32.whl", hash = "sha256:9c6e52005e91803eb4e08c0a08a481fb55ddce97f926bae1f6fa61b3396b5b61", size = 114404 }, - { url = "https://files.pythonhosted.org/packages/76/0a/184f85a1739db841ae8fbb1d9ec028240d5a351e36abec9cd020de889dab/bitarray-3.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:cb98d5b6eac4b2cf2a5a69f60a9c499844b8bea207059e9fc45c752436e6bb49", size = 121672 }, ] [[package]] @@ -617,17 +590,6 @@ 
wheels = [ { url = "https://files.pythonhosted.org/packages/d4/38/ca8a4f639065f14ae0f1d9751e70447a261f1a30fa7547a828ae08142465/cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8", size = 488736 }, { url = "https://files.pythonhosted.org/packages/86/c5/28b2d6f799ec0bdecf44dced2ec5ed43e0eb63097b0f58c293583b406582/cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", size = 172448 }, { url = "https://files.pythonhosted.org/packages/50/b9/db34c4755a7bd1cb2d1603ac3863f22bcecbd1ba29e5ee841a4bc510b294/cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", size = 181976 }, - { url = "https://files.pythonhosted.org/packages/8d/f8/dd6c246b148639254dad4d6803eb6a54e8c85c6e11ec9df2cffa87571dbe/cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e", size = 182989 }, - { url = "https://files.pythonhosted.org/packages/8b/f1/672d303ddf17c24fc83afd712316fda78dc6fce1cd53011b839483e1ecc8/cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2", size = 178802 }, - { url = "https://files.pythonhosted.org/packages/0e/2d/eab2e858a91fdff70533cab61dcff4a1f55ec60425832ddfdc9cd36bc8af/cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3", size = 454792 }, - { url = "https://files.pythonhosted.org/packages/75/b2/fbaec7c4455c604e29388d55599b99ebcc250a60050610fadde58932b7ee/cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683", size = 478893 }, - { url = 
"https://files.pythonhosted.org/packages/4f/b7/6e4a2162178bf1935c336d4da8a9352cccab4d3a5d7914065490f08c0690/cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5", size = 485810 }, - { url = "https://files.pythonhosted.org/packages/c7/8a/1d0e4a9c26e54746dc08c2c6c037889124d4f59dffd853a659fa545f1b40/cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4", size = 471200 }, - { url = "https://files.pythonhosted.org/packages/26/9f/1aab65a6c0db35f43c4d1b4f580e8df53914310afc10ae0397d29d697af4/cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd", size = 479447 }, - { url = "https://files.pythonhosted.org/packages/5f/e4/fb8b3dd8dc0e98edf1135ff067ae070bb32ef9d509d6cb0f538cd6f7483f/cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed", size = 484358 }, - { url = "https://files.pythonhosted.org/packages/f1/47/d7145bf2dc04684935d57d67dff9d6d795b2ba2796806bb109864be3a151/cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9", size = 488469 }, - { url = "https://files.pythonhosted.org/packages/bf/ee/f94057fa6426481d663b88637a9a10e859e492c73d0384514a17d78ee205/cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d", size = 172475 }, - { url = "https://files.pythonhosted.org/packages/7c/fc/6a8cb64e5f0324877d503c854da15d76c1e50eb722e320b15345c4d0c6de/cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", size = 182009 }, ] [[package]] @@ -671,19 +633,6 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/13/0e/9c8d4cb99c98c1007cc11eda969ebfe837bbbd0acdb4736d228ccaabcd22/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1", size = 146192 }, { url = "https://files.pythonhosted.org/packages/b2/21/2b6b5b860781a0b49427309cb8670785aa543fb2178de875b87b9cc97746/charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35", size = 95550 }, { url = "https://files.pythonhosted.org/packages/21/5b/1b390b03b1d16c7e382b561c5329f83cc06623916aab983e8ab9239c7d5c/charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f", size = 102785 }, - { url = "https://files.pythonhosted.org/packages/38/94/ce8e6f63d18049672c76d07d119304e1e2d7c6098f0841b51c666e9f44a0/charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda", size = 195698 }, - { url = "https://files.pythonhosted.org/packages/24/2e/dfdd9770664aae179a96561cc6952ff08f9a8cd09a908f259a9dfa063568/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313", size = 140162 }, - { url = "https://files.pythonhosted.org/packages/24/4e/f646b9093cff8fc86f2d60af2de4dc17c759de9d554f130b140ea4738ca6/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9", size = 150263 }, - { url = "https://files.pythonhosted.org/packages/5e/67/2937f8d548c3ef6e2f9aab0f6e21001056f692d43282b165e7c56023e6dd/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b", size = 142966 }, - 
{ url = "https://files.pythonhosted.org/packages/52/ed/b7f4f07de100bdb95c1756d3a4d17b90c1a3c53715c1a476f8738058e0fa/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11", size = 144992 }, - { url = "https://files.pythonhosted.org/packages/96/2c/d49710a6dbcd3776265f4c923bb73ebe83933dfbaa841c5da850fe0fd20b/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f", size = 147162 }, - { url = "https://files.pythonhosted.org/packages/b4/41/35ff1f9a6bd380303dea55e44c4933b4cc3c4850988927d4082ada230273/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd", size = 140972 }, - { url = "https://files.pythonhosted.org/packages/fb/43/c6a0b685fe6910d08ba971f62cd9c3e862a85770395ba5d9cad4fede33ab/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2", size = 149095 }, - { url = "https://files.pythonhosted.org/packages/4c/ff/a9a504662452e2d2878512115638966e75633519ec11f25fca3d2049a94a/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886", size = 152668 }, - { url = "https://files.pythonhosted.org/packages/6c/71/189996b6d9a4b932564701628af5cee6716733e9165af1d5e1b285c530ed/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601", size = 150073 }, - { url = "https://files.pythonhosted.org/packages/e4/93/946a86ce20790e11312c87c75ba68d5f6ad2208cfb52b2d6a2c32840d922/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd", size = 145732 }, - { url = "https://files.pythonhosted.org/packages/cd/e5/131d2fb1b0dddafc37be4f3a2fa79aa4c037368be9423061dccadfd90091/charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407", size = 95391 }, - { url = "https://files.pythonhosted.org/packages/27/f2/4f9a69cc7712b9b5ad8fdb87039fd89abba997ad5cbe690d1835d40405b0/charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971", size = 102702 }, { url = "https://files.pythonhosted.org/packages/0e/f6/65ecc6878a89bb1c23a086ea335ad4bf21a588990c3f535a227b9eea9108/charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85", size = 49767 }, ] @@ -974,15 +923,16 @@ sdist = { url = "https://files.pythonhosted.org/packages/7d/7d/60ee3f2b16d9bfdfa [[package]] name = "duckduckgo-search" -version = "6.3.7" +version = "7.2.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "click" }, + { name = "lxml" }, { name = "primp" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/6d/b7/e6fb81f7629ce690286179dc3690e6c098ce5ef2157735de43d17485ca64/duckduckgo_search-6.3.7.tar.gz", hash = "sha256:53d84966429a6377647e2a1ea7224b657575c7a4d506729bdb837e4ee12915ed", size = 33430 } +sdist = { url = "https://files.pythonhosted.org/packages/0c/e5/8ac183cadbefa444183f4aca22140b44ed399e80a93caf0b338a043a3c7f/duckduckgo_search-7.2.1.tar.gz", hash = "sha256:cb214b6cd9505a41c228445a9c254620b93519c59292662d62ef19d0220618a0", size = 23897 } wheels = [ - { url = "https://files.pythonhosted.org/packages/9f/e6/fef4e3d72be75553268d034ff74433746ced67e4f9731f123979d3503d6c/duckduckgo_search-6.3.7-py3-none-any.whl", hash = "sha256:6a831a27977751e8928222f04c99a5d069ff80e2a7c78b699c9b9ac6cb48c41b", size = 27762 }, + { url = 
"https://files.pythonhosted.org/packages/bd/8f/ee72af555cd58feb928ff0fd3977913f4ecd0ce8ad92cf4031c36de91776/duckduckgo_search-7.2.1-py3-none-any.whl", hash = "sha256:72ebbf6ad8759e3c3c79521cd66256e7a4ac741c522fd9342db94de91745ef87", size = 19720 }, ] [[package]] @@ -1236,14 +1186,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/9c/ec/ade054097976c3d6debc9032e09a351505a0196aa5493edf021be376f75e/fonttools-4.55.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:54153c49913f45065c8d9e6d0c101396725c5621c8aee744719300f79771d75a", size = 5001832 }, { url = "https://files.pythonhosted.org/packages/e2/cd/233f0e31ad799bb91fc78099c8b4e5ec43b85a131688519640d6bae46f6a/fonttools-4.55.3-cp312-cp312-win32.whl", hash = "sha256:827e95fdbbd3e51f8b459af5ea10ecb4e30af50221ca103bea68218e9615de07", size = 2162228 }, { url = "https://files.pythonhosted.org/packages/46/45/a498b5291f6c0d91b2394b1ed7447442a57d1c9b9cf8f439aee3c316a56e/fonttools-4.55.3-cp312-cp312-win_amd64.whl", hash = "sha256:e6e8766eeeb2de759e862004aa11a9ea3d6f6d5ec710551a88b476192b64fd54", size = 2209118 }, - { url = "https://files.pythonhosted.org/packages/9c/9f/00142a19bad96eeeb1aed93f567adc19b7f2c1af6f5bc0a1c3de90b4b1ac/fonttools-4.55.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a430178ad3e650e695167cb53242dae3477b35c95bef6525b074d87493c4bf29", size = 2752812 }, - { url = "https://files.pythonhosted.org/packages/b0/20/14b8250d63ba65e162091fb0dda07730f90c303bbf5257e9ddacec7230d9/fonttools-4.55.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:529cef2ce91dc44f8e407cc567fae6e49a1786f2fefefa73a294704c415322a4", size = 2291521 }, - { url = "https://files.pythonhosted.org/packages/34/47/a681cfd10245eb74f65e491a934053ec75c4af639655446558f29818e45e/fonttools-4.55.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e75f12c82127486fac2d8bfbf5bf058202f54bf4f158d367e41647b972342ca", size = 4770980 }, - { url = 
"https://files.pythonhosted.org/packages/d2/6c/a7066afc19db0705a12efd812e19c32cde2b9514eb714659522f2ebd60b6/fonttools-4.55.3-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:859c358ebf41db18fb72342d3080bce67c02b39e86b9fbcf1610cca14984841b", size = 4845534 }, - { url = "https://files.pythonhosted.org/packages/0c/a2/3c204fbabbfd845d9bdcab9ae35279d41e9a4bf5c80a0a2708f9c5a195d6/fonttools-4.55.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:546565028e244a701f73df6d8dd6be489d01617863ec0c6a42fa25bf45d43048", size = 4753910 }, - { url = "https://files.pythonhosted.org/packages/6e/8c/b4cb3592880340b89e4ef6601b531780bba73862332a6451d78fe135d6cb/fonttools-4.55.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:aca318b77f23523309eec4475d1fbbb00a6b133eb766a8bdc401faba91261abe", size = 4976411 }, - { url = "https://files.pythonhosted.org/packages/fc/a8/4bf98840ff89fcc188470b59daec57322178bf36d2f4f756cd19a42a826b/fonttools-4.55.3-cp313-cp313-win32.whl", hash = "sha256:8c5ec45428edaa7022f1c949a632a6f298edc7b481312fc7dc258921e9399628", size = 2160178 }, - { url = "https://files.pythonhosted.org/packages/e6/57/4cc35004605416df3225ff362f3455cf09765db00df578ae9e46d0fefd23/fonttools-4.55.3-cp313-cp313-win_amd64.whl", hash = "sha256:11e5de1ee0d95af4ae23c1a138b184b7f06e0b6abacabf1d0db41c90b03d834b", size = 2206102 }, { url = "https://files.pythonhosted.org/packages/99/3b/406d17b1f63e04a82aa621936e6e1c53a8c05458abd66300ac85ea7f9ae9/fonttools-4.55.3-py3-none-any.whl", hash = "sha256:f412604ccbeee81b091b420272841e5ec5ef68967a9790e80bffd0e30b8e2977", size = 1111638 }, ] @@ -1297,21 +1239,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/37/e0/47f87544055b3349b633a03c4d94b405956cf2437f4ab46d0928b74b7526/frozenlist-1.5.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:52ef692a4bc60a6dd57f507429636c2af8b6046db8b31b18dac02cbc8f507f7f", size = 280569 }, { url = 
"https://files.pythonhosted.org/packages/f9/7c/490133c160fb6b84ed374c266f42800e33b50c3bbab1652764e6e1fc498a/frozenlist-1.5.0-cp312-cp312-win32.whl", hash = "sha256:29d94c256679247b33a3dc96cce0f93cbc69c23bf75ff715919332fdbb6a32b8", size = 44721 }, { url = "https://files.pythonhosted.org/packages/b1/56/4e45136ffc6bdbfa68c29ca56ef53783ef4c2fd395f7cbf99a2624aa9aaa/frozenlist-1.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:8969190d709e7c48ea386db202d708eb94bdb29207a1f269bab1196ce0dcca1f", size = 51329 }, - { url = "https://files.pythonhosted.org/packages/da/3b/915f0bca8a7ea04483622e84a9bd90033bab54bdf485479556c74fd5eaf5/frozenlist-1.5.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7a1a048f9215c90973402e26c01d1cff8a209e1f1b53f72b95c13db61b00f953", size = 91538 }, - { url = "https://files.pythonhosted.org/packages/c7/d1/a7c98aad7e44afe5306a2b068434a5830f1470675f0e715abb86eb15f15b/frozenlist-1.5.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:dd47a5181ce5fcb463b5d9e17ecfdb02b678cca31280639255ce9d0e5aa67af0", size = 52849 }, - { url = "https://files.pythonhosted.org/packages/3a/c8/76f23bf9ab15d5f760eb48701909645f686f9c64fbb8982674c241fbef14/frozenlist-1.5.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1431d60b36d15cda188ea222033eec8e0eab488f39a272461f2e6d9e1a8e63c2", size = 50583 }, - { url = "https://files.pythonhosted.org/packages/1f/22/462a3dd093d11df623179d7754a3b3269de3b42de2808cddef50ee0f4f48/frozenlist-1.5.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6482a5851f5d72767fbd0e507e80737f9c8646ae7fd303def99bfe813f76cf7f", size = 265636 }, - { url = "https://files.pythonhosted.org/packages/80/cf/e075e407fc2ae7328155a1cd7e22f932773c8073c1fc78016607d19cc3e5/frozenlist-1.5.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:44c49271a937625619e862baacbd037a7ef86dd1ee215afc298a417ff3270608", size = 270214 }, - { url = 
"https://files.pythonhosted.org/packages/a1/58/0642d061d5de779f39c50cbb00df49682832923f3d2ebfb0fedf02d05f7f/frozenlist-1.5.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:12f78f98c2f1c2429d42e6a485f433722b0061d5c0b0139efa64f396efb5886b", size = 273905 }, - { url = "https://files.pythonhosted.org/packages/ab/66/3fe0f5f8f2add5b4ab7aa4e199f767fd3b55da26e3ca4ce2cc36698e50c4/frozenlist-1.5.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce3aa154c452d2467487765e3adc730a8c153af77ad84096bc19ce19a2400840", size = 250542 }, - { url = "https://files.pythonhosted.org/packages/f6/b8/260791bde9198c87a465224e0e2bb62c4e716f5d198fc3a1dacc4895dbd1/frozenlist-1.5.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b7dc0c4338e6b8b091e8faf0db3168a37101943e687f373dce00959583f7439", size = 267026 }, - { url = "https://files.pythonhosted.org/packages/2e/a4/3d24f88c527f08f8d44ade24eaee83b2627793fa62fa07cbb7ff7a2f7d42/frozenlist-1.5.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:45e0896250900b5aa25180f9aec243e84e92ac84bd4a74d9ad4138ef3f5c97de", size = 257690 }, - { url = "https://files.pythonhosted.org/packages/de/9a/d311d660420b2beeff3459b6626f2ab4fb236d07afbdac034a4371fe696e/frozenlist-1.5.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:561eb1c9579d495fddb6da8959fd2a1fca2c6d060d4113f5844b433fc02f2641", size = 253893 }, - { url = "https://files.pythonhosted.org/packages/c6/23/e491aadc25b56eabd0f18c53bb19f3cdc6de30b2129ee0bc39cd387cd560/frozenlist-1.5.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:df6e2f325bfee1f49f81aaac97d2aa757c7646534a06f8f577ce184afe2f0a9e", size = 267006 }, - { url = "https://files.pythonhosted.org/packages/08/c4/ab918ce636a35fb974d13d666dcbe03969592aeca6c3ab3835acff01f79c/frozenlist-1.5.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = 
"sha256:140228863501b44b809fb39ec56b5d4071f4d0aa6d216c19cbb08b8c5a7eadb9", size = 276157 }, - { url = "https://files.pythonhosted.org/packages/c0/29/3b7a0bbbbe5a34833ba26f686aabfe982924adbdcafdc294a7a129c31688/frozenlist-1.5.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7707a25d6a77f5d27ea7dc7d1fc608aa0a478193823f88511ef5e6b8a48f9d03", size = 264642 }, - { url = "https://files.pythonhosted.org/packages/ab/42/0595b3dbffc2e82d7fe658c12d5a5bafcd7516c6bf2d1d1feb5387caa9c1/frozenlist-1.5.0-cp313-cp313-win32.whl", hash = "sha256:31a9ac2b38ab9b5a8933b693db4939764ad3f299fcaa931a3e605bc3460e693c", size = 44914 }, - { url = "https://files.pythonhosted.org/packages/17/c4/b7db1206a3fea44bf3b838ca61deb6f74424a8a5db1dd53ecb21da669be6/frozenlist-1.5.0-cp313-cp313-win_amd64.whl", hash = "sha256:11aabdd62b8b9c4b84081a3c246506d1cddd2dd93ff0ad53ede5defec7886b28", size = 51167 }, { url = "https://files.pythonhosted.org/packages/c6/c8/a5be5b7550c10858fcf9b0ea054baccab474da77d37f1e828ce043a3a5d4/frozenlist-1.5.0-py3-none-any.whl", hash = "sha256:d994863bba198a4a518b467bb971c56e1db3f180a25c6cf7bb1949c267f748c3", size = 11901 }, ] @@ -1484,6 +1411,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/be/8a/fe34d2f3f9470a27b01c9e76226965863f153d5fbe276f83608562e49c04/google_auth_httplib2-0.2.0-py2.py3-none-any.whl", hash = "sha256:b65a0a2123300dd71281a7bf6e64d65a0759287df52729bdd1ae2e47dc311a3d", size = 9253 }, ] +[[package]] +name = "google-auth-oauthlib" +version = "1.2.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "google-auth" }, + { name = "requests-oauthlib" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/cc/0f/1772edb8d75ecf6280f1c7f51cbcebe274e8b17878b382f63738fd96cee5/google_auth_oauthlib-1.2.1.tar.gz", hash = "sha256:afd0cad092a2eaa53cd8e8298557d6de1034c6cb4a740500b5357b648af97263", size = 24970 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/1a/8e/22a28dfbd218033e4eeaf3a0533b2b54852b6530da0c0fe934f0cc494b29/google_auth_oauthlib-1.2.1-py2.py3-none-any.whl", hash = "sha256:2d58a27262d55aa1b87678c3ba7142a080098cbc2024f903c62355deb235d91f", size = 24930 }, +] + [[package]] name = "google-cloud-core" version = "2.4.1" @@ -1598,22 +1538,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/19/c5/36384a06f748044d06bdd8776e231fadf92fc896bd12cb1c9f5a1bda9578/greenlet-3.1.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b7cede291382a78f7bb5f04a529cb18e068dd29e0fb27376074b6d0317bf4dd0", size = 1135975 }, { url = "https://files.pythonhosted.org/packages/38/f9/c0a0eb61bdf808d23266ecf1d63309f0e1471f284300ce6dac0ae1231881/greenlet-3.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:23f20bb60ae298d7d8656c6ec6db134bca379ecefadb0b19ce6f19d1f232a942", size = 1163955 }, { url = "https://files.pythonhosted.org/packages/43/21/a5d9df1d21514883333fc86584c07c2b49ba7c602e670b174bd73cfc9c7f/greenlet-3.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:7124e16b4c55d417577c2077be379514321916d5790fa287c9ed6f23bd2ffd01", size = 299655 }, - { url = "https://files.pythonhosted.org/packages/f3/57/0db4940cd7bb461365ca8d6fd53e68254c9dbbcc2b452e69d0d41f10a85e/greenlet-3.1.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:05175c27cb459dcfc05d026c4232f9de8913ed006d42713cb8a5137bd49375f1", size = 272990 }, - { url = "https://files.pythonhosted.org/packages/1c/ec/423d113c9f74e5e402e175b157203e9102feeb7088cee844d735b28ef963/greenlet-3.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:935e943ec47c4afab8965954bf49bfa639c05d4ccf9ef6e924188f762145c0ff", size = 649175 }, - { url = "https://files.pythonhosted.org/packages/a9/46/ddbd2db9ff209186b7b7c621d1432e2f21714adc988703dbdd0e65155c77/greenlet-3.1.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:667a9706c970cb552ede35aee17339a18e8f2a87a51fba2ed39ceeeb1004798a", 
size = 663425 }, - { url = "https://files.pythonhosted.org/packages/bc/f9/9c82d6b2b04aa37e38e74f0c429aece5eeb02bab6e3b98e7db89b23d94c6/greenlet-3.1.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8a678974d1f3aa55f6cc34dc480169d58f2e6d8958895d68845fa4ab566509e", size = 657736 }, - { url = "https://files.pythonhosted.org/packages/d9/42/b87bc2a81e3a62c3de2b0d550bf91a86939442b7ff85abb94eec3fc0e6aa/greenlet-3.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efc0f674aa41b92da8c49e0346318c6075d734994c3c4e4430b1c3f853e498e4", size = 660347 }, - { url = "https://files.pythonhosted.org/packages/37/fa/71599c3fd06336cdc3eac52e6871cfebab4d9d70674a9a9e7a482c318e99/greenlet-3.1.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0153404a4bb921f0ff1abeb5ce8a5131da56b953eda6e14b88dc6bbc04d2049e", size = 615583 }, - { url = "https://files.pythonhosted.org/packages/4e/96/e9ef85de031703ee7a4483489b40cf307f93c1824a02e903106f2ea315fe/greenlet-3.1.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:275f72decf9932639c1c6dd1013a1bc266438eb32710016a1c742df5da6e60a1", size = 1133039 }, - { url = "https://files.pythonhosted.org/packages/87/76/b2b6362accd69f2d1889db61a18c94bc743e961e3cab344c2effaa4b4a25/greenlet-3.1.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c4aab7f6381f38a4b42f269057aee279ab0fc7bf2e929e3d4abfae97b682a12c", size = 1160716 }, - { url = "https://files.pythonhosted.org/packages/1f/1b/54336d876186920e185066d8c3024ad55f21d7cc3683c856127ddb7b13ce/greenlet-3.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:b42703b1cf69f2aa1df7d1030b9d77d3e584a70755674d60e710f0af570f3761", size = 299490 }, - { url = "https://files.pythonhosted.org/packages/5f/17/bea55bf36990e1638a2af5ba10c1640273ef20f627962cf97107f1e5d637/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1695e76146579f8c06c1509c7ce4dfe0706f49c6831a817ac04eebb2fd02011", size = 643731 }, - { 
url = "https://files.pythonhosted.org/packages/78/d2/aa3d2157f9ab742a08e0fd8f77d4699f37c22adfbfeb0c610a186b5f75e0/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7876452af029456b3f3549b696bb36a06db7c90747740c5302f74a9e9fa14b13", size = 649304 }, - { url = "https://files.pythonhosted.org/packages/f1/8e/d0aeffe69e53ccff5a28fa86f07ad1d2d2d6537a9506229431a2a02e2f15/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ead44c85f8ab905852d3de8d86f6f8baf77109f9da589cb4fa142bd3b57b475", size = 646537 }, - { url = "https://files.pythonhosted.org/packages/05/79/e15408220bbb989469c8871062c97c6c9136770657ba779711b90870d867/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8320f64b777d00dd7ccdade271eaf0cad6636343293a25074cc5566160e4de7b", size = 642506 }, - { url = "https://files.pythonhosted.org/packages/18/87/470e01a940307796f1d25f8167b551a968540fbe0551c0ebb853cb527dd6/greenlet-3.1.1-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6510bf84a6b643dabba74d3049ead221257603a253d0a9873f55f6a59a65f822", size = 602753 }, - { url = "https://files.pythonhosted.org/packages/e2/72/576815ba674eddc3c25028238f74d7b8068902b3968cbe456771b166455e/greenlet-3.1.1-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:04b013dc07c96f83134b1e99888e7a79979f1a247e2a9f59697fa14b5862ed01", size = 1122731 }, - { url = "https://files.pythonhosted.org/packages/ac/38/08cc303ddddc4b3d7c628c3039a61a3aae36c241ed01393d00c2fd663473/greenlet-3.1.1-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:411f015496fec93c1c8cd4e5238da364e1da7a124bcb293f085bf2860c32c6f6", size = 1142112 }, ] [[package]] @@ -1640,15 +1564,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d1/94/16550ad6b3f13b96f0856ee5dfc2554efac28539ee84a51d7b14526da985/grpcio-1.67.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:699e964923b70f3101393710793289e42845791ea07565654ada0969522d0a38", size = 6149369 }, { url = "https://files.pythonhosted.org/packages/33/0d/4c3b2587e8ad7f121b597329e6c2620374fccbc2e4e1aa3c73ccc670fde4/grpcio-1.67.1-cp312-cp312-win32.whl", hash = "sha256:4e7b904484a634a0fff132958dabdb10d63e0927398273917da3ee103e8d1f78", size = 3599176 }, { url = "https://files.pythonhosted.org/packages/7d/36/0c03e2d80db69e2472cf81c6123aa7d14741de7cf790117291a703ae6ae1/grpcio-1.67.1-cp312-cp312-win_amd64.whl", hash = "sha256:5721e66a594a6c4204458004852719b38f3d5522082be9061d6510b455c90afc", size = 4346574 }, - { url = "https://files.pythonhosted.org/packages/12/d2/2f032b7a153c7723ea3dea08bffa4bcaca9e0e5bdf643ce565b76da87461/grpcio-1.67.1-cp313-cp313-linux_armv7l.whl", hash = "sha256:aa0162e56fd10a5547fac8774c4899fc3e18c1aa4a4759d0ce2cd00d3696ea6b", size = 5091487 }, - { url = "https://files.pythonhosted.org/packages/d0/ae/ea2ff6bd2475a082eb97db1104a903cf5fc57c88c87c10b3c3f41a184fc0/grpcio-1.67.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:beee96c8c0b1a75d556fe57b92b58b4347c77a65781ee2ac749d550f2a365dc1", size = 10943530 }, - { url = "https://files.pythonhosted.org/packages/07/62/646be83d1a78edf8d69b56647327c9afc223e3140a744c59b25fbb279c3b/grpcio-1.67.1-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:a93deda571a1bf94ec1f6fcda2872dad3ae538700d94dc283c672a3b508ba3af", size = 5589079 }, - { url = "https://files.pythonhosted.org/packages/d0/25/71513d0a1b2072ce80d7f5909a93596b7ed10348b2ea4fdcbad23f6017bf/grpcio-1.67.1-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e6f255980afef598a9e64a24efce87b625e3e3c80a45162d111a461a9f92955", size = 6213542 }, - { url = "https://files.pythonhosted.org/packages/76/9a/d21236297111052dcb5dc85cd77dc7bf25ba67a0f55ae028b2af19a704bc/grpcio-1.67.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e838cad2176ebd5d4a8bb03955138d6589ce9e2ce5d51c3ada34396dbd2dba8", size = 5850211 }, - 
{ url = "https://files.pythonhosted.org/packages/2d/fe/70b1da9037f5055be14f359026c238821b9bcf6ca38a8d760f59a589aacd/grpcio-1.67.1-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:a6703916c43b1d468d0756c8077b12017a9fcb6a1ef13faf49e67d20d7ebda62", size = 6572129 }, - { url = "https://files.pythonhosted.org/packages/74/0d/7df509a2cd2a54814598caf2fb759f3e0b93764431ff410f2175a6efb9e4/grpcio-1.67.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:917e8d8994eed1d86b907ba2a61b9f0aef27a2155bca6cbb322430fc7135b7bb", size = 6149819 }, - { url = "https://files.pythonhosted.org/packages/0a/08/bc3b0155600898fd10f16b79054e1cca6cb644fa3c250c0fe59385df5e6f/grpcio-1.67.1-cp313-cp313-win32.whl", hash = "sha256:e279330bef1744040db8fc432becc8a727b84f456ab62b744d3fdb83f327e121", size = 3596561 }, - { url = "https://files.pythonhosted.org/packages/5a/96/44759eca966720d0f3e1b105c43f8ad4590c97bf8eb3cd489656e9590baa/grpcio-1.67.1-cp313-cp313-win_amd64.whl", hash = "sha256:fa0c739ad8b1996bd24823950e3cb5152ae91fca1c09cc791190bf1627ffefba", size = 4346042 }, ] [[package]] @@ -1770,13 +1685,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/52/d8/254d16a31d543073a0e57f1c329ca7378d8924e7e292eda72d0064987486/httptools-0.6.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ec4f178901fa1834d4a060320d2f3abc5c9e39766953d038f1458cb885f47e81", size = 485289 }, { url = "https://files.pythonhosted.org/packages/5f/3c/4aee161b4b7a971660b8be71a92c24d6c64372c1ab3ae7f366b3680df20f/httptools-0.6.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f9eb89ecf8b290f2e293325c646a211ff1c2493222798bb80a530c5e7502494f", size = 489779 }, { url = "https://files.pythonhosted.org/packages/12/b7/5cae71a8868e555f3f67a50ee7f673ce36eac970f029c0c5e9d584352961/httptools-0.6.4-cp312-cp312-win_amd64.whl", hash = "sha256:db78cb9ca56b59b016e64b6031eda5653be0589dba2b1b43453f6e8b405a0970", size = 88634 }, - { url = 
"https://files.pythonhosted.org/packages/94/a3/9fe9ad23fd35f7de6b91eeb60848986058bd8b5a5c1e256f5860a160cc3e/httptools-0.6.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ade273d7e767d5fae13fa637f4d53b6e961fb7fd93c7797562663f0171c26660", size = 197214 }, - { url = "https://files.pythonhosted.org/packages/ea/d9/82d5e68bab783b632023f2fa31db20bebb4e89dfc4d2293945fd68484ee4/httptools-0.6.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:856f4bc0478ae143bad54a4242fccb1f3f86a6e1be5548fecfd4102061b3a083", size = 102431 }, - { url = "https://files.pythonhosted.org/packages/96/c1/cb499655cbdbfb57b577734fde02f6fa0bbc3fe9fb4d87b742b512908dff/httptools-0.6.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:322d20ea9cdd1fa98bd6a74b77e2ec5b818abdc3d36695ab402a0de8ef2865a3", size = 473121 }, - { url = "https://files.pythonhosted.org/packages/af/71/ee32fd358f8a3bb199b03261f10921716990808a675d8160b5383487a317/httptools-0.6.4-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d87b29bd4486c0093fc64dea80231f7c7f7eb4dc70ae394d70a495ab8436071", size = 473805 }, - { url = "https://files.pythonhosted.org/packages/8a/0a/0d4df132bfca1507114198b766f1737d57580c9ad1cf93c1ff673e3387be/httptools-0.6.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:342dd6946aa6bda4b8f18c734576106b8a31f2fe31492881a9a160ec84ff4bd5", size = 448858 }, - { url = "https://files.pythonhosted.org/packages/1e/6a/787004fdef2cabea27bad1073bf6a33f2437b4dbd3b6fb4a9d71172b1c7c/httptools-0.6.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b36913ba52008249223042dca46e69967985fb4051951f94357ea681e1f5dc0", size = 452042 }, - { url = "https://files.pythonhosted.org/packages/4d/dc/7decab5c404d1d2cdc1bb330b1bf70e83d6af0396fd4fc76fc60c0d522bf/httptools-0.6.4-cp313-cp313-win_amd64.whl", hash = "sha256:28908df1b9bb8187393d5b5db91435ccc9c8e891657f9cbb42a2541b44c82fc8", size = 87682 }, ] [[package]] @@ -1937,21 
+1845,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/5c/37/3394bb47bac1ad2cb0465601f86828a0518d07828a650722e55268cdb7e6/jiter-0.8.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bf55846c7b7a680eebaf9c3c48d630e1bf51bdf76c68a5f654b8524335b0ad29", size = 503730 }, { url = "https://files.pythonhosted.org/packages/f9/e2/253fc1fa59103bb4e3aa0665d6ceb1818df1cd7bf3eb492c4dad229b1cd4/jiter-0.8.2-cp312-cp312-win32.whl", hash = "sha256:7efe4853ecd3d6110301665a5178b9856be7e2a9485f49d91aa4d737ad2ae49e", size = 203375 }, { url = "https://files.pythonhosted.org/packages/41/69/6d4bbe66b3b3b4507e47aa1dd5d075919ad242b4b1115b3f80eecd443687/jiter-0.8.2-cp312-cp312-win_amd64.whl", hash = "sha256:83c0efd80b29695058d0fd2fa8a556490dbce9804eac3e281f373bbc99045f6c", size = 204740 }, - { url = "https://files.pythonhosted.org/packages/6c/b0/bfa1f6f2c956b948802ef5a021281978bf53b7a6ca54bb126fd88a5d014e/jiter-0.8.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:ca1f08b8e43dc3bd0594c992fb1fd2f7ce87f7bf0d44358198d6da8034afdf84", size = 301190 }, - { url = "https://files.pythonhosted.org/packages/a4/8f/396ddb4e292b5ea57e45ade5dc48229556b9044bad29a3b4b2dddeaedd52/jiter-0.8.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5672a86d55416ccd214c778efccf3266b84f87b89063b582167d803246354be4", size = 309334 }, - { url = "https://files.pythonhosted.org/packages/7f/68/805978f2f446fa6362ba0cc2e4489b945695940656edd844e110a61c98f8/jiter-0.8.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58dc9bc9767a1101f4e5e22db1b652161a225874d66f0e5cb8e2c7d1c438b587", size = 333918 }, - { url = "https://files.pythonhosted.org/packages/b3/99/0f71f7be667c33403fa9706e5b50583ae5106d96fab997fa7e2f38ee8347/jiter-0.8.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:37b2998606d6dadbb5ccda959a33d6a5e853252d921fec1792fc902351bb4e2c", size = 356057 }, - { url = 
"https://files.pythonhosted.org/packages/8d/50/a82796e421a22b699ee4d2ce527e5bcb29471a2351cbdc931819d941a167/jiter-0.8.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4ab9a87f3784eb0e098f84a32670cfe4a79cb6512fd8f42ae3d0709f06405d18", size = 379790 }, - { url = "https://files.pythonhosted.org/packages/3c/31/10fb012b00f6d83342ca9e2c9618869ab449f1aa78c8f1b2193a6b49647c/jiter-0.8.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:79aec8172b9e3c6d05fd4b219d5de1ac616bd8da934107325a6c0d0e866a21b6", size = 388285 }, - { url = "https://files.pythonhosted.org/packages/c8/81/f15ebf7de57be488aa22944bf4274962aca8092e4f7817f92ffa50d3ee46/jiter-0.8.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:711e408732d4e9a0208008e5892c2966b485c783cd2d9a681f3eb147cf36c7ef", size = 344764 }, - { url = "https://files.pythonhosted.org/packages/b3/e8/0cae550d72b48829ba653eb348cdc25f3f06f8a62363723702ec18e7be9c/jiter-0.8.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:653cf462db4e8c41995e33d865965e79641ef45369d8a11f54cd30888b7e6ff1", size = 376620 }, - { url = "https://files.pythonhosted.org/packages/b8/50/e5478ff9d82534a944c03b63bc217c5f37019d4a34d288db0f079b13c10b/jiter-0.8.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:9c63eaef32b7bebac8ebebf4dabebdbc6769a09c127294db6babee38e9f405b9", size = 510402 }, - { url = "https://files.pythonhosted.org/packages/8e/1e/3de48bbebbc8f7025bd454cedc8c62378c0e32dd483dece5f4a814a5cb55/jiter-0.8.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:eb21aaa9a200d0a80dacc7a81038d2e476ffe473ffdd9c91eb745d623561de05", size = 503018 }, - { url = "https://files.pythonhosted.org/packages/d5/cd/d5a5501d72a11fe3e5fd65c78c884e5164eefe80077680533919be22d3a3/jiter-0.8.2-cp313-cp313-win32.whl", hash = "sha256:789361ed945d8d42850f919342a8665d2dc79e7e44ca1c97cc786966a21f627a", size = 203190 }, - { url = 
"https://files.pythonhosted.org/packages/51/bf/e5ca301245ba951447e3ad677a02a64a8845b185de2603dabd83e1e4b9c6/jiter-0.8.2-cp313-cp313-win_amd64.whl", hash = "sha256:ab7f43235d71e03b941c1630f4b6e3055d46b6cb8728a17663eaac9d8e83a865", size = 203551 }, - { url = "https://files.pythonhosted.org/packages/2f/3c/71a491952c37b87d127790dd7a0b1ebea0514c6b6ad30085b16bbe00aee6/jiter-0.8.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b426f72cd77da3fec300ed3bc990895e2dd6b49e3bfe6c438592a3ba660e41ca", size = 308347 }, - { url = "https://files.pythonhosted.org/packages/a0/4c/c02408042e6a7605ec063daed138e07b982fdb98467deaaf1c90950cf2c6/jiter-0.8.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2dd880785088ff2ad21ffee205e58a8c1ddabc63612444ae41e5e4b321b39c0", size = 342875 }, - { url = "https://files.pythonhosted.org/packages/91/61/c80ef80ed8a0a21158e289ef70dac01e351d929a1c30cb0f49be60772547/jiter-0.8.2-cp313-cp313t-win_amd64.whl", hash = "sha256:3ac9f578c46f22405ff7f8b1f5848fb753cc4b8377fbec8470a7dc3997ca7566", size = 202374 }, ] [[package]] @@ -2202,23 +2095,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/7d/ed/e6276c8d9668028213df01f598f385b05b55a4e1b4662ee12ef05dab35aa/lxml-5.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e63601ad5cd8f860aa99d109889b5ac34de571c7ee902d6812d5d9ddcc77fa7d", size = 5012542 }, { url = "https://files.pythonhosted.org/packages/36/88/684d4e800f5aa28df2a991a6a622783fb73cf0e46235cfa690f9776f032e/lxml-5.3.0-cp312-cp312-win32.whl", hash = "sha256:17e8d968d04a37c50ad9c456a286b525d78c4a1c15dd53aa46c1d8e06bf6fa30", size = 3486454 }, { url = "https://files.pythonhosted.org/packages/fc/82/ace5a5676051e60355bd8fb945df7b1ba4f4fb8447f2010fb816bfd57724/lxml-5.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:c1a69e58a6bb2de65902051d57fde951febad631a20a64572677a1052690482f", size = 3816857 }, - { url = 
"https://files.pythonhosted.org/packages/94/6a/42141e4d373903bfea6f8e94b2f554d05506dfda522ada5343c651410dc8/lxml-5.3.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8c72e9563347c7395910de6a3100a4840a75a6f60e05af5e58566868d5eb2d6a", size = 8156284 }, - { url = "https://files.pythonhosted.org/packages/91/5e/fa097f0f7d8b3d113fb7312c6308af702f2667f22644441715be961f2c7e/lxml-5.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e92ce66cd919d18d14b3856906a61d3f6b6a8500e0794142338da644260595cd", size = 4432407 }, - { url = "https://files.pythonhosted.org/packages/2d/a1/b901988aa6d4ff937f2e5cfc114e4ec561901ff00660c3e56713642728da/lxml-5.3.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d04f064bebdfef9240478f7a779e8c5dc32b8b7b0b2fc6a62e39b928d428e51", size = 5048331 }, - { url = "https://files.pythonhosted.org/packages/30/0f/b2a54f48e52de578b71bbe2a2f8160672a8a5e103df3a78da53907e8c7ed/lxml-5.3.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c2fb570d7823c2bbaf8b419ba6e5662137f8166e364a8b2b91051a1fb40ab8b", size = 4744835 }, - { url = "https://files.pythonhosted.org/packages/82/9d/b000c15538b60934589e83826ecbc437a1586488d7c13f8ee5ff1f79a9b8/lxml-5.3.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0c120f43553ec759f8de1fee2f4794452b0946773299d44c36bfe18e83caf002", size = 5316649 }, - { url = "https://files.pythonhosted.org/packages/e3/ee/ffbb9eaff5e541922611d2c56b175c45893d1c0b8b11e5a497708a6a3b3b/lxml-5.3.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:562e7494778a69086f0312ec9689f6b6ac1c6b65670ed7d0267e49f57ffa08c4", size = 4812046 }, - { url = "https://files.pythonhosted.org/packages/15/ff/7ff89d567485c7b943cdac316087f16b2399a8b997007ed352a1248397e5/lxml-5.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:423b121f7e6fa514ba0c7918e56955a1d4470ed35faa03e3d9f0e3baa4c7e492", 
size = 4918597 }, - { url = "https://files.pythonhosted.org/packages/c6/a3/535b6ed8c048412ff51268bdf4bf1cf052a37aa7e31d2e6518038a883b29/lxml-5.3.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:c00f323cc00576df6165cc9d21a4c21285fa6b9989c5c39830c3903dc4303ef3", size = 4738071 }, - { url = "https://files.pythonhosted.org/packages/7a/8f/cbbfa59cb4d4fd677fe183725a76d8c956495d7a3c7f111ab8f5e13d2e83/lxml-5.3.0-cp313-cp313-manylinux_2_28_ppc64le.whl", hash = "sha256:1fdc9fae8dd4c763e8a31e7630afef517eab9f5d5d31a278df087f307bf601f4", size = 5342213 }, - { url = "https://files.pythonhosted.org/packages/5c/fb/db4c10dd9958d4b52e34d1d1f7c1f434422aeaf6ae2bbaaff2264351d944/lxml-5.3.0-cp313-cp313-manylinux_2_28_s390x.whl", hash = "sha256:658f2aa69d31e09699705949b5fc4719cbecbd4a97f9656a232e7d6c7be1a367", size = 4893749 }, - { url = "https://files.pythonhosted.org/packages/f2/38/bb4581c143957c47740de18a3281a0cab7722390a77cc6e610e8ebf2d736/lxml-5.3.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:1473427aff3d66a3fa2199004c3e601e6c4500ab86696edffdbc84954c72d832", size = 4945901 }, - { url = "https://files.pythonhosted.org/packages/fc/d5/18b7de4960c731e98037bd48fa9f8e6e8f2558e6fbca4303d9b14d21ef3b/lxml-5.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a87de7dd873bf9a792bf1e58b1c3887b9264036629a5bf2d2e6579fe8e73edff", size = 4815447 }, - { url = "https://files.pythonhosted.org/packages/97/a8/cd51ceaad6eb849246559a8ef60ae55065a3df550fc5fcd27014361c1bab/lxml-5.3.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:0d7b36afa46c97875303a94e8f3ad932bf78bace9e18e603f2085b652422edcd", size = 5411186 }, - { url = "https://files.pythonhosted.org/packages/89/c3/1e3dabab519481ed7b1fdcba21dcfb8832f57000733ef0e71cf6d09a5e03/lxml-5.3.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:cf120cce539453ae086eacc0130a324e7026113510efa83ab42ef3fcfccac7fb", size = 5324481 }, - { url = 
"https://files.pythonhosted.org/packages/b6/17/71e9984cf0570cd202ac0a1c9ed5c1b8889b0fc8dc736f5ef0ffb181c284/lxml-5.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:df5c7333167b9674aa8ae1d4008fa4bc17a313cc490b2cca27838bbdcc6bb15b", size = 5011053 }, - { url = "https://files.pythonhosted.org/packages/69/68/9f7e6d3312a91e30829368c2b3217e750adef12a6f8eb10498249f4e8d72/lxml-5.3.0-cp313-cp313-win32.whl", hash = "sha256:c802e1c2ed9f0c06a65bc4ed0189d000ada8049312cfeab6ca635e39c9608957", size = 3485634 }, - { url = "https://files.pythonhosted.org/packages/7d/db/214290d58ad68c587bd5d6af3d34e56830438733d0d0856c0275fde43652/lxml-5.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:406246b96d552e0503e17a1006fd27edac678b3fcc9f1be71a2f94b4ff61528d", size = 3814417 }, ] [[package]] @@ -2280,26 +2156,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a2/82/8be4c96ffee03c5b4a034e60a31294daf481e12c7c43ab8e34a1453ee48b/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48", size = 23352 }, { url = "https://files.pythonhosted.org/packages/51/ae/97827349d3fcffee7e184bdf7f41cd6b88d9919c80f0263ba7acd1bbcb18/MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30", size = 15097 }, { url = "https://files.pythonhosted.org/packages/c1/80/a61f99dc3a936413c3ee4e1eecac96c0da5ed07ad56fd975f1a9da5bc630/MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87", size = 15601 }, - { url = "https://files.pythonhosted.org/packages/83/0e/67eb10a7ecc77a0c2bbe2b0235765b98d164d81600746914bebada795e97/MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd", size = 14274 }, - { url = 
"https://files.pythonhosted.org/packages/2b/6d/9409f3684d3335375d04e5f05744dfe7e9f120062c9857df4ab490a1031a/MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430", size = 12352 }, - { url = "https://files.pythonhosted.org/packages/d2/f5/6eadfcd3885ea85fe2a7c128315cc1bb7241e1987443d78c8fe712d03091/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094", size = 24122 }, - { url = "https://files.pythonhosted.org/packages/0c/91/96cf928db8236f1bfab6ce15ad070dfdd02ed88261c2afafd4b43575e9e9/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396", size = 23085 }, - { url = "https://files.pythonhosted.org/packages/c2/cf/c9d56af24d56ea04daae7ac0940232d31d5a8354f2b457c6d856b2057d69/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79", size = 22978 }, - { url = "https://files.pythonhosted.org/packages/2a/9f/8619835cd6a711d6272d62abb78c033bda638fdc54c4e7f4272cf1c0962b/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a", size = 24208 }, - { url = "https://files.pythonhosted.org/packages/f9/bf/176950a1792b2cd2102b8ffeb5133e1ed984547b75db47c25a67d3359f77/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca", size = 23357 }, - { url = "https://files.pythonhosted.org/packages/ce/4f/9a02c1d335caabe5c4efb90e1b6e8ee944aa245c1aaaab8e8a618987d816/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c", size = 23344 }, - { url = 
"https://files.pythonhosted.org/packages/ee/55/c271b57db36f748f0e04a759ace9f8f759ccf22b4960c270c78a394f58be/MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1", size = 15101 }, - { url = "https://files.pythonhosted.org/packages/29/88/07df22d2dd4df40aba9f3e402e6dc1b8ee86297dddbad4872bd5e7b0094f/MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f", size = 15603 }, - { url = "https://files.pythonhosted.org/packages/62/6a/8b89d24db2d32d433dffcd6a8779159da109842434f1dd2f6e71f32f738c/MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c", size = 14510 }, - { url = "https://files.pythonhosted.org/packages/7a/06/a10f955f70a2e5a9bf78d11a161029d278eeacbd35ef806c3fd17b13060d/MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb", size = 12486 }, - { url = "https://files.pythonhosted.org/packages/34/cf/65d4a571869a1a9078198ca28f39fba5fbb910f952f9dbc5220afff9f5e6/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c", size = 25480 }, - { url = "https://files.pythonhosted.org/packages/0c/e3/90e9651924c430b885468b56b3d597cabf6d72be4b24a0acd1fa0e12af67/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d", size = 23914 }, - { url = "https://files.pythonhosted.org/packages/66/8c/6c7cf61f95d63bb866db39085150df1f2a5bd3335298f14a66b48e92659c/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe", size = 23796 }, - { url = 
"https://files.pythonhosted.org/packages/bb/35/cbe9238ec3f47ac9a7c8b3df7a808e7cb50fe149dc7039f5f454b3fba218/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5", size = 25473 }, - { url = "https://files.pythonhosted.org/packages/e6/32/7621a4382488aa283cc05e8984a9c219abad3bca087be9ec77e89939ded9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a", size = 24114 }, - { url = "https://files.pythonhosted.org/packages/0d/80/0985960e4b89922cb5a0bac0ed39c5b96cbc1a536a99f30e8c220a996ed9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9", size = 24098 }, - { url = "https://files.pythonhosted.org/packages/82/78/fedb03c7d5380df2427038ec8d973587e90561b2d90cd472ce9254cf348b/MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6", size = 15208 }, - { url = "https://files.pythonhosted.org/packages/4f/65/6079a46068dfceaeabb5dcad6d674f5f5c61a6fa5673746f42a9f4c233b3/MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", size = 15739 }, ] [[package]] @@ -2375,22 +2231,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/9b/fd/eb1a3573cda74d4c2381d10ded62c128e869954ced1881c15e2bcd97a48f/mmh3-5.0.1-cp312-cp312-win32.whl", hash = "sha256:842516acf04da546f94fad52db125ee619ccbdcada179da51c326a22c4578cb9", size = 39206 }, { url = "https://files.pythonhosted.org/packages/66/e8/542ed252924002b84c43a68a080cfd4facbea0d5df361e4f59637638d3c7/mmh3-5.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:d963be0dbfd9fca209c17172f6110787ebf78934af25e3694fe2ba40e55c1e2b", size = 39799 }, { url = 
"https://files.pythonhosted.org/packages/bd/25/ff2cd36c82a23afa57a05cdb52ab467a911fb12c055c8a8238c0d426cbf0/mmh3-5.0.1-cp312-cp312-win_arm64.whl", hash = "sha256:a5da292ceeed8ce8e32b68847261a462d30fd7b478c3f55daae841404f433c15", size = 36537 }, - { url = "https://files.pythonhosted.org/packages/09/e0/fb19c46265c18311b422ba5ce3e18046ad45c48cfb213fd6dbec23ae6b51/mmh3-5.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:673e3f1c8d4231d6fb0271484ee34cb7146a6499fc0df80788adb56fd76842da", size = 52909 }, - { url = "https://files.pythonhosted.org/packages/c3/94/54fc591e7a24c7ce2c531ecfc5715cff932f9d320c2936550cc33d67304d/mmh3-5.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f795a306bd16a52ad578b663462cc8e95500b3925d64118ae63453485d67282b", size = 38396 }, - { url = "https://files.pythonhosted.org/packages/1f/9a/142bcc9d0d28fc8ae45bbfb83926adc069f984cdf3495a71534cc22b8e27/mmh3-5.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5ed57a5e28e502a1d60436cc25c76c3a5ba57545f250f2969af231dc1221e0a5", size = 38207 }, - { url = "https://files.pythonhosted.org/packages/f8/5b/f1c9110aa70321bb1ee713f17851b9534586c63bc25e0110e4fc03ae2450/mmh3-5.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:632c28e7612e909dbb6cbe2fe496201ada4695b7715584005689c5dc038e59ad", size = 94988 }, - { url = "https://files.pythonhosted.org/packages/87/e5/4dc67e7e0e716c641ab0a5875a659e37258417439590feff5c3bd3ff4538/mmh3-5.0.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:53fd6bd525a5985e391c43384672d9d6b317fcb36726447347c7fc75bfed34ec", size = 99969 }, - { url = "https://files.pythonhosted.org/packages/ac/68/d148327337687c53f04ad9ceaedfa9ad155ee0111d0cb06220f044d66720/mmh3-5.0.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dceacf6b0b961a0e499836af3aa62d60633265607aef551b2a3e3c48cdaa5edd", size = 99662 }, - { url = 
"https://files.pythonhosted.org/packages/13/79/782adb6df6397947c1097b1e94b7f8d95629a4a73df05cf7207bd5148c1f/mmh3-5.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8f0738d478fdfb5d920f6aff5452c78f2c35b0eff72caa2a97dfe38e82f93da2", size = 87606 }, - { url = "https://files.pythonhosted.org/packages/f2/c2/0404383281df049d0e4ccf07fabd659fc1f3da834df6708d934116cbf45d/mmh3-5.0.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e70285e7391ab88b872e5bef632bad16b9d99a6d3ca0590656a4753d55988af", size = 94836 }, - { url = "https://files.pythonhosted.org/packages/c8/33/fda67c5f28e4c2131891cf8cbc3513cfc55881e3cfe26e49328e38ffacb3/mmh3-5.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:27e5fc6360aa6b828546a4318da1a7da6bf6e5474ccb053c3a6aa8ef19ff97bd", size = 90492 }, - { url = "https://files.pythonhosted.org/packages/64/2f/0ed38aefe2a87f30bb1b12e5b75dc69fcffdc16def40d1752d6fc7cbbf96/mmh3-5.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:7989530c3c1e2c17bf5a0ec2bba09fd19819078ba90beedabb1c3885f5040b0d", size = 89594 }, - { url = "https://files.pythonhosted.org/packages/95/ab/6e7a5e765fc78e3dbd0a04a04cfdf72e91eb8e31976228e69d82c741a5b4/mmh3-5.0.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:cdad7bee649950da7ecd3cbbbd12fb81f1161072ecbdb5acfa0018338c5cb9cf", size = 94929 }, - { url = "https://files.pythonhosted.org/packages/74/51/f748f00c072006f4a093d9b08853a0e2e3cd5aeaa91343d4e2d942851978/mmh3-5.0.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:e143b8f184c1bb58cecd85ab4a4fd6dc65a2d71aee74157392c3fddac2a4a331", size = 91317 }, - { url = "https://files.pythonhosted.org/packages/df/a1/21ee8017a7feb0270c49f756ff56da9f99bd150dcfe3b3f6f0d4b243423d/mmh3-5.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e5eb12e886f3646dd636f16b76eb23fc0c27e8ff3c1ae73d4391e50ef60b40f6", size = 89861 }, - { url = 
"https://files.pythonhosted.org/packages/c2/d2/46a6d070de4659bdf91cd6a62d659f8cc547dadee52b6d02bcbacb3262ed/mmh3-5.0.1-cp313-cp313-win32.whl", hash = "sha256:16e6dddfa98e1c2d021268e72c78951234186deb4df6630e984ac82df63d0a5d", size = 39201 }, - { url = "https://files.pythonhosted.org/packages/ed/07/316c062f09019b99b248a4183c5333f8eeebe638345484774908a8f2c9c0/mmh3-5.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:d3ffb792d70b8c4a2382af3598dad6ae0c5bd9cee5b7ffcc99aa2f5fd2c1bf70", size = 39807 }, - { url = "https://files.pythonhosted.org/packages/9d/d3/f7e6d7d062b8d7072c3989a528d9d47486ee5d5ae75250f6e26b4976d098/mmh3-5.0.1-cp313-cp313-win_arm64.whl", hash = "sha256:122fa9ec148383f9124292962bda745f192b47bfd470b2af5fe7bb3982b17896", size = 36539 }, ] [[package]] @@ -2486,21 +2326,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/77/00/8538f11e3356b5d95fa4b024aa566cde7a38aa7a5f08f4912b32a037c5dc/multidict-6.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3ec660d19bbc671e3a6443325f07263be452c453ac9e512f5eb935e7d4ac28b3", size = 125360 }, { url = "https://files.pythonhosted.org/packages/be/05/5d334c1f2462d43fec2363cd00b1c44c93a78c3925d952e9a71caf662e96/multidict-6.1.0-cp312-cp312-win32.whl", hash = "sha256:58130ecf8f7b8112cdb841486404f1282b9c86ccb30d3519faf301b2e5659133", size = 26382 }, { url = "https://files.pythonhosted.org/packages/a3/bf/f332a13486b1ed0496d624bcc7e8357bb8053823e8cd4b9a18edc1d97e73/multidict-6.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:188215fc0aafb8e03341995e7c4797860181562380f81ed0a87ff455b70bf1f1", size = 28529 }, - { url = "https://files.pythonhosted.org/packages/22/67/1c7c0f39fe069aa4e5d794f323be24bf4d33d62d2a348acdb7991f8f30db/multidict-6.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:d569388c381b24671589335a3be6e1d45546c2988c2ebe30fdcada8457a31008", size = 48771 }, - { url = 
"https://files.pythonhosted.org/packages/3c/25/c186ee7b212bdf0df2519eacfb1981a017bda34392c67542c274651daf23/multidict-6.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:052e10d2d37810b99cc170b785945421141bf7bb7d2f8799d431e7db229c385f", size = 29533 }, - { url = "https://files.pythonhosted.org/packages/67/5e/04575fd837e0958e324ca035b339cea174554f6f641d3fb2b4f2e7ff44a2/multidict-6.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f90c822a402cb865e396a504f9fc8173ef34212a342d92e362ca498cad308e28", size = 29595 }, - { url = "https://files.pythonhosted.org/packages/d3/b2/e56388f86663810c07cfe4a3c3d87227f3811eeb2d08450b9e5d19d78876/multidict-6.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b225d95519a5bf73860323e633a664b0d85ad3d5bede6d30d95b35d4dfe8805b", size = 130094 }, - { url = "https://files.pythonhosted.org/packages/6c/ee/30ae9b4186a644d284543d55d491fbd4239b015d36b23fea43b4c94f7052/multidict-6.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:23bfd518810af7de1116313ebd9092cb9aa629beb12f6ed631ad53356ed6b86c", size = 134876 }, - { url = "https://files.pythonhosted.org/packages/84/c7/70461c13ba8ce3c779503c70ec9d0345ae84de04521c1f45a04d5f48943d/multidict-6.1.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c09fcfdccdd0b57867577b719c69e347a436b86cd83747f179dbf0cc0d4c1f3", size = 133500 }, - { url = "https://files.pythonhosted.org/packages/4a/9f/002af221253f10f99959561123fae676148dd730e2daa2cd053846a58507/multidict-6.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf6bea52ec97e95560af5ae576bdac3aa3aae0b6758c6efa115236d9e07dae44", size = 131099 }, - { url = "https://files.pythonhosted.org/packages/82/42/d1c7a7301d52af79d88548a97e297f9d99c961ad76bbe6f67442bb77f097/multidict-6.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:57feec87371dbb3520da6192213c7d6fc892d5589a93db548331954de8248fd2", size = 120403 }, - { url = "https://files.pythonhosted.org/packages/68/f3/471985c2c7ac707547553e8f37cff5158030d36bdec4414cb825fbaa5327/multidict-6.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0c3f390dc53279cbc8ba976e5f8035eab997829066756d811616b652b00a23a3", size = 125348 }, - { url = "https://files.pythonhosted.org/packages/67/2c/e6df05c77e0e433c214ec1d21ddd203d9a4770a1f2866a8ca40a545869a0/multidict-6.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:59bfeae4b25ec05b34f1956eaa1cb38032282cd4dfabc5056d0a1ec4d696d3aa", size = 119673 }, - { url = "https://files.pythonhosted.org/packages/c5/cd/bc8608fff06239c9fb333f9db7743a1b2eafe98c2666c9a196e867a3a0a4/multidict-6.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b2f59caeaf7632cc633b5cf6fc449372b83bbdf0da4ae04d5be36118e46cc0aa", size = 129927 }, - { url = "https://files.pythonhosted.org/packages/44/8e/281b69b7bc84fc963a44dc6e0bbcc7150e517b91df368a27834299a526ac/multidict-6.1.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:37bb93b2178e02b7b618893990941900fd25b6b9ac0fa49931a40aecdf083fe4", size = 128711 }, - { url = "https://files.pythonhosted.org/packages/12/a4/63e7cd38ed29dd9f1881d5119f272c898ca92536cdb53ffe0843197f6c85/multidict-6.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4e9f48f58c2c523d5a06faea47866cd35b32655c46b443f163d08c6d0ddb17d6", size = 125519 }, - { url = "https://files.pythonhosted.org/packages/38/e0/4f5855037a72cd8a7a2f60a3952d9aa45feedb37ae7831642102604e8a37/multidict-6.1.0-cp313-cp313-win32.whl", hash = "sha256:3a37ffb35399029b45c6cc33640a92bef403c9fd388acce75cdc88f58bd19a81", size = 26426 }, - { url = "https://files.pythonhosted.org/packages/7e/a5/17ee3a4db1e310b7405f5d25834460073a8ccd86198ce044dfaf69eac073/multidict-6.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:e9aa71e15d9d9beaad2c6b9319edcdc0a49a43ef5c0a4c8265ca9ee7d6c67774", size = 28531 }, { url = 
"https://files.pythonhosted.org/packages/99/b7/b9e70fde2c0f0c9af4cc5277782a89b66d35948ea3369ec9f598358c3ac5/multidict-6.1.0-py3-none-any.whl", hash = "sha256:48e171e52d1c4d33888e529b999e5900356b9ae588c2f09a52dcefb158b27506", size = 10051 }, ] @@ -2513,8 +2338,11 @@ dependencies = [ ] sdist = { url = "https://files.pythonhosted.org/packages/b5/ae/04f39c5d0d0def03247c2893d6f2b83c136bf3320a2154d7b8858f2ba72d/multiprocess-0.70.16.tar.gz", hash = "sha256:161af703d4652a0e1410be6abccecde4a7ddffd19341be0a7011b94aeb171ac1", size = 1772603 } wheels = [ + { url = "https://files.pythonhosted.org/packages/bc/f7/7ec7fddc92e50714ea3745631f79bd9c96424cb2702632521028e57d3a36/multiprocess-0.70.16-py310-none-any.whl", hash = "sha256:c4a9944c67bd49f823687463660a2d6daae94c289adff97e0f9d696ba6371d02", size = 134824 }, { url = "https://files.pythonhosted.org/packages/50/15/b56e50e8debaf439f44befec5b2af11db85f6e0f344c3113ae0be0593a91/multiprocess-0.70.16-py311-none-any.whl", hash = "sha256:af4cabb0dac72abfb1e794fa7855c325fd2b55a10a44628a3c1ad3311c04127a", size = 143519 }, { url = "https://files.pythonhosted.org/packages/0a/7d/a988f258104dcd2ccf1ed40fdc97e26c4ac351eeaf81d76e266c52d84e2f/multiprocess-0.70.16-py312-none-any.whl", hash = "sha256:fc0544c531920dde3b00c29863377f87e1632601092ea2daca74e4beb40faa2e", size = 146741 }, + { url = "https://files.pythonhosted.org/packages/ea/89/38df130f2c799090c978b366cfdf5b96d08de5b29a4a293df7f7429fa50b/multiprocess-0.70.16-py38-none-any.whl", hash = "sha256:a71d82033454891091a226dfc319d0cfa8019a4e888ef9ca910372a446de4435", size = 132628 }, + { url = "https://files.pythonhosted.org/packages/da/d9/f7f9379981e39b8c2511c9e0326d212accacb82f12fbfdc1aa2ce2a7b2b6/multiprocess-0.70.16-py39-none-any.whl", hash = "sha256:a0bafd3ae1b732eac64be2e72038231c1ba97724b60b09400d68f229fcc2fbf3", size = 133351 }, ] [[package]] @@ -2614,7 +2442,6 @@ source = { registry = "https://pypi.org/simple" } wheels = [ { url = 
"https://files.pythonhosted.org/packages/7f/7f/7fbae15a3982dc9595e49ce0f19332423b260045d0a6afe93cdbe2f1f624/nvidia_cublas_cu12-12.4.5.8-py3-none-manylinux2014_aarch64.whl", hash = "sha256:0f8aa1706812e00b9f19dfe0cdb3999b092ccb8ca168c0db5b8ea712456fd9b3", size = 363333771 }, { url = "https://files.pythonhosted.org/packages/ae/71/1c91302526c45ab494c23f61c7a84aa568b8c1f9d196efa5993957faf906/nvidia_cublas_cu12-12.4.5.8-py3-none-manylinux2014_x86_64.whl", hash = "sha256:2fc8da60df463fdefa81e323eef2e36489e1c94335b5358bcb38360adf75ac9b", size = 363438805 }, - { url = "https://files.pythonhosted.org/packages/e2/2a/4f27ca96232e8b5269074a72e03b4e0d43aa68c9b965058b1684d07c6ff8/nvidia_cublas_cu12-12.4.5.8-py3-none-win_amd64.whl", hash = "sha256:5a796786da89203a0657eda402bcdcec6180254a8ac22d72213abc42069522dc", size = 396895858 }, ] [[package]] @@ -2624,7 +2451,6 @@ source = { registry = "https://pypi.org/simple" } wheels = [ { url = "https://files.pythonhosted.org/packages/93/b5/9fb3d00386d3361b03874246190dfec7b206fd74e6e287b26a8fcb359d95/nvidia_cuda_cupti_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:79279b35cf6f91da114182a5ce1864997fd52294a87a16179ce275773799458a", size = 12354556 }, { url = "https://files.pythonhosted.org/packages/67/42/f4f60238e8194a3106d06a058d494b18e006c10bb2b915655bd9f6ea4cb1/nvidia_cuda_cupti_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:9dec60f5ac126f7bb551c055072b69d85392b13311fcc1bcda2202d172df30fb", size = 13813957 }, - { url = "https://files.pythonhosted.org/packages/f3/79/8cf313ec17c58ccebc965568e5bcb265cdab0a1df99c4e674bb7a3b99bfe/nvidia_cuda_cupti_cu12-12.4.127-py3-none-win_amd64.whl", hash = "sha256:5688d203301ab051449a2b1cb6690fbe90d2b372f411521c86018b950f3d7922", size = 9938035 }, ] [[package]] @@ -2634,7 +2460,6 @@ source = { registry = "https://pypi.org/simple" } wheels = [ { url = 
"https://files.pythonhosted.org/packages/77/aa/083b01c427e963ad0b314040565ea396f914349914c298556484f799e61b/nvidia_cuda_nvrtc_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:0eedf14185e04b76aa05b1fea04133e59f465b6f960c0cbf4e37c3cb6b0ea198", size = 24133372 }, { url = "https://files.pythonhosted.org/packages/2c/14/91ae57cd4db3f9ef7aa99f4019cfa8d54cb4caa7e00975df6467e9725a9f/nvidia_cuda_nvrtc_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:a178759ebb095827bd30ef56598ec182b85547f1508941a3d560eb7ea1fbf338", size = 24640306 }, - { url = "https://files.pythonhosted.org/packages/7c/30/8c844bfb770f045bcd8b2c83455c5afb45983e1a8abf0c4e5297b481b6a5/nvidia_cuda_nvrtc_cu12-12.4.127-py3-none-win_amd64.whl", hash = "sha256:a961b2f1d5f17b14867c619ceb99ef6fcec12e46612711bcec78eb05068a60ec", size = 19751955 }, ] [[package]] @@ -2644,7 +2469,6 @@ source = { registry = "https://pypi.org/simple" } wheels = [ { url = "https://files.pythonhosted.org/packages/a1/aa/b656d755f474e2084971e9a297def515938d56b466ab39624012070cb773/nvidia_cuda_runtime_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:961fe0e2e716a2a1d967aab7caee97512f71767f852f67432d572e36cb3a11f3", size = 894177 }, { url = "https://files.pythonhosted.org/packages/ea/27/1795d86fe88ef397885f2e580ac37628ed058a92ed2c39dc8eac3adf0619/nvidia_cuda_runtime_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:64403288fa2136ee8e467cdc9c9427e0434110899d07c779f25b5c068934faa5", size = 883737 }, - { url = "https://files.pythonhosted.org/packages/a8/8b/450e93fab75d85a69b50ea2d5fdd4ff44541e0138db16f9cd90123ef4de4/nvidia_cuda_runtime_cu12-12.4.127-py3-none-win_amd64.whl", hash = "sha256:09c2e35f48359752dfa822c09918211844a3d93c100a715d79b59591130c5e1e", size = 878808 }, ] [[package]] @@ -2656,7 +2480,6 @@ dependencies = [ ] wheels = [ { url = 
"https://files.pythonhosted.org/packages/9f/fd/713452cd72343f682b1c7b9321e23829f00b842ceaedcda96e742ea0b0b3/nvidia_cudnn_cu12-9.1.0.70-py3-none-manylinux2014_x86_64.whl", hash = "sha256:165764f44ef8c61fcdfdfdbe769d687e06374059fbb388b6c89ecb0e28793a6f", size = 664752741 }, - { url = "https://files.pythonhosted.org/packages/3f/d0/f90ee6956a628f9f04bf467932c0a25e5a7e706a684b896593c06c82f460/nvidia_cudnn_cu12-9.1.0.70-py3-none-win_amd64.whl", hash = "sha256:6278562929433d68365a07a4a1546c237ba2849852c0d4b2262a486e805b977a", size = 679925892 }, ] [[package]] @@ -2669,7 +2492,6 @@ dependencies = [ wheels = [ { url = "https://files.pythonhosted.org/packages/7a/8a/0e728f749baca3fbeffad762738276e5df60851958be7783af121a7221e7/nvidia_cufft_cu12-11.2.1.3-py3-none-manylinux2014_aarch64.whl", hash = "sha256:5dad8008fc7f92f5ddfa2101430917ce2ffacd86824914c82e28990ad7f00399", size = 211422548 }, { url = "https://files.pythonhosted.org/packages/27/94/3266821f65b92b3138631e9c8e7fe1fb513804ac934485a8d05776e1dd43/nvidia_cufft_cu12-11.2.1.3-py3-none-manylinux2014_x86_64.whl", hash = "sha256:f083fc24912aa410be21fa16d157fed2055dab1cc4b6934a0e03cba69eb242b9", size = 211459117 }, - { url = "https://files.pythonhosted.org/packages/f6/ee/3f3f8e9874f0be5bbba8fb4b62b3de050156d159f8b6edc42d6f1074113b/nvidia_cufft_cu12-11.2.1.3-py3-none-win_amd64.whl", hash = "sha256:d802f4954291101186078ccbe22fc285a902136f974d369540fd4a5333d1440b", size = 210576476 }, ] [[package]] @@ -2679,7 +2501,6 @@ source = { registry = "https://pypi.org/simple" } wheels = [ { url = "https://files.pythonhosted.org/packages/80/9c/a79180e4d70995fdf030c6946991d0171555c6edf95c265c6b2bf7011112/nvidia_curand_cu12-10.3.5.147-py3-none-manylinux2014_aarch64.whl", hash = "sha256:1f173f09e3e3c76ab084aba0de819c49e56614feae5c12f69883f4ae9bb5fad9", size = 56314811 }, { url = 
"https://files.pythonhosted.org/packages/8a/6d/44ad094874c6f1b9c654f8ed939590bdc408349f137f9b98a3a23ccec411/nvidia_curand_cu12-10.3.5.147-py3-none-manylinux2014_x86_64.whl", hash = "sha256:a88f583d4e0bb643c49743469964103aa59f7f708d862c3ddb0fc07f851e3b8b", size = 56305206 }, - { url = "https://files.pythonhosted.org/packages/1c/22/2573503d0d4e45673c263a313f79410e110eb562636b0617856fdb2ff5f6/nvidia_curand_cu12-10.3.5.147-py3-none-win_amd64.whl", hash = "sha256:f307cc191f96efe9e8f05a87096abc20d08845a841889ef78cb06924437f6771", size = 55799918 }, ] [[package]] @@ -2694,7 +2515,6 @@ dependencies = [ wheels = [ { url = "https://files.pythonhosted.org/packages/46/6b/a5c33cf16af09166845345275c34ad2190944bcc6026797a39f8e0a282e0/nvidia_cusolver_cu12-11.6.1.9-py3-none-manylinux2014_aarch64.whl", hash = "sha256:d338f155f174f90724bbde3758b7ac375a70ce8e706d70b018dd3375545fc84e", size = 127634111 }, { url = "https://files.pythonhosted.org/packages/3a/e1/5b9089a4b2a4790dfdea8b3a006052cfecff58139d5a4e34cb1a51df8d6f/nvidia_cusolver_cu12-11.6.1.9-py3-none-manylinux2014_x86_64.whl", hash = "sha256:19e33fa442bcfd085b3086c4ebf7e8debc07cfe01e11513cc6d332fd918ac260", size = 127936057 }, - { url = "https://files.pythonhosted.org/packages/f2/be/d435b7b020e854d5d5a682eb5de4328fd62f6182507406f2818280e206e2/nvidia_cusolver_cu12-11.6.1.9-py3-none-win_amd64.whl", hash = "sha256:e77314c9d7b694fcebc84f58989f3aa4fb4cb442f12ca1a9bde50f5e8f6d1b9c", size = 125224015 }, ] [[package]] @@ -2707,7 +2527,6 @@ dependencies = [ wheels = [ { url = "https://files.pythonhosted.org/packages/96/a9/c0d2f83a53d40a4a41be14cea6a0bf9e668ffcf8b004bd65633f433050c0/nvidia_cusparse_cu12-12.3.1.170-py3-none-manylinux2014_aarch64.whl", hash = "sha256:9d32f62896231ebe0480efd8a7f702e143c98cfaa0e8a76df3386c1ba2b54df3", size = 207381987 }, { url = "https://files.pythonhosted.org/packages/db/f7/97a9ea26ed4bbbfc2d470994b8b4f338ef663be97b8f677519ac195e113d/nvidia_cusparse_cu12-12.3.1.170-py3-none-manylinux2014_x86_64.whl", hash = 
"sha256:ea4f11a2904e2a8dc4b1833cc1b5181cde564edd0d5cd33e3c168eff2d1863f1", size = 207454763 }, - { url = "https://files.pythonhosted.org/packages/a2/e0/3155ca539760a8118ec94cc279b34293309bcd14011fc724f87f31988843/nvidia_cusparse_cu12-12.3.1.170-py3-none-win_amd64.whl", hash = "sha256:9bc90fb087bc7b4c15641521f31c0371e9a612fc2ba12c338d3ae032e6b6797f", size = 204684315 }, ] [[package]] @@ -2725,7 +2544,6 @@ source = { registry = "https://pypi.org/simple" } wheels = [ { url = "https://files.pythonhosted.org/packages/02/45/239d52c05074898a80a900f49b1615d81c07fceadd5ad6c4f86a987c0bc4/nvidia_nvjitlink_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:4abe7fef64914ccfa909bc2ba39739670ecc9e820c83ccc7a6ed414122599b83", size = 20552510 }, { url = "https://files.pythonhosted.org/packages/ff/ff/847841bacfbefc97a00036e0fce5a0f086b640756dc38caea5e1bb002655/nvidia_nvjitlink_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:06b3b9b25bf3f8af351d664978ca26a16d2c5127dbd53c0497e28d1fb9611d57", size = 21066810 }, - { url = "https://files.pythonhosted.org/packages/81/19/0babc919031bee42620257b9a911c528f05fb2688520dcd9ca59159ffea8/nvidia_nvjitlink_cu12-12.4.127-py3-none-win_amd64.whl", hash = "sha256:fd9020c501d27d135f983c6d3e244b197a7ccad769e34df53a42e276b0e25fa1", size = 95336325 }, ] [[package]] @@ -2735,7 +2553,6 @@ source = { registry = "https://pypi.org/simple" } wheels = [ { url = "https://files.pythonhosted.org/packages/06/39/471f581edbb7804b39e8063d92fc8305bdc7a80ae5c07dbe6ea5c50d14a5/nvidia_nvtx_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7959ad635db13edf4fc65c06a6e9f9e55fc2f92596db928d169c0bb031e88ef3", size = 100417 }, { url = "https://files.pythonhosted.org/packages/87/20/199b8713428322a2f22b722c62b8cc278cc53dffa9705d744484b5035ee9/nvidia_nvtx_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:781e950d9b9f60d8241ccea575b32f5105a5baf4c2351cab5256a24869f12a1a", size = 99144 }, - { url = 
"https://files.pythonhosted.org/packages/54/1b/f77674fbb73af98843be25803bbd3b9a4f0a96c75b8d33a2854a5c7d2d77/nvidia_nvtx_cu12-12.4.127-py3-none-win_amd64.whl", hash = "sha256:641dccaaa1139f3ffb0d3164b4b84f9d253397e38246a4f2f36728b48566d485", size = 66307 }, ] [[package]] @@ -2796,17 +2613,11 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/47/42/2f71f5680834688a9c81becbe5c5bb996fd33eaed5c66ae0606c3b1d6a02/onnxruntime-1.20.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bb71a814f66517a65628c9e4a2bb530a6edd2cd5d87ffa0af0f6f773a027d99e", size = 13333903 }, { url = "https://files.pythonhosted.org/packages/c8/f1/aabfdf91d013320aa2fc46cf43c88ca0182860ff15df872b4552254a9680/onnxruntime-1.20.1-cp312-cp312-win32.whl", hash = "sha256:bd386cc9ee5f686ee8a75ba74037750aca55183085bf1941da8efcfe12d5b120", size = 9814562 }, { url = "https://files.pythonhosted.org/packages/dd/80/76979e0b744307d488c79e41051117634b956612cc731f1028eb17ee7294/onnxruntime-1.20.1-cp312-cp312-win_amd64.whl", hash = "sha256:19c2d843eb074f385e8bbb753a40df780511061a63f9def1b216bf53860223fb", size = 11331482 }, - { url = "https://files.pythonhosted.org/packages/f7/71/c5d980ac4189589267a06f758bd6c5667d07e55656bed6c6c0580733ad07/onnxruntime-1.20.1-cp313-cp313-macosx_13_0_universal2.whl", hash = "sha256:cc01437a32d0042b606f462245c8bbae269e5442797f6213e36ce61d5abdd8cc", size = 31007574 }, - { url = "https://files.pythonhosted.org/packages/81/0d/13bbd9489be2a6944f4a940084bfe388f1100472f38c07080a46fbd4ab96/onnxruntime-1.20.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fb44b08e017a648924dbe91b82d89b0c105b1adcfe31e90d1dc06b8677ad37be", size = 11951459 }, - { url = "https://files.pythonhosted.org/packages/c0/ea/4454ae122874fd52bbb8a961262de81c5f932edeb1b72217f594c700d6ef/onnxruntime-1.20.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bda6aebdf7917c1d811f21d41633df00c58aff2bef2f598f69289c1f1dabc4b3", size = 
13331620 }, - { url = "https://files.pythonhosted.org/packages/d8/e0/50db43188ca1c945decaa8fc2a024c33446d31afed40149897d4f9de505f/onnxruntime-1.20.1-cp313-cp313-win_amd64.whl", hash = "sha256:d30367df7e70f1d9fc5a6a68106f5961686d39b54d3221f760085524e8d38e16", size = 11331758 }, - { url = "https://files.pythonhosted.org/packages/d8/55/3821c5fd60b52a6c82a00bba18531793c93c4addfe64fbf061e235c5617a/onnxruntime-1.20.1-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c9158465745423b2b5d97ed25aa7740c7d38d2993ee2e5c3bfacb0c4145c49d8", size = 11950342 }, - { url = "https://files.pythonhosted.org/packages/14/56/fd990ca222cef4f9f4a9400567b9a15b220dee2eafffb16b2adbc55c8281/onnxruntime-1.20.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0df6f2df83d61f46e842dbcde610ede27218947c33e994545a22333491e72a3b", size = 13337040 }, ] [[package]] name = "open-webui" -version = "0.5.5" +version = "0.5.7" source = { editable = "." } dependencies = [ { name = "aiocache" }, @@ -2836,6 +2647,9 @@ dependencies = [ { name = "fpdf2" }, { name = "ftfy" }, { name = "gcp-storage-emulator" }, + { name = "google-api-python-client" }, + { name = "google-auth-httplib2" }, + { name = "google-auth-oauthlib" }, { name = "google-cloud-storage" }, { name = "google-generativeai" }, { name = "googleapis-common-protos" }, @@ -2910,7 +2724,7 @@ requires-dist = [ { name = "colbert-ai", specifier = "==0.2.21" }, { name = "docker", specifier = "~=7.1.0" }, { name = "docx2txt", specifier = "==0.8" }, - { name = "duckduckgo-search", specifier = "~=6.3.5" }, + { name = "duckduckgo-search", specifier = "~=7.2.1" }, { name = "einops", specifier = "==0.8.0" }, { name = "extract-msg" }, { name = "fake-useragent", specifier = "==1.5.1" }, @@ -2921,6 +2735,9 @@ requires-dist = [ { name = "fpdf2", specifier = "==2.8.2" }, { name = "ftfy", specifier = "==6.2.3" }, { name = "gcp-storage-emulator", specifier = ">=2024.8.3" }, + { name = "google-api-python-client" }, + 
{ name = "google-auth-httplib2" }, + { name = "google-auth-oauthlib" }, { name = "google-cloud-storage", specifier = "==2.19.0" }, { name = "google-generativeai", specifier = "==0.7.2" }, { name = "googleapis-common-protos", specifier = "==1.63.2" }, @@ -3215,15 +3032,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/f8/26/68513e28b3bd1d7633318ed2818e86d1bfc8b782c87c520c7b363092837f/orjson-3.10.14-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:03f61ca3674555adcb1aa717b9fc87ae936aa7a63f6aba90a474a88701278780", size = 129798 }, { url = "https://files.pythonhosted.org/packages/44/ca/020fb99c98ff7267ba18ce798ff0c8c3aa97cd949b611fc76cad3c87e534/orjson-3.10.14-cp312-cp312-win32.whl", hash = "sha256:d5075c54edf1d6ad81d4c6523ce54a748ba1208b542e54b97d8a882ecd810fd1", size = 142524 }, { url = "https://files.pythonhosted.org/packages/70/7f/f2d346819a273653825e7c92dc26418c8da506003c9fc1dfe8157e733b2e/orjson-3.10.14-cp312-cp312-win_amd64.whl", hash = "sha256:175cafd322e458603e8ce73510a068d16b6e6f389c13f69bf16de0e843d7d406", size = 133663 }, - { url = "https://files.pythonhosted.org/packages/46/bb/f1b037d89f580c79eda0940772384cc226a697be1cb4eb94ae4e792aa34c/orjson-3.10.14-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:0905ca08a10f7e0e0c97d11359609300eb1437490a7f32bbaa349de757e2e0c7", size = 249333 }, - { url = "https://files.pythonhosted.org/packages/e4/72/12958a073cace3f8acef0f9a30739d95f46bbb1544126fecad11527d4508/orjson-3.10.14-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92d13292249f9f2a3e418cbc307a9fbbef043c65f4bd8ba1eb620bc2aaba3d15", size = 125038 }, - { url = "https://files.pythonhosted.org/packages/c0/ae/461f78b1c98de1bc034af88bc21c6a792cc63373261fbc10a6ee560814fa/orjson-3.10.14-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90937664e776ad316d64251e2fa2ad69265e4443067668e4727074fe39676414", size = 130604 }, - { url = 
"https://files.pythonhosted.org/packages/ae/d2/17f50513f56bff7898840fddf7fb88f501305b9b2605d2793ff224789665/orjson-3.10.14-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9ed3d26c4cb4f6babaf791aa46a029265850e80ec2a566581f5c2ee1a14df4f1", size = 130756 }, - { url = "https://files.pythonhosted.org/packages/fa/bc/673856e4af94c9890dfd8e2054c05dc2ddc16d1728c2aa0c5bd198943105/orjson-3.10.14-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:56ee546c2bbe9599aba78169f99d1dc33301853e897dbaf642d654248280dc6e", size = 414613 }, - { url = "https://files.pythonhosted.org/packages/09/01/08c5b69b0756dd1790fcffa569d6a28dedcd7b97f825e4b46537b788908c/orjson-3.10.14-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:901e826cb2f1bdc1fcef3ef59adf0c451e8f7c0b5deb26c1a933fb66fb505eae", size = 141010 }, - { url = "https://files.pythonhosted.org/packages/5b/98/72883bb6cf88fd364996e62d2026622ca79bfb8dbaf96ccdd2018ada25b1/orjson-3.10.14-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:26336c0d4b2d44636e1e1e6ed1002f03c6aae4a8a9329561c8883f135e9ff010", size = 129732 }, - { url = "https://files.pythonhosted.org/packages/e4/99/347418f7ef56dcb478ba131a6112b8ddd5b747942652b6e77a53155a7e21/orjson-3.10.14-cp313-cp313-win32.whl", hash = "sha256:e2bc525e335a8545c4e48f84dd0328bc46158c9aaeb8a1c2276546e94540ea3d", size = 142504 }, - { url = "https://files.pythonhosted.org/packages/59/ac/5e96cad01083015f7bfdb02ccafa489da8e6caa7f4c519e215f04d2bd856/orjson-3.10.14-cp313-cp313-win_amd64.whl", hash = "sha256:eca04dfd792cedad53dc9a917da1a522486255360cb4e77619343a20d9f35364", size = 133388 }, ] [[package]] @@ -3270,19 +3078,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/20/e8/45a05d9c39d2cea61ab175dbe6a2de1d05b679e8de2011da4ee190d7e748/pandas-2.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfcb5ee8d4d50c06a51c2fffa6cff6272098ad6540aed1a76d15fb9318194d8", size = 16359235 }, { url = 
"https://files.pythonhosted.org/packages/1d/99/617d07a6a5e429ff90c90da64d428516605a1ec7d7bea494235e1c3882de/pandas-2.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:062309c1b9ea12a50e8ce661145c6aab431b1e99530d3cd60640e255778bd43a", size = 14056756 }, { url = "https://files.pythonhosted.org/packages/29/d4/1244ab8edf173a10fd601f7e13b9566c1b525c4f365d6bee918e68381889/pandas-2.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:59ef3764d0fe818125a5097d2ae867ca3fa64df032331b7e0917cf5d7bf66b13", size = 11504248 }, - { url = "https://files.pythonhosted.org/packages/64/22/3b8f4e0ed70644e85cfdcd57454686b9057c6c38d2f74fe4b8bc2527214a/pandas-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f00d1345d84d8c86a63e476bb4955e46458b304b9575dcf71102b5c705320015", size = 12477643 }, - { url = "https://files.pythonhosted.org/packages/e4/93/b3f5d1838500e22c8d793625da672f3eec046b1a99257666c94446969282/pandas-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3508d914817e153ad359d7e069d752cdd736a247c322d932eb89e6bc84217f28", size = 11281573 }, - { url = "https://files.pythonhosted.org/packages/f5/94/6c79b07f0e5aab1dcfa35a75f4817f5c4f677931d4234afcd75f0e6a66ca/pandas-2.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:22a9d949bfc9a502d320aa04e5d02feab689d61da4e7764b62c30b991c42c5f0", size = 15196085 }, - { url = "https://files.pythonhosted.org/packages/e8/31/aa8da88ca0eadbabd0a639788a6da13bb2ff6edbbb9f29aa786450a30a91/pandas-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3a255b2c19987fbbe62a9dfd6cff7ff2aa9ccab3fc75218fd4b7530f01efa24", size = 12711809 }, - { url = "https://files.pythonhosted.org/packages/ee/7c/c6dbdb0cb2a4344cacfb8de1c5808ca885b2e4dcfde8008266608f9372af/pandas-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:800250ecdadb6d9c78eae4990da62743b857b470883fa27f652db8bdde7f6659", size = 16356316 }, - { url = 
"https://files.pythonhosted.org/packages/57/b7/8b757e7d92023b832869fa8881a992696a0bfe2e26f72c9ae9f255988d42/pandas-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6374c452ff3ec675a8f46fd9ab25c4ad0ba590b71cf0656f8b6daa5202bca3fb", size = 14022055 }, - { url = "https://files.pythonhosted.org/packages/3b/bc/4b18e2b8c002572c5a441a64826252ce5da2aa738855747247a971988043/pandas-2.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:61c5ad4043f791b61dd4752191d9f07f0ae412515d59ba8f005832a532f8736d", size = 11481175 }, - { url = "https://files.pythonhosted.org/packages/76/a3/a5d88146815e972d40d19247b2c162e88213ef51c7c25993942c39dbf41d/pandas-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3b71f27954685ee685317063bf13c7709a7ba74fc996b84fc6821c59b0f06468", size = 12615650 }, - { url = "https://files.pythonhosted.org/packages/9c/8c/f0fd18f6140ddafc0c24122c8a964e48294acc579d47def376fef12bcb4a/pandas-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:38cf8125c40dae9d5acc10fa66af8ea6fdf760b2714ee482ca691fc66e6fcb18", size = 11290177 }, - { url = "https://files.pythonhosted.org/packages/ed/f9/e995754eab9c0f14c6777401f7eece0943840b7a9fc932221c19d1abee9f/pandas-2.2.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ba96630bc17c875161df3818780af30e43be9b166ce51c9a18c1feae342906c2", size = 14651526 }, - { url = "https://files.pythonhosted.org/packages/25/b0/98d6ae2e1abac4f35230aa756005e8654649d305df9a28b16b9ae4353bff/pandas-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1db71525a1538b30142094edb9adc10be3f3e176748cd7acc2240c2f2e5aa3a4", size = 11871013 }, - { url = "https://files.pythonhosted.org/packages/cc/57/0f72a10f9db6a4628744c8e8f0df4e6e21de01212c7c981d31e50ffc8328/pandas-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:15c0e1e02e93116177d29ff83e8b1619c93ddc9c49083f237d4312337a61165d", size = 15711620 }, - { url = 
"https://files.pythonhosted.org/packages/ab/5f/b38085618b950b79d2d9164a711c52b10aefc0ae6833b96f626b7021b2ed/pandas-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a", size = 13098436 }, ] [[package]] @@ -3380,25 +3175,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e7/c4/fc6e86750523f367923522014b821c11ebc5ad402e659d8c9d09b3c9d70c/pillow-11.1.0-cp312-cp312-win32.whl", hash = "sha256:cfd5cd998c2e36a862d0e27b2df63237e67273f2fc78f47445b14e73a810e7e6", size = 2291630 }, { url = "https://files.pythonhosted.org/packages/08/5c/2104299949b9d504baf3f4d35f73dbd14ef31bbd1ddc2c1b66a5b7dfda44/pillow-11.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:a697cd8ba0383bba3d2d3ada02b34ed268cb548b369943cd349007730c92bddf", size = 2626369 }, { url = "https://files.pythonhosted.org/packages/37/f3/9b18362206b244167c958984b57c7f70a0289bfb59a530dd8af5f699b910/pillow-11.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:4dd43a78897793f60766563969442020e90eb7847463eca901e41ba186a7d4a5", size = 2375240 }, - { url = "https://files.pythonhosted.org/packages/b3/31/9ca79cafdce364fd5c980cd3416c20ce1bebd235b470d262f9d24d810184/pillow-11.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ae98e14432d458fc3de11a77ccb3ae65ddce70f730e7c76140653048c71bfcbc", size = 3226640 }, - { url = "https://files.pythonhosted.org/packages/ac/0f/ff07ad45a1f172a497aa393b13a9d81a32e1477ef0e869d030e3c1532521/pillow-11.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cc1331b6d5a6e144aeb5e626f4375f5b7ae9934ba620c0ac6b3e43d5e683a0f0", size = 3101437 }, - { url = "https://files.pythonhosted.org/packages/08/2f/9906fca87a68d29ec4530be1f893149e0cb64a86d1f9f70a7cfcdfe8ae44/pillow-11.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:758e9d4ef15d3560214cddbc97b8ef3ef86ce04d62ddac17ad39ba87e89bd3b1", size = 4326605 }, - { url = 
"https://files.pythonhosted.org/packages/b0/0f/f3547ee15b145bc5c8b336401b2d4c9d9da67da9dcb572d7c0d4103d2c69/pillow-11.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b523466b1a31d0dcef7c5be1f20b942919b62fd6e9a9be199d035509cbefc0ec", size = 4411173 }, - { url = "https://files.pythonhosted.org/packages/b1/df/bf8176aa5db515c5de584c5e00df9bab0713548fd780c82a86cba2c2fedb/pillow-11.1.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:9044b5e4f7083f209c4e35aa5dd54b1dd5b112b108648f5c902ad586d4f945c5", size = 4369145 }, - { url = "https://files.pythonhosted.org/packages/de/7c/7433122d1cfadc740f577cb55526fdc39129a648ac65ce64db2eb7209277/pillow-11.1.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:3764d53e09cdedd91bee65c2527815d315c6b90d7b8b79759cc48d7bf5d4f114", size = 4496340 }, - { url = "https://files.pythonhosted.org/packages/25/46/dd94b93ca6bd555588835f2504bd90c00d5438fe131cf01cfa0c5131a19d/pillow-11.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:31eba6bbdd27dde97b0174ddf0297d7a9c3a507a8a1480e1e60ef914fe23d352", size = 4296906 }, - { url = "https://files.pythonhosted.org/packages/a8/28/2f9d32014dfc7753e586db9add35b8a41b7a3b46540e965cb6d6bc607bd2/pillow-11.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b5d658fbd9f0d6eea113aea286b21d3cd4d3fd978157cbf2447a6035916506d3", size = 4431759 }, - { url = "https://files.pythonhosted.org/packages/33/48/19c2cbe7403870fbe8b7737d19eb013f46299cdfe4501573367f6396c775/pillow-11.1.0-cp313-cp313-win32.whl", hash = "sha256:f86d3a7a9af5d826744fabf4afd15b9dfef44fe69a98541f666f66fbb8d3fef9", size = 2291657 }, - { url = "https://files.pythonhosted.org/packages/3b/ad/285c556747d34c399f332ba7c1a595ba245796ef3e22eae190f5364bb62b/pillow-11.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:593c5fd6be85da83656b93ffcccc2312d2d149d251e98588b14fbc288fd8909c", size = 2626304 }, - { url = 
"https://files.pythonhosted.org/packages/e5/7b/ef35a71163bf36db06e9c8729608f78dedf032fc8313d19bd4be5c2588f3/pillow-11.1.0-cp313-cp313-win_arm64.whl", hash = "sha256:11633d58b6ee5733bde153a8dafd25e505ea3d32e261accd388827ee987baf65", size = 2375117 }, - { url = "https://files.pythonhosted.org/packages/79/30/77f54228401e84d6791354888549b45824ab0ffde659bafa67956303a09f/pillow-11.1.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:70ca5ef3b3b1c4a0812b5c63c57c23b63e53bc38e758b37a951e5bc466449861", size = 3230060 }, - { url = "https://files.pythonhosted.org/packages/ce/b1/56723b74b07dd64c1010fee011951ea9c35a43d8020acd03111f14298225/pillow-11.1.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:8000376f139d4d38d6851eb149b321a52bb8893a88dae8ee7d95840431977081", size = 3106192 }, - { url = "https://files.pythonhosted.org/packages/e1/cd/7bf7180e08f80a4dcc6b4c3a0aa9e0b0ae57168562726a05dc8aa8fa66b0/pillow-11.1.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ee85f0696a17dd28fbcfceb59f9510aa71934b483d1f5601d1030c3c8304f3c", size = 4446805 }, - { url = "https://files.pythonhosted.org/packages/97/42/87c856ea30c8ed97e8efbe672b58c8304dee0573f8c7cab62ae9e31db6ae/pillow-11.1.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:dd0e081319328928531df7a0e63621caf67652c8464303fd102141b785ef9547", size = 4530623 }, - { url = "https://files.pythonhosted.org/packages/ff/41/026879e90c84a88e33fb00cc6bd915ac2743c67e87a18f80270dfe3c2041/pillow-11.1.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:e63e4e5081de46517099dc30abe418122f54531a6ae2ebc8680bcd7096860eab", size = 4465191 }, - { url = "https://files.pythonhosted.org/packages/e5/fb/a7960e838bc5df57a2ce23183bfd2290d97c33028b96bde332a9057834d3/pillow-11.1.0-cp313-cp313t-win32.whl", hash = "sha256:dda60aa465b861324e65a78c9f5cf0f4bc713e4309f83bc387be158b077963d9", size = 2295494 }, - { url = 
"https://files.pythonhosted.org/packages/d7/6c/6ec83ee2f6f0fda8d4cf89045c6be4b0373ebfc363ba8538f8c999f63fcd/pillow-11.1.0-cp313-cp313t-win_amd64.whl", hash = "sha256:ad5db5781c774ab9a9b2c4302bbf0c1014960a0a7be63278d13ae6fdf88126fe", size = 2631595 }, - { url = "https://files.pythonhosted.org/packages/cf/6c/41c21c6c8af92b9fea313aa47c75de49e2f9a467964ee33eb0135d47eb64/pillow-11.1.0-cp313-cp313t-win_arm64.whl", hash = "sha256:67cd427c68926108778a9005f2a04adbd5e67c442ed21d95389fe1d595458756", size = 2377651 }, ] [[package]] @@ -3501,22 +3277,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e9/2f/6b32f273fa02e978b7577159eae7471b3cfb88b48563b1c2578b2d7ca0bb/propcache-0.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b74c261802d3d2b85c9df2dfb2fa81b6f90deeef63c2db9f0e029a3cac50b518", size = 230704 }, { url = "https://files.pythonhosted.org/packages/5c/2e/f40ae6ff5624a5f77edd7b8359b208b5455ea113f68309e2b00a2e1426b6/propcache-0.2.1-cp312-cp312-win32.whl", hash = "sha256:d09c333d36c1409d56a9d29b3a1b800a42c76a57a5a8907eacdbce3f18768246", size = 40050 }, { url = "https://files.pythonhosted.org/packages/3b/77/a92c3ef994e47180862b9d7d11e37624fb1c00a16d61faf55115d970628b/propcache-0.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:c214999039d4f2a5b2073ac506bba279945233da8c786e490d411dfc30f855c1", size = 44117 }, - { url = "https://files.pythonhosted.org/packages/0f/2a/329e0547cf2def8857157f9477669043e75524cc3e6251cef332b3ff256f/propcache-0.2.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aca405706e0b0a44cc6bfd41fbe89919a6a56999157f6de7e182a990c36e37bc", size = 77002 }, - { url = "https://files.pythonhosted.org/packages/12/2d/c4df5415e2382f840dc2ecbca0eeb2293024bc28e57a80392f2012b4708c/propcache-0.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:12d1083f001ace206fe34b6bdc2cb94be66d57a850866f0b908972f90996b3e9", size = 44639 }, - { url = 
"https://files.pythonhosted.org/packages/d0/5a/21aaa4ea2f326edaa4e240959ac8b8386ea31dedfdaa636a3544d9e7a408/propcache-0.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d93f3307ad32a27bda2e88ec81134b823c240aa3abb55821a8da553eed8d9439", size = 44049 }, - { url = "https://files.pythonhosted.org/packages/4e/3e/021b6cd86c0acc90d74784ccbb66808b0bd36067a1bf3e2deb0f3845f618/propcache-0.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba278acf14471d36316159c94a802933d10b6a1e117b8554fe0d0d9b75c9d536", size = 224819 }, - { url = "https://files.pythonhosted.org/packages/3c/57/c2fdeed1b3b8918b1770a133ba5c43ad3d78e18285b0c06364861ef5cc38/propcache-0.2.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4e6281aedfca15301c41f74d7005e6e3f4ca143584ba696ac69df4f02f40d629", size = 229625 }, - { url = "https://files.pythonhosted.org/packages/9d/81/70d4ff57bf2877b5780b466471bebf5892f851a7e2ca0ae7ffd728220281/propcache-0.2.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5b750a8e5a1262434fb1517ddf64b5de58327f1adc3524a5e44c2ca43305eb0b", size = 232934 }, - { url = "https://files.pythonhosted.org/packages/3c/b9/bb51ea95d73b3fb4100cb95adbd4e1acaf2cbb1fd1083f5468eeb4a099a8/propcache-0.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf72af5e0fb40e9babf594308911436c8efde3cb5e75b6f206c34ad18be5c052", size = 227361 }, - { url = "https://files.pythonhosted.org/packages/f1/20/3c6d696cd6fd70b29445960cc803b1851a1131e7a2e4ee261ee48e002bcd/propcache-0.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b2d0a12018b04f4cb820781ec0dffb5f7c7c1d2a5cd22bff7fb055a2cb19ebce", size = 213904 }, - { url = "https://files.pythonhosted.org/packages/a1/cb/1593bfc5ac6d40c010fa823f128056d6bc25b667f5393781e37d62f12005/propcache-0.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:e800776a79a5aabdb17dcc2346a7d66d0777e942e4cd251defeb084762ecd17d", size = 212632 }, - { url = "https://files.pythonhosted.org/packages/6d/5c/e95617e222be14a34c709442a0ec179f3207f8a2b900273720501a70ec5e/propcache-0.2.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:4160d9283bd382fa6c0c2b5e017acc95bc183570cd70968b9202ad6d8fc48dce", size = 207897 }, - { url = "https://files.pythonhosted.org/packages/8e/3b/56c5ab3dc00f6375fbcdeefdede5adf9bee94f1fab04adc8db118f0f9e25/propcache-0.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:30b43e74f1359353341a7adb783c8f1b1c676367b011709f466f42fda2045e95", size = 208118 }, - { url = "https://files.pythonhosted.org/packages/86/25/d7ef738323fbc6ebcbce33eb2a19c5e07a89a3df2fded206065bd5e868a9/propcache-0.2.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:58791550b27d5488b1bb52bc96328456095d96206a250d28d874fafe11b3dfaf", size = 217851 }, - { url = "https://files.pythonhosted.org/packages/b3/77/763e6cef1852cf1ba740590364ec50309b89d1c818e3256d3929eb92fabf/propcache-0.2.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:0f022d381747f0dfe27e99d928e31bc51a18b65bb9e481ae0af1380a6725dd1f", size = 222630 }, - { url = "https://files.pythonhosted.org/packages/4f/e9/0f86be33602089c701696fbed8d8c4c07b6ee9605c5b7536fd27ed540c5b/propcache-0.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:297878dc9d0a334358f9b608b56d02e72899f3b8499fc6044133f0d319e2ec30", size = 216269 }, - { url = "https://files.pythonhosted.org/packages/cc/02/5ac83217d522394b6a2e81a2e888167e7ca629ef6569a3f09852d6dcb01a/propcache-0.2.1-cp313-cp313-win32.whl", hash = "sha256:ddfab44e4489bd79bda09d84c430677fc7f0a4939a73d2bba3073036f487a0a6", size = 39472 }, - { url = "https://files.pythonhosted.org/packages/f4/33/d6f5420252a36034bc8a3a01171bc55b4bff5df50d1c63d9caa50693662f/propcache-0.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:556fc6c10989f19a179e4321e5d678db8eb2924131e64652a51fe83e4c3db0e1", size = 43363 }, { url = 
"https://files.pythonhosted.org/packages/41/b6/c5319caea262f4821995dca2107483b94a3345d4607ad797c76cb9c36bcc/propcache-0.2.1-py3-none-any.whl", hash = "sha256:52277518d6aae65536e9cea52d4e7fd2f7a66f4aa2d30ed3f2fcea620ace3c54", size = 11818 }, ] @@ -3552,8 +3312,6 @@ version = "6.1.1" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/1f/5a/07871137bb752428aa4b659f910b399ba6f291156bdea939be3e96cae7cb/psutil-6.1.1.tar.gz", hash = "sha256:cf8496728c18f2d0b45198f06895be52f36611711746b7f30c464b422b50e2f5", size = 508502 } wheels = [ - { url = "https://files.pythonhosted.org/packages/d2/d4/8095b53c4950f44dc99b8d983b796f405ae1f58d80978fcc0421491b4201/psutil-6.1.1-cp27-none-win32.whl", hash = "sha256:6d4281f5bbca041e2292be3380ec56a9413b790579b8e593b1784499d0005dac", size = 246855 }, - { url = "https://files.pythonhosted.org/packages/b1/63/0b6425ea4f2375988209a9934c90d6079cc7537847ed58a28fbe30f4277e/psutil-6.1.1-cp27-none-win_amd64.whl", hash = "sha256:c777eb75bb33c47377c9af68f30e9f11bc78e0f07fbf907be4a5d70b2fe5f030", size = 250110 }, { url = "https://files.pythonhosted.org/packages/61/99/ca79d302be46f7bdd8321089762dd4476ee725fce16fc2b2e1dbba8cac17/psutil-6.1.1-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:fc0ed7fe2231a444fc219b9c42d0376e0a9a1a72f16c5cfa0f68d19f1a0663e8", size = 247511 }, { url = "https://files.pythonhosted.org/packages/0b/6b/73dbde0dd38f3782905d4587049b9be64d76671042fdcaf60e2430c6796d/psutil-6.1.1-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:0bdd4eab935276290ad3cb718e9809412895ca6b5b334f5a9111ee6d9aff9377", size = 248985 }, { url = "https://files.pythonhosted.org/packages/17/38/c319d31a1d3f88c5b79c68b3116c129e5133f1822157dd6da34043e32ed6/psutil-6.1.1-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6e06c20c05fe95a3d7302d74e7097756d4ba1247975ad6905441ae1b5b66003", size = 284488 }, @@ -3624,19 +3382,6 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/b6/1f/966b722251a7354114ccbb71cf1a83922023e69efd8945ebf628a851ec4c/pyarrow-19.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:a08e2a8a039a3f72afb67a6668180f09fddaa38fe0d21f13212b4aba4b5d2451", size = 40505858 }, { url = "https://files.pythonhosted.org/packages/3b/5e/6bc81aa7fc9affc7d1c03b912fbcc984ca56c2a18513684da267715dab7b/pyarrow-19.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:f43f5aef2a13d4d56adadae5720d1fed4c1356c993eda8b59dace4b5983843c1", size = 42084973 }, { url = "https://files.pythonhosted.org/packages/53/c3/2f56da818b6a4758cbd514957c67bd0f078ebffa5390ee2e2bf0f9e8defc/pyarrow-19.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:2f672f5364b2d7829ef7c94be199bb88bf5661dd485e21d2d37de12ccb78a136", size = 25241976 }, - { url = "https://files.pythonhosted.org/packages/f5/b9/ba07ed3dd6b6e4f379b78e9c47c50c8886e07862ab7fa6339ac38622d755/pyarrow-19.0.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:cf3bf0ce511b833f7bc5f5bb3127ba731e97222023a444b7359f3a22e2a3b463", size = 30651291 }, - { url = "https://files.pythonhosted.org/packages/ad/10/0d304243c8277035298a68a70807efb76199c6c929bb3363c92ac9be6a0d/pyarrow-19.0.0-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:4d8b0c0de0a73df1f1bf439af1b60f273d719d70648e898bc077547649bb8352", size = 32100461 }, - { url = "https://files.pythonhosted.org/packages/8a/61/bcfc5182e11831bca3f849945b9b106e09fd10ded773dff466658e972a45/pyarrow-19.0.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92aff08e23d281c69835e4a47b80569242a504095ef6a6223c1f6bb8883431d", size = 41132491 }, - { url = "https://files.pythonhosted.org/packages/8e/87/2915a29049ec352dc69a967fbcbd76b0180319233de0daf8bd368df37099/pyarrow-19.0.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3b78eff5968a1889a0f3bc81ca57e1e19b75f664d9c61a42a604bf9d8402aae", size = 42192529 }, - { url = 
"https://files.pythonhosted.org/packages/48/18/44e5542b2707a8afaf78b5b88c608f261871ae77787eac07b7c679ca6f0f/pyarrow-19.0.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:b34d3bde38eba66190b215bae441646330f8e9da05c29e4b5dd3e41bde701098", size = 40495363 }, - { url = "https://files.pythonhosted.org/packages/ba/d6/5096deb7599bbd20bc2768058fe23bc725b88eb41bee58303293583a2935/pyarrow-19.0.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:5418d4d0fab3a0ed497bad21d17a7973aad336d66ad4932a3f5f7480d4ca0c04", size = 42074075 }, - { url = "https://files.pythonhosted.org/packages/2c/df/e3c839c04c284c9ec3d62b02a8c452b795d9b07b04079ab91ce33484d4c5/pyarrow-19.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:e82c3d5e44e969c217827b780ed8faf7ac4c53f934ae9238872e749fa531f7c9", size = 25239803 }, - { url = "https://files.pythonhosted.org/packages/6a/d3/a6d4088e906c7b5d47792256212606d2ae679046dc750eee0ae167338e5c/pyarrow-19.0.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:f208c3b58a6df3b239e0bb130e13bc7487ed14f39a9ff357b6415e3f6339b560", size = 30695401 }, - { url = "https://files.pythonhosted.org/packages/94/25/70040fd0e397dd1b937f459eaeeec942a76027357491dca0ada09d1322af/pyarrow-19.0.0-cp313-cp313t-macosx_12_0_x86_64.whl", hash = "sha256:c751c1c93955b7a84c06794df46f1cec93e18610dcd5ab7d08e89a81df70a849", size = 32104680 }, - { url = "https://files.pythonhosted.org/packages/4e/f9/92783290cc0d80ca16d34b0c126305bfacca4b87dd889c8f16c6ef2a8fd7/pyarrow-19.0.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b903afaa5df66d50fc38672ad095806443b05f202c792694f3a604ead7c6ea6e", size = 41076754 }, - { url = "https://files.pythonhosted.org/packages/05/46/2c9870f50a495c72e2b8982ae29a9b1680707ea936edc0de444cec48f875/pyarrow-19.0.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a22a4bc0937856263df8b94f2f2781b33dd7f876f787ed746608e06902d691a5", size = 42163133 }, - { url = 
"https://files.pythonhosted.org/packages/7b/2f/437922b902549228fb15814e8a26105bff2787ece466a8d886eb6699efad/pyarrow-19.0.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:5e8a28b918e2e878c918f6d89137386c06fe577cd08d73a6be8dafb317dc2d73", size = 40452210 }, - { url = "https://files.pythonhosted.org/packages/36/ef/1d7975053af9d106da973bac142d0d4da71b7550a3576cc3e0b3f444d21a/pyarrow-19.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:29cd86c8001a94f768f79440bf83fee23963af5e7bc68ce3a7e5f120e17edf89", size = 42077618 }, ] [[package]] @@ -3678,12 +3423,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/76/65/cb014acc41cd5bf6bbfa4671c7faffffb9cee01706642c2dec70c5209ac8/pyclipper-1.3.0.post6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58eae2ff92a8cae1331568df076c4c5775bf946afab0068b217f0cf8e188eb3c", size = 963797 }, { url = "https://files.pythonhosted.org/packages/80/ec/b40cd81ab7598984167508a5369a2fa31a09fe3b3e3d0b73aa50e06d4b3f/pyclipper-1.3.0.post6-cp312-cp312-win32.whl", hash = "sha256:793b0aa54b914257aa7dc76b793dd4dcfb3c84011d48df7e41ba02b571616eaf", size = 99456 }, { url = "https://files.pythonhosted.org/packages/24/3a/7d6292e3c94fb6b872d8d7e80d909dc527ee6b0af73b753c63fdde65a7da/pyclipper-1.3.0.post6-cp312-cp312-win_amd64.whl", hash = "sha256:d3f9da96f83b8892504923beb21a481cd4516c19be1d39eb57a92ef1c9a29548", size = 110278 }, - { url = "https://files.pythonhosted.org/packages/8c/b3/75232906bd13f869600d23bdb8fe6903cc899fa7e96981ae4c9b7d9c409e/pyclipper-1.3.0.post6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:f129284d2c7bcd213d11c0f35e1ae506a1144ce4954e9d1734d63b120b0a1b58", size = 268254 }, - { url = "https://files.pythonhosted.org/packages/0b/db/35843050a3dd7586781497a21ca6c8d48111afb66061cb40c3d3c288596d/pyclipper-1.3.0.post6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:188fbfd1d30d02247f92c25ce856f5f3c75d841251f43367dbcf10935bc48f38", size = 142204 }, - { url = 
"https://files.pythonhosted.org/packages/7c/d7/1faa0ff35caa02cb32cb0583688cded3f38788f33e02bfe6461fbcc1bee1/pyclipper-1.3.0.post6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d6d129d0c2587f2f5904d201a4021f859afbb45fada4261c9fdedb2205b09d23", size = 943835 }, - { url = "https://files.pythonhosted.org/packages/31/10/c0bf140bee2844e2c0617fdcc8a4e8daf98e71710046b06034e6f1963404/pyclipper-1.3.0.post6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c9c80b5c46eef38ba3f12dd818dc87f5f2a0853ba914b6f91b133232315f526", size = 962510 }, - { url = "https://files.pythonhosted.org/packages/85/6f/8c6afc49b51b1bf16d5903ecd5aee657cf88f52c83cb5fabf771deeba728/pyclipper-1.3.0.post6-cp313-cp313-win32.whl", hash = "sha256:b15113ec4fc423b58e9ae80aa95cf5a0802f02d8f02a98a46af3d7d66ff0cc0e", size = 98836 }, - { url = "https://files.pythonhosted.org/packages/d5/19/9ff4551b42f2068686c50c0d199072fa67aee57fc5cf86770cacf71efda3/pyclipper-1.3.0.post6-cp313-cp313-win_amd64.whl", hash = "sha256:e5ff68fa770ac654c7974fc78792978796f068bd274e95930c0691c31e192889", size = 109672 }, ] [[package]] @@ -3742,18 +3481,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e3/b9/41f7efe80f6ce2ed3ee3c2dcfe10ab7adc1172f778cc9659509a79518c43/pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24", size = 2116872 }, { url = "https://files.pythonhosted.org/packages/63/08/b59b7a92e03dd25554b0436554bf23e7c29abae7cce4b1c459cd92746811/pydantic_core-2.23.4-cp312-none-win32.whl", hash = "sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84", size = 1738535 }, { url = "https://files.pythonhosted.org/packages/88/8d/479293e4d39ab409747926eec4329de5b7129beaedc3786eca070605d07f/pydantic_core-2.23.4-cp312-none-win_amd64.whl", hash = "sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9", size = 1917992 }, - { url = 
"https://files.pythonhosted.org/packages/ad/ef/16ee2df472bf0e419b6bc68c05bf0145c49247a1095e85cee1463c6a44a1/pydantic_core-2.23.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc", size = 1856143 }, - { url = "https://files.pythonhosted.org/packages/da/fa/bc3dbb83605669a34a93308e297ab22be82dfb9dcf88c6cf4b4f264e0a42/pydantic_core-2.23.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd", size = 1770063 }, - { url = "https://files.pythonhosted.org/packages/4e/48/e813f3bbd257a712303ebdf55c8dc46f9589ec74b384c9f652597df3288d/pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05", size = 1790013 }, - { url = "https://files.pythonhosted.org/packages/b4/e0/56eda3a37929a1d297fcab1966db8c339023bcca0b64c5a84896db3fcc5c/pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d", size = 1801077 }, - { url = "https://files.pythonhosted.org/packages/04/be/5e49376769bfbf82486da6c5c1683b891809365c20d7c7e52792ce4c71f3/pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510", size = 1996782 }, - { url = "https://files.pythonhosted.org/packages/bc/24/e3ee6c04f1d58cc15f37bcc62f32c7478ff55142b7b3e6d42ea374ea427c/pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6", size = 2661375 }, - { url = "https://files.pythonhosted.org/packages/c1/f8/11a9006de4e89d016b8de74ebb1db727dc100608bb1e6bbe9d56a3cbbcce/pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b", size = 2071635 }, - { url = "https://files.pythonhosted.org/packages/7c/45/bdce5779b59f468bdf262a5bc9eecbae87f271c51aef628d8c073b4b4b4c/pydantic_core-2.23.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327", size = 1916994 }, - { url = "https://files.pythonhosted.org/packages/d8/fa/c648308fe711ee1f88192cad6026ab4f925396d1293e8356de7e55be89b5/pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6", size = 1968877 }, - { url = "https://files.pythonhosted.org/packages/16/16/b805c74b35607d24d37103007f899abc4880923b04929547ae68d478b7f4/pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f", size = 2116814 }, - { url = "https://files.pythonhosted.org/packages/d1/58/5305e723d9fcdf1c5a655e6a4cc2a07128bf644ff4b1d98daf7a9dbf57da/pydantic_core-2.23.4-cp313-none-win32.whl", hash = "sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769", size = 1738360 }, - { url = "https://files.pythonhosted.org/packages/a5/ae/e14b0ff8b3f48e02394d8acd911376b7b66e164535687ef7dc24ea03072f/pydantic_core-2.23.4-cp313-none-win_amd64.whl", hash = "sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5", size = 1919411 }, ] [[package]] @@ -3859,15 +3586,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/c1/66/e98b2308971d45667cb8179d4d66deca47336c90663a7e0527589f1038b7/pymongo-4.10.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e974ab16a60be71a8dfad4e5afccf8dd05d41c758060f5d5bda9a758605d9a5d", size = 1862230 }, { url = "https://files.pythonhosted.org/packages/6c/80/ba9b7ed212a5f8cf8ad7037ed5bbebc1c587fc09242108f153776e4a338b/pymongo-4.10.1-cp312-cp312-win32.whl", 
hash = "sha256:544890085d9641f271d4f7a47684450ed4a7344d6b72d5968bfae32203b1bb7c", size = 903045 }, { url = "https://files.pythonhosted.org/packages/76/8b/5afce891d78159912c43726fab32641e3f9718f14be40f978c148ea8db48/pymongo-4.10.1-cp312-cp312-win_amd64.whl", hash = "sha256:dcc07b1277e8b4bf4d7382ca133850e323b7ab048b8353af496d050671c7ac52", size = 926686 }, - { url = "https://files.pythonhosted.org/packages/83/76/df0fd0622a85b652ad0f91ec8a0ebfd0cb86af6caec8999a22a1f7481203/pymongo-4.10.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:90bc6912948dfc8c363f4ead54d54a02a15a7fee6cfafb36dc450fc8962d2cb7", size = 996981 }, - { url = "https://files.pythonhosted.org/packages/4c/39/fa50531de8d1d8af8c253caeed20c18ccbf1de5d970119c4a42c89f2bd09/pymongo-4.10.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:594dd721b81f301f33e843453638e02d92f63c198358e5a0fa8b8d0b1218dabc", size = 996769 }, - { url = "https://files.pythonhosted.org/packages/bf/50/6936612c1b2e32d95c30e860552d3bc9e55cfa79a4f73b73225fa05a028c/pymongo-4.10.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0783e0c8e95397c84e9cf8ab092ab1e5dd7c769aec0ef3a5838ae7173b98dea0", size = 2169159 }, - { url = "https://files.pythonhosted.org/packages/78/8c/45cb23096e66c7b1da62bb8d9c7ac2280e7c1071e13841e7fb71bd44fd9f/pymongo-4.10.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6fb6a72e88df46d1c1040fd32cd2d2c5e58722e5d3e31060a0393f04ad3283de", size = 2260569 }, - { url = "https://files.pythonhosted.org/packages/29/b6/e5ec697087e527a6a15c5f8daa5bcbd641edb8813487345aaf963d3537dc/pymongo-4.10.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2e3a593333e20c87415420a4fb76c00b7aae49b6361d2e2205b6fece0563bf40", size = 2218142 }, - { url = "https://files.pythonhosted.org/packages/ad/8a/c0b45bee0f0c57732c5c36da5122c1796efd5a62d585fbc504e2f1401244/pymongo-4.10.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:72e2ace7456167c71cfeca7dcb47bd5dceda7db2231265b80fc625c5e8073186", size = 2170623 }, - { url = "https://files.pythonhosted.org/packages/3b/26/6c0a5360a571df24c9bfbd51b1dae279f4f0c511bdbc0906f6df6d1543fa/pymongo-4.10.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ad05eb9c97e4f589ed9e74a00fcaac0d443ccd14f38d1258eb4c39a35dd722b", size = 2111112 }, - { url = "https://files.pythonhosted.org/packages/38/bc/5b91b728e1cf505d931f04e24cbac71ae519523785570ed046cdc31e6efc/pymongo-4.10.1-cp313-cp313-win32.whl", hash = "sha256:ee4c86d8e6872a61f7888fc96577b0ea165eb3bdb0d841962b444fa36001e2bb", size = 948727 }, - { url = "https://files.pythonhosted.org/packages/0d/2a/7c24a6144eaa06d18ed52822ea2b0f119fd9267cd1abbb75dae4d89a3803/pymongo-4.10.1-cp313-cp313-win_amd64.whl", hash = "sha256:45ee87a4e12337353242bc758accc7fb47a2f2d9ecc0382a61e64c8f01e86708", size = 976873 }, ] [[package]] @@ -4103,9 +3821,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/00/7c/d00d6bdd96de4344e06c4afbf218bc86b54436a94c01c71a8701f613aa56/pywin32-308-cp312-cp312-win32.whl", hash = "sha256:587f3e19696f4bf96fde9d8a57cec74a57021ad5f204c9e627e15c33ff568897", size = 5939729 }, { url = "https://files.pythonhosted.org/packages/21/27/0c8811fbc3ca188f93b5354e7c286eb91f80a53afa4e11007ef661afa746/pywin32-308-cp312-cp312-win_amd64.whl", hash = "sha256:00b3e11ef09ede56c6a43c71f2d31857cf7c54b0ab6e78ac659497abd2834f47", size = 6543015 }, { url = "https://files.pythonhosted.org/packages/9d/0f/d40f8373608caed2255781a3ad9a51d03a594a1248cd632d6a298daca693/pywin32-308-cp312-cp312-win_arm64.whl", hash = "sha256:9b4de86c8d909aed15b7011182c8cab38c8850de36e6afb1f0db22b8959e3091", size = 7976033 }, - { url = "https://files.pythonhosted.org/packages/a9/a4/aa562d8935e3df5e49c161b427a3a2efad2ed4e9cf81c3de636f1fdddfd0/pywin32-308-cp313-cp313-win32.whl", hash = "sha256:1c44539a37a5b7b21d02ab34e6a4d314e0788f1690d65b48e9b0b89f31abbbed", size = 5938579 }, 
- { url = "https://files.pythonhosted.org/packages/c7/50/b0efb8bb66210da67a53ab95fd7a98826a97ee21f1d22949863e6d588b22/pywin32-308-cp313-cp313-win_amd64.whl", hash = "sha256:fd380990e792eaf6827fcb7e187b2b4b1cede0585e3d0c9e84201ec27b9905e4", size = 6542056 }, - { url = "https://files.pythonhosted.org/packages/26/df/2b63e3e4f2df0224f8aaf6d131f54fe4e8c96400eb9df563e2aae2e1a1f9/pywin32-308-cp313-cp313-win_arm64.whl", hash = "sha256:ef313c46d4c18dfb82a2431e3051ac8f112ccee1a34f29c263c583c568db63cd", size = 7974986 }, ] [[package]] @@ -4141,15 +3856,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/c9/1f/4f998c900485e5c0ef43838363ba4a9723ac0ad73a9dc42068b12aaba4e4/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", size = 756611 }, { url = "https://files.pythonhosted.org/packages/df/d1/f5a275fdb252768b7a11ec63585bc38d0e87c9e05668a139fea92b80634c/PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", size = 140591 }, { url = "https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338 }, - { url = "https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309 }, - { url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679 }, - { url = 
"https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428 }, - { url = "https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361 }, - { url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523 }, - { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660 }, - { url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597 }, - { url = "https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527 }, - { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446 }, ] [[package]] @@ -4218,21 +3924,6 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/66/5d/5dc02c87d9a0e64e0abd728d3255ddce8475e06b6be3f732a460f0a360c9/rapidfuzz-3.11.0-cp312-cp312-win32.whl", hash = "sha256:ba26d87fe7fcb56c4a53b549a9e0e9143f6b0df56d35fe6ad800c902447acd5b", size = 1824882 }, { url = "https://files.pythonhosted.org/packages/b7/da/a37d532cbefd7242191abf18f438b315bf5c72d742f78414a8ec1b7396cf/rapidfuzz-3.11.0-cp312-cp312-win_amd64.whl", hash = "sha256:b1f7efdd7b7adb32102c2fa481ad6f11923e2deb191f651274be559d56fc913b", size = 1606419 }, { url = "https://files.pythonhosted.org/packages/92/d0/1406d6e110aff87303e98f47adc5e76ef2e69d51cdd08b2d463520158cab/rapidfuzz-3.11.0-cp312-cp312-win_arm64.whl", hash = "sha256:ed78c8e94f57b44292c1a0350f580e18d3a3c5c0800e253f1583580c1b417ad2", size = 858655 }, - { url = "https://files.pythonhosted.org/packages/8a/30/984f1013d28b88304386c8e70b5d63db4765c28be8d9ef68d177c9addc77/rapidfuzz-3.11.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e60814edd0c9b511b5f377d48b9782b88cfe8be07a98f99973669299c8bb318a", size = 1931354 }, - { url = "https://files.pythonhosted.org/packages/a4/8a/41d4f95c5742a8a47c0e96c02957f72f8c34411cecde87fe371d5e09807e/rapidfuzz-3.11.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3f28952da055dbfe75828891cd3c9abf0984edc8640573c18b48c14c68ca5e06", size = 1417918 }, - { url = "https://files.pythonhosted.org/packages/e3/26/031ac8366831da6afc5f25462196eab0e0caf9422c83c007307e23a6f010/rapidfuzz-3.11.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e8f93bc736020351a6f8e71666e1f486bb8bd5ce8112c443a30c77bfde0eb68", size = 1388327 }, - { url = "https://files.pythonhosted.org/packages/17/1b/927edcd3b540770d3d6d52fe079c6bffdb99e9dfa4b73585bee2a8bd6504/rapidfuzz-3.11.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:76a4a11ba8f678c9e5876a7d465ab86def047a4fcc043617578368755d63a1bc", size = 5513214 }, - { url = 
"https://files.pythonhosted.org/packages/0d/a2/c1e4f35e7bfbbd97a665f8cd119d8bd4a085f1721366cd76582dc022131b/rapidfuzz-3.11.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc0e0d41ad8a056a9886bac91ff9d9978e54a244deb61c2972cc76b66752de9c", size = 1638560 }, - { url = "https://files.pythonhosted.org/packages/39/3f/6827972efddb1e357a0b6165ae9e310d7dc5c078af3023893365c212641b/rapidfuzz-3.11.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e8ea35f2419c7d56b3e75fbde2698766daedb374f20eea28ac9b1f668ef4f74", size = 1667185 }, - { url = "https://files.pythonhosted.org/packages/cc/5d/6902b93e1273e69ea087afd16e7504099bcb8d712a9f69cb649ea05ca7e1/rapidfuzz-3.11.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd340bbd025302276b5aa221dccfe43040c7babfc32f107c36ad783f2ffd8775", size = 3107466 }, - { url = "https://files.pythonhosted.org/packages/a6/02/bdb2048c9b8edf4cd82c2e8f6a8ed9af0fbdf91810ca2b36d1be6fc996d8/rapidfuzz-3.11.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:494eef2c68305ab75139034ea25328a04a548d297712d9cf887bf27c158c388b", size = 2302041 }, - { url = "https://files.pythonhosted.org/packages/12/91/0bbe51e3c15c02578487fd10a14692a40677ea974098d8d376bafd627a89/rapidfuzz-3.11.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:5a167344c1d6db06915fb0225592afdc24d8bafaaf02de07d4788ddd37f4bc2f", size = 6899969 }, - { url = "https://files.pythonhosted.org/packages/27/9d/09b85adfd5829f60bd6dbe53ba66dad22f93a281d494a5638b5f20fb6a8a/rapidfuzz-3.11.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:8c7af25bda96ac799378ac8aba54a8ece732835c7b74cfc201b688a87ed11152", size = 2669022 }, - { url = "https://files.pythonhosted.org/packages/cb/07/6fb723963243335c3bf73925914b6998649d642eff550187454d5bb3d077/rapidfuzz-3.11.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:d2a0f7e17f33e7890257367a1662b05fecaf56625f7dbb6446227aaa2b86448b", size = 3229475 }, - { url = 
"https://files.pythonhosted.org/packages/3a/8e/e9af6da2e235aa29ad2bb0a1fc2472b2949ed8d9ff8fb0f05b4bfbbf7675/rapidfuzz-3.11.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4d0d26c7172bdb64f86ee0765c5b26ea1dc45c52389175888ec073b9b28f4305", size = 4143861 }, - { url = "https://files.pythonhosted.org/packages/fd/d8/4677e36e958b4d95d039d254d597db9c020896c8130911dc36b136373b87/rapidfuzz-3.11.0-cp313-cp313-win32.whl", hash = "sha256:6ad02bab756751c90fa27f3069d7b12146613061341459abf55f8190d899649f", size = 1822624 }, - { url = "https://files.pythonhosted.org/packages/e8/97/1c782140e688ea2c3337d94516c635c575aa39fe62782fd53ad5d2119df4/rapidfuzz-3.11.0-cp313-cp313-win_amd64.whl", hash = "sha256:b1472986fd9c5d318399a01a0881f4a0bf4950264131bb8e2deba9df6d8c362b", size = 1604273 }, - { url = "https://files.pythonhosted.org/packages/a6/83/8b713d50bec947e945a79be47f772484307fc876c426fb26c6f369098389/rapidfuzz-3.11.0-cp313-cp313-win_arm64.whl", hash = "sha256:c408f09649cbff8da76f8d3ad878b64ba7f7abdad1471efb293d2c075e80c822", size = 857385 }, ] [[package]] @@ -4307,21 +3998,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/93/2d/dd56bb76bd8e95bbce684326302f287455b56242a4f9c61f1bc76e28360e/regex-2024.11.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0a86e7eeca091c09e021db8eb72d54751e527fa47b8d5787caf96d9831bd02ad", size = 787692 }, { url = "https://files.pythonhosted.org/packages/0b/55/31877a249ab7a5156758246b9c59539abbeba22461b7d8adc9e8475ff73e/regex-2024.11.6-cp312-cp312-win32.whl", hash = "sha256:32f9a4c643baad4efa81d549c2aadefaeba12249b2adc5af541759237eee1c54", size = 262135 }, { url = "https://files.pythonhosted.org/packages/38/ec/ad2d7de49a600cdb8dd78434a1aeffe28b9d6fc42eb36afab4a27ad23384/regex-2024.11.6-cp312-cp312-win_amd64.whl", hash = "sha256:a93c194e2df18f7d264092dc8539b8ffb86b45b899ab976aa15d48214138e81b", size = 273567 }, - { url = 
"https://files.pythonhosted.org/packages/90/73/bcb0e36614601016552fa9344544a3a2ae1809dc1401b100eab02e772e1f/regex-2024.11.6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a6ba92c0bcdf96cbf43a12c717eae4bc98325ca3730f6b130ffa2e3c3c723d84", size = 483525 }, - { url = "https://files.pythonhosted.org/packages/0f/3f/f1a082a46b31e25291d830b369b6b0c5576a6f7fb89d3053a354c24b8a83/regex-2024.11.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:525eab0b789891ac3be914d36893bdf972d483fe66551f79d3e27146191a37d4", size = 288324 }, - { url = "https://files.pythonhosted.org/packages/09/c9/4e68181a4a652fb3ef5099e077faf4fd2a694ea6e0f806a7737aff9e758a/regex-2024.11.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:086a27a0b4ca227941700e0b31425e7a28ef1ae8e5e05a33826e17e47fbfdba0", size = 284617 }, - { url = "https://files.pythonhosted.org/packages/fc/fd/37868b75eaf63843165f1d2122ca6cb94bfc0271e4428cf58c0616786dce/regex-2024.11.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bde01f35767c4a7899b7eb6e823b125a64de314a8ee9791367c9a34d56af18d0", size = 795023 }, - { url = "https://files.pythonhosted.org/packages/c4/7c/d4cd9c528502a3dedb5c13c146e7a7a539a3853dc20209c8e75d9ba9d1b2/regex-2024.11.6-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b583904576650166b3d920d2bcce13971f6f9e9a396c673187f49811b2769dc7", size = 833072 }, - { url = "https://files.pythonhosted.org/packages/4f/db/46f563a08f969159c5a0f0e722260568425363bea43bb7ae370becb66a67/regex-2024.11.6-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c4de13f06a0d54fa0d5ab1b7138bfa0d883220965a29616e3ea61b35d5f5fc7", size = 823130 }, - { url = "https://files.pythonhosted.org/packages/db/60/1eeca2074f5b87df394fccaa432ae3fc06c9c9bfa97c5051aed70e6e00c2/regex-2024.11.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3cde6e9f2580eb1665965ce9bf17ff4952f34f5b126beb509fee8f4e994f143c", size = 796857 }, - { url = 
"https://files.pythonhosted.org/packages/10/db/ac718a08fcee981554d2f7bb8402f1faa7e868c1345c16ab1ebec54b0d7b/regex-2024.11.6-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0d7f453dca13f40a02b79636a339c5b62b670141e63efd511d3f8f73fba162b3", size = 784006 }, - { url = "https://files.pythonhosted.org/packages/c2/41/7da3fe70216cea93144bf12da2b87367590bcf07db97604edeea55dac9ad/regex-2024.11.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:59dfe1ed21aea057a65c6b586afd2a945de04fc7db3de0a6e3ed5397ad491b07", size = 781650 }, - { url = "https://files.pythonhosted.org/packages/a7/d5/880921ee4eec393a4752e6ab9f0fe28009435417c3102fc413f3fe81c4e5/regex-2024.11.6-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b97c1e0bd37c5cd7902e65f410779d39eeda155800b65fc4d04cc432efa9bc6e", size = 789545 }, - { url = "https://files.pythonhosted.org/packages/dc/96/53770115e507081122beca8899ab7f5ae28ae790bfcc82b5e38976df6a77/regex-2024.11.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f9d1e379028e0fc2ae3654bac3cbbef81bf3fd571272a42d56c24007979bafb6", size = 853045 }, - { url = "https://files.pythonhosted.org/packages/31/d3/1372add5251cc2d44b451bd94f43b2ec78e15a6e82bff6a290ef9fd8f00a/regex-2024.11.6-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:13291b39131e2d002a7940fb176e120bec5145f3aeb7621be6534e46251912c4", size = 860182 }, - { url = "https://files.pythonhosted.org/packages/ed/e3/c446a64984ea9f69982ba1a69d4658d5014bc7a0ea468a07e1a1265db6e2/regex-2024.11.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4f51f88c126370dcec4908576c5a627220da6c09d0bff31cfa89f2523843316d", size = 787733 }, - { url = "https://files.pythonhosted.org/packages/2b/f1/e40c8373e3480e4f29f2692bd21b3e05f296d3afebc7e5dcf21b9756ca1c/regex-2024.11.6-cp313-cp313-win32.whl", hash = "sha256:63b13cfd72e9601125027202cad74995ab26921d8cd935c25f09c630436348ff", size = 262122 }, - { url = 
"https://files.pythonhosted.org/packages/45/94/bc295babb3062a731f52621cdc992d123111282e291abaf23faa413443ea/regex-2024.11.6-cp313-cp313-win_amd64.whl", hash = "sha256:2b3361af3198667e99927da8b84c1b010752fa4b1115ee30beaa332cabc3ef1a", size = 273545 }, ] [[package]] @@ -4485,15 +4161,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a4/f6/ff7beaeb644bcad72bcfd5a03ff36d32ee4e53a8b29a639f11bcb65d06cd/scikit_learn-1.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1061b7c028a8663fb9a1a1baf9317b64a257fcb036dae5c8752b2abef31d136f", size = 12253728 }, { url = "https://files.pythonhosted.org/packages/29/7a/8bce8968883e9465de20be15542f4c7e221952441727c4dad24d534c6d99/scikit_learn-1.6.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e69fab4ebfc9c9b580a7a80111b43d214ab06250f8a7ef590a4edf72464dd86", size = 13147700 }, { url = "https://files.pythonhosted.org/packages/62/27/585859e72e117fe861c2079bcba35591a84f801e21bc1ab85bce6ce60305/scikit_learn-1.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:70b1d7e85b1c96383f872a519b3375f92f14731e279a7b4c6cfd650cf5dffc52", size = 11110613 }, - { url = "https://files.pythonhosted.org/packages/2e/59/8eb1872ca87009bdcdb7f3cdc679ad557b992c12f4b61f9250659e592c63/scikit_learn-1.6.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2ffa1e9e25b3d93990e74a4be2c2fc61ee5af85811562f1288d5d055880c4322", size = 12010001 }, - { url = "https://files.pythonhosted.org/packages/9d/05/f2fc4effc5b32e525408524c982c468c29d22f828834f0625c5ef3d601be/scikit_learn-1.6.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:dc5cf3d68c5a20ad6d571584c0750ec641cc46aeef1c1507be51300e6003a7e1", size = 11096360 }, - { url = "https://files.pythonhosted.org/packages/c8/e4/4195d52cf4f113573fb8ebc44ed5a81bd511a92c0228889125fac2f4c3d1/scikit_learn-1.6.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c06beb2e839ecc641366000ca84f3cf6fa9faa1777e29cf0c04be6e4d096a348", size = 
12209004 }, - { url = "https://files.pythonhosted.org/packages/94/be/47e16cdd1e7fcf97d95b3cb08bde1abb13e627861af427a3651fcb80b517/scikit_learn-1.6.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8ca8cb270fee8f1f76fa9bfd5c3507d60c6438bbee5687f81042e2bb98e5a97", size = 13171776 }, - { url = "https://files.pythonhosted.org/packages/34/b0/ca92b90859070a1487827dbc672f998da95ce83edce1270fc23f96f1f61a/scikit_learn-1.6.1-cp313-cp313-win_amd64.whl", hash = "sha256:7a1c43c8ec9fde528d664d947dc4c0789be4077a3647f232869f41d9bf50e0fb", size = 11071865 }, - { url = "https://files.pythonhosted.org/packages/12/ae/993b0fb24a356e71e9a894e42b8a9eec528d4c70217353a1cd7a48bc25d4/scikit_learn-1.6.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:a17c1dea1d56dcda2fac315712f3651a1fea86565b64b48fa1bc090249cbf236", size = 11955804 }, - { url = "https://files.pythonhosted.org/packages/d6/54/32fa2ee591af44507eac86406fa6bba968d1eb22831494470d0a2e4a1eb1/scikit_learn-1.6.1-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:6a7aa5f9908f0f28f4edaa6963c0a6183f1911e63a69aa03782f0d924c830a35", size = 11100530 }, - { url = "https://files.pythonhosted.org/packages/3f/58/55856da1adec655bdce77b502e94a267bf40a8c0b89f8622837f89503b5a/scikit_learn-1.6.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0650e730afb87402baa88afbf31c07b84c98272622aaba002559b614600ca691", size = 12433852 }, - { url = "https://files.pythonhosted.org/packages/ff/4f/c83853af13901a574f8f13b645467285a48940f185b690936bb700a50863/scikit_learn-1.6.1-cp313-cp313t-win_amd64.whl", hash = "sha256:3f59fe08dc03ea158605170eb52b22a105f238a5d512c4470ddeca71feae8e5f", size = 11337256 }, ] [[package]] @@ -4521,21 +4188,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b0/3c/0de11ca154e24a57b579fb648151d901326d3102115bc4f9a7a86526ce54/scipy-1.15.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:0fb57b30f0017d4afa5fe5f5b150b8f807618819287c21cbe51130de7ccdaed2", size = 40249869 }, { url = "https://files.pythonhosted.org/packages/15/09/472e8d0a6b33199d1bb95e49bedcabc0976c3724edd9b0ef7602ccacf41e/scipy-1.15.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:491d57fe89927fa1aafbe260f4cfa5ffa20ab9f1435025045a5315006a91b8f5", size = 42629068 }, { url = "https://files.pythonhosted.org/packages/ff/ba/31c7a8131152822b3a2cdeba76398ffb404d81d640de98287d236da90c49/scipy-1.15.1-cp312-cp312-win_amd64.whl", hash = "sha256:900f3fa3db87257510f011c292a5779eb627043dd89731b9c461cd16ef76ab3d", size = 43621992 }, - { url = "https://files.pythonhosted.org/packages/2b/bf/dd68965a4c5138a630eeed0baec9ae96e5d598887835bdde96cdd2fe4780/scipy-1.15.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:100193bb72fbff37dbd0bf14322314fc7cbe08b7ff3137f11a34d06dc0ee6b85", size = 41441136 }, - { url = "https://files.pythonhosted.org/packages/ef/5e/4928581312922d7e4d416d74c416a660addec4dd5ea185401df2269ba5a0/scipy-1.15.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:2114a08daec64980e4b4cbdf5bee90935af66d750146b1d2feb0d3ac30613692", size = 32533699 }, - { url = "https://files.pythonhosted.org/packages/32/90/03f99c43041852837686898c66767787cd41c5843d7a1509c39ffef683e9/scipy-1.15.1-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:6b3e71893c6687fc5e29208d518900c24ea372a862854c9888368c0b267387ab", size = 24807289 }, - { url = "https://files.pythonhosted.org/packages/9d/52/bfe82b42ae112eaba1af2f3e556275b8727d55ac6e4932e7aef337a9d9d4/scipy-1.15.1-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:837299eec3d19b7e042923448d17d95a86e43941104d33f00da7e31a0f715d3c", size = 27929844 }, - { url = "https://files.pythonhosted.org/packages/f6/77/54ff610bad600462c313326acdb035783accc6a3d5f566d22757ad297564/scipy-1.15.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82add84e8a9fb12af5c2c1a3a3f1cb51849d27a580cb9e6bd66226195142be6e", size = 38031272 }, - { url 
= "https://files.pythonhosted.org/packages/f1/26/98585cbf04c7cf503d7eb0a1966df8a268154b5d923c5fe0c1ed13154c49/scipy-1.15.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:070d10654f0cb6abd295bc96c12656f948e623ec5f9a4eab0ddb1466c000716e", size = 40210217 }, - { url = "https://files.pythonhosted.org/packages/fd/3f/3d2285eb6fece8bc5dbb2f9f94d61157d61d155e854fd5fea825b8218f12/scipy-1.15.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:55cc79ce4085c702ac31e49b1e69b27ef41111f22beafb9b49fea67142b696c4", size = 42587785 }, - { url = "https://files.pythonhosted.org/packages/48/7d/5b5251984bf0160d6533695a74a5fddb1fa36edd6f26ffa8c871fbd4782a/scipy-1.15.1-cp313-cp313-win_amd64.whl", hash = "sha256:c352c1b6d7cac452534517e022f8f7b8d139cd9f27e6fbd9f3cbd0bfd39f5bef", size = 43640439 }, - { url = "https://files.pythonhosted.org/packages/e7/b8/0e092f592d280496de52e152582030f8a270b194f87f890e1a97c5599b81/scipy-1.15.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0458839c9f873062db69a03de9a9765ae2e694352c76a16be44f93ea45c28d2b", size = 41619862 }, - { url = "https://files.pythonhosted.org/packages/f6/19/0b6e1173aba4db9e0b7aa27fe45019857fb90d6904038b83927cbe0a6c1d/scipy-1.15.1-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:af0b61c1de46d0565b4b39c6417373304c1d4f5220004058bdad3061c9fa8a95", size = 32610387 }, - { url = "https://files.pythonhosted.org/packages/e7/02/754aae3bd1fa0f2479ade3cfdf1732ecd6b05853f63eee6066a32684563a/scipy-1.15.1-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:71ba9a76c2390eca6e359be81a3e879614af3a71dfdabb96d1d7ab33da6f2364", size = 24883814 }, - { url = "https://files.pythonhosted.org/packages/1f/ac/d7906201604a2ea3b143bb0de51b3966f66441ba50b7dc182c4505b3edf9/scipy-1.15.1-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:14eaa373c89eaf553be73c3affb11ec6c37493b7eaaf31cf9ac5dffae700c2e0", size = 27944865 }, - { url = 
"https://files.pythonhosted.org/packages/84/9d/8f539002b5e203723af6a6f513a45e0a7671e9dabeedb08f417ac17e4edc/scipy-1.15.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f735bc41bd1c792c96bc426dece66c8723283695f02df61dcc4d0a707a42fc54", size = 39883261 }, - { url = "https://files.pythonhosted.org/packages/97/c0/62fd3bab828bcccc9b864c5997645a3b86372a35941cdaf677565c25c98d/scipy-1.15.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:2722a021a7929d21168830790202a75dbb20b468a8133c74a2c0230c72626b6c", size = 42093299 }, - { url = "https://files.pythonhosted.org/packages/e4/1f/5d46a8d94e9f6d2c913cbb109e57e7eed914de38ea99e2c4d69a9fc93140/scipy-1.15.1-cp313-cp313t-win_amd64.whl", hash = "sha256:bc7136626261ac1ed988dca56cfc4ab5180f75e0ee52e58f1e6aa74b5f3eacd5", size = 43181730 }, ] [[package]] @@ -4610,12 +4262,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d5/7d/9a57e187cbf2fbbbdfd4044a4f9ce141c8d221f9963750d3b001f0ec080d/shapely-2.0.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98fea108334be345c283ce74bf064fa00cfdd718048a8af7343c59eb40f59726", size = 2524835 }, { url = "https://files.pythonhosted.org/packages/6d/0a/f407509ab56825f39bf8cfce1fb410238da96cf096809c3e404e5bc71ea1/shapely-2.0.6-cp312-cp312-win32.whl", hash = "sha256:42fd4cd4834747e4990227e4cbafb02242c0cffe9ce7ef9971f53ac52d80d55f", size = 1295613 }, { url = "https://files.pythonhosted.org/packages/7b/b3/857afd9dfbfc554f10d683ac412eac6fa260d1f4cd2967ecb655c57e831a/shapely-2.0.6-cp312-cp312-win_amd64.whl", hash = "sha256:665990c84aece05efb68a21b3523a6b2057e84a1afbef426ad287f0796ef8a48", size = 1442539 }, - { url = "https://files.pythonhosted.org/packages/34/e8/d164ef5b0eab86088cde06dee8415519ffd5bb0dd1bd9d021e640e64237c/shapely-2.0.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:42805ef90783ce689a4dde2b6b2f261e2c52609226a0438d882e3ced40bb3013", size = 1445344 }, - { url = 
"https://files.pythonhosted.org/packages/ce/e2/9fba7ac142f7831757a10852bfa465683724eadbc93d2d46f74a16f9af04/shapely-2.0.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6d2cb146191a47bd0cee8ff5f90b47547b82b6345c0d02dd8b25b88b68af62d7", size = 1296182 }, - { url = "https://files.pythonhosted.org/packages/cf/dc/790d4bda27d196cd56ec66975eaae3351c65614cafd0e16ddde39ec9fb92/shapely-2.0.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e3fdef0a1794a8fe70dc1f514440aa34426cc0ae98d9a1027fb299d45741c381", size = 2423426 }, - { url = "https://files.pythonhosted.org/packages/af/b0/f8169f77eac7392d41e231911e0095eb1148b4d40c50ea9e34d999c89a7e/shapely-2.0.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c665a0301c645615a107ff7f52adafa2153beab51daf34587170d85e8ba6805", size = 2513249 }, - { url = "https://files.pythonhosted.org/packages/f6/1d/a8c0e9ab49ff2f8e4dedd71b0122eafb22a18ad7e9d256025e1f10c84704/shapely-2.0.6-cp313-cp313-win32.whl", hash = "sha256:0334bd51828f68cd54b87d80b3e7cee93f249d82ae55a0faf3ea21c9be7b323a", size = 1294848 }, - { url = "https://files.pythonhosted.org/packages/23/38/2bc32dd1e7e67a471d4c60971e66df0bdace88656c47a9a728ace0091075/shapely-2.0.6-cp313-cp313-win_amd64.whl", hash = "sha256:d37d070da9e0e0f0a530a621e17c0b8c3c9d04105655132a87cfff8bd77cc4c2", size = 1441371 }, ] [[package]] @@ -4795,12 +4441,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/26/32/e0e3a859136e95c85a572e4806dc58bf1ddf651108ae8b97d5f3ebe1a244/tiktoken-0.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2908c0d043a7d03ebd80347266b0e58440bdef5564f84f4d29fb235b5df3b04", size = 1175432 }, { url = "https://files.pythonhosted.org/packages/c7/89/926b66e9025b97e9fbabeaa59048a736fe3c3e4530a204109571104f921c/tiktoken-0.8.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:294440d21a2a51e12d4238e68a5972095534fe9878be57d905c476017bff99fc", size = 1236576 }, { url = 
"https://files.pythonhosted.org/packages/45/e2/39d4aa02a52bba73b2cd21ba4533c84425ff8786cc63c511d68c8897376e/tiktoken-0.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:d8f3192733ac4d77977432947d563d7e1b310b96497acd3c196c9bddb36ed9db", size = 883824 }, - { url = "https://files.pythonhosted.org/packages/e3/38/802e79ba0ee5fcbf240cd624143f57744e5d411d2e9d9ad2db70d8395986/tiktoken-0.8.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:02be1666096aff7da6cbd7cdaa8e7917bfed3467cd64b38b1f112e96d3b06a24", size = 1039648 }, - { url = "https://files.pythonhosted.org/packages/b1/da/24cdbfc302c98663fbea66f5866f7fa1048405c7564ab88483aea97c3b1a/tiktoken-0.8.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c94ff53c5c74b535b2cbf431d907fc13c678bbd009ee633a2aca269a04389f9a", size = 982763 }, - { url = "https://files.pythonhosted.org/packages/e4/f0/0ecf79a279dfa41fc97d00adccf976ecc2556d3c08ef3e25e45eb31f665b/tiktoken-0.8.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b231f5e8982c245ee3065cd84a4712d64692348bc609d84467c57b4b72dcbc5", size = 1144417 }, - { url = "https://files.pythonhosted.org/packages/ab/d3/155d2d4514f3471a25dc1d6d20549ef254e2aa9bb5b1060809b1d3b03d3a/tiktoken-0.8.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4177faa809bd55f699e88c96d9bb4635d22e3f59d635ba6fd9ffedf7150b9953", size = 1175108 }, - { url = "https://files.pythonhosted.org/packages/19/eb/5989e16821ee8300ef8ee13c16effc20dfc26c777d05fbb6825e3c037b81/tiktoken-0.8.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5376b6f8dc4753cd81ead935c5f518fa0fbe7e133d9e25f648d8c4dabdd4bad7", size = 1236520 }, - { url = "https://files.pythonhosted.org/packages/40/59/14b20465f1d1cb89cfbc96ec27e5617b2d41c79da12b5e04e96d689be2a7/tiktoken-0.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:18228d624807d66c87acd8f25fc135665617cab220671eb65b50f5d70fa51f69", size = 883849 }, ] [[package]] @@ -4849,7 +4489,7 @@ dependencies = [ { name = "nvidia-nccl-cu12", marker 
= "platform_machine == 'x86_64' and platform_system == 'Linux'" }, { name = "nvidia-nvjitlink-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, { name = "nvidia-nvtx-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, - { name = "setuptools" }, + { name = "setuptools", marker = "python_full_version >= '3.12'" }, { name = "sympy" }, { name = "triton", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, { name = "typing-extensions" }, @@ -4863,7 +4503,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/6d/69/d8ada8b6e0a4257556d5b4ddeb4345ea8eeaaef3c98b60d1cca197c7ad8e/torch-2.5.1-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:3f4b7f10a247e0dcd7ea97dc2d3bfbfc90302ed36d7f3952b0008d0df264e697", size = 91811673 }, { url = "https://files.pythonhosted.org/packages/5f/ba/607d013b55b9fd805db2a5c2662ec7551f1910b4eef39653eeaba182c5b2/torch-2.5.1-cp312-cp312-win_amd64.whl", hash = "sha256:73e58e78f7d220917c5dbfad1a40e09df9929d3b95d25e57d9f8558f84c9a11c", size = 203046841 }, { url = "https://files.pythonhosted.org/packages/57/6c/bf52ff061da33deb9f94f4121fde7ff3058812cb7d2036c97bc167793bd1/torch-2.5.1-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:8c712df61101964eb11910a846514011f0b6f5920c55dbf567bff8a34163d5b1", size = 63858109 }, - { url = "https://files.pythonhosted.org/packages/69/72/20cb30f3b39a9face296491a86adb6ff8f1a47a897e4d14667e6cf89d5c3/torch-2.5.1-cp313-cp313-manylinux1_x86_64.whl", hash = "sha256:9b61edf3b4f6e3b0e0adda8b3960266b9009d02b37555971f4d1c8f7a05afed7", size = 906393265 }, ] [[package]] @@ -4995,16 +4634,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/f2/2c/6990f4ccb41ed93744aaaa3786394bca0875503f97690622f3cafc0adfde/ujson-5.10.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:604a046d966457b6cdcacc5aa2ec5314f0e8c42bae52842c1e6fa02ea4bda42e", size = 1043576 }, { url = 
"https://files.pythonhosted.org/packages/14/f5/a2368463dbb09fbdbf6a696062d0c0f62e4ae6fa65f38f829611da2e8fdd/ujson-5.10.0-cp312-cp312-win32.whl", hash = "sha256:6dea1c8b4fc921bf78a8ff00bbd2bfe166345f5536c510671bccececb187c80e", size = 38764 }, { url = "https://files.pythonhosted.org/packages/59/2d/691f741ffd72b6c84438a93749ac57bf1a3f217ac4b0ea4fd0e96119e118/ujson-5.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:38665e7d8290188b1e0d57d584eb8110951a9591363316dd41cf8686ab1d0abc", size = 42211 }, - { url = "https://files.pythonhosted.org/packages/0d/69/b3e3f924bb0e8820bb46671979770c5be6a7d51c77a66324cdb09f1acddb/ujson-5.10.0-cp313-cp313-macosx_10_9_x86_64.whl", hash = "sha256:618efd84dc1acbd6bff8eaa736bb6c074bfa8b8a98f55b61c38d4ca2c1f7f287", size = 55646 }, - { url = "https://files.pythonhosted.org/packages/32/8a/9b748eb543c6cabc54ebeaa1f28035b1bd09c0800235b08e85990734c41e/ujson-5.10.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:38d5d36b4aedfe81dfe251f76c0467399d575d1395a1755de391e58985ab1c2e", size = 51806 }, - { url = "https://files.pythonhosted.org/packages/39/50/4b53ea234413b710a18b305f465b328e306ba9592e13a791a6a6b378869b/ujson-5.10.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67079b1f9fb29ed9a2914acf4ef6c02844b3153913eb735d4bf287ee1db6e557", size = 51975 }, - { url = "https://files.pythonhosted.org/packages/b4/9d/8061934f960cdb6dd55f0b3ceeff207fcc48c64f58b43403777ad5623d9e/ujson-5.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7d0e0ceeb8fe2468c70ec0c37b439dd554e2aa539a8a56365fd761edb418988", size = 53693 }, - { url = "https://files.pythonhosted.org/packages/f5/be/7bfa84b28519ddbb67efc8410765ca7da55e6b93aba84d97764cd5794dbc/ujson-5.10.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:59e02cd37bc7c44d587a0ba45347cc815fb7a5fe48de16bf05caa5f7d0d2e816", size = 58594 }, - { url = 
"https://files.pythonhosted.org/packages/48/eb/85d465abafb2c69d9699cfa5520e6e96561db787d36c677370e066c7e2e7/ujson-5.10.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:2a890b706b64e0065f02577bf6d8ca3b66c11a5e81fb75d757233a38c07a1f20", size = 997853 }, - { url = "https://files.pythonhosted.org/packages/9f/76/2a63409fc05d34dd7d929357b7a45e3a2c96f22b4225cd74becd2ba6c4cb/ujson-5.10.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:621e34b4632c740ecb491efc7f1fcb4f74b48ddb55e65221995e74e2d00bbff0", size = 1140694 }, - { url = "https://files.pythonhosted.org/packages/45/ed/582c4daba0f3e1688d923b5cb914ada1f9defa702df38a1916c899f7c4d1/ujson-5.10.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b9500e61fce0cfc86168b248104e954fead61f9be213087153d272e817ec7b4f", size = 1043580 }, - { url = "https://files.pythonhosted.org/packages/d7/0c/9837fece153051e19c7bade9f88f9b409e026b9525927824cdf16293b43b/ujson-5.10.0-cp313-cp313-win32.whl", hash = "sha256:4c4fc16f11ac1612f05b6f5781b384716719547e142cfd67b65d035bd85af165", size = 38766 }, - { url = "https://files.pythonhosted.org/packages/d7/72/6cb6728e2738c05bbe9bd522d6fc79f86b9a28402f38663e85a28fddd4a0/ujson-5.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:4573fd1695932d4f619928fd09d5d03d917274381649ade4328091ceca175539", size = 42212 }, ] [[package]] @@ -5121,12 +4750,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/06/a7/b4e6a19925c900be9f98bec0a75e6e8f79bb53bdeb891916609ab3958967/uvloop-0.21.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86975dca1c773a2c9864f4c52c5a55631038e387b47eaf56210f873887b6c8dc", size = 4693770 }, { url = "https://files.pythonhosted.org/packages/ce/0c/f07435a18a4b94ce6bd0677d8319cd3de61f3a9eeb1e5f8ab4e8b5edfcb3/uvloop-0.21.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:461d9ae6660fbbafedd07559c6a2e57cd553b34b0065b6550685f6653a98c1cb", size = 4451321 }, { url = 
"https://files.pythonhosted.org/packages/8f/eb/f7032be105877bcf924709c97b1bf3b90255b4ec251f9340cef912559f28/uvloop-0.21.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:183aef7c8730e54c9a3ee3227464daed66e37ba13040bb3f350bc2ddc040f22f", size = 4659022 }, - { url = "https://files.pythonhosted.org/packages/3f/8d/2cbef610ca21539f0f36e2b34da49302029e7c9f09acef0b1c3b5839412b/uvloop-0.21.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:bfd55dfcc2a512316e65f16e503e9e450cab148ef11df4e4e679b5e8253a5281", size = 1468123 }, - { url = "https://files.pythonhosted.org/packages/93/0d/b0038d5a469f94ed8f2b2fce2434a18396d8fbfb5da85a0a9781ebbdec14/uvloop-0.21.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:787ae31ad8a2856fc4e7c095341cccc7209bd657d0e71ad0dc2ea83c4a6fa8af", size = 819325 }, - { url = "https://files.pythonhosted.org/packages/50/94/0a687f39e78c4c1e02e3272c6b2ccdb4e0085fda3b8352fecd0410ccf915/uvloop-0.21.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ee4d4ef48036ff6e5cfffb09dd192c7a5027153948d85b8da7ff705065bacc6", size = 4582806 }, - { url = "https://files.pythonhosted.org/packages/d2/19/f5b78616566ea68edd42aacaf645adbf71fbd83fc52281fba555dc27e3f1/uvloop-0.21.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3df876acd7ec037a3d005b3ab85a7e4110422e4d9c1571d4fc89b0fc41b6816", size = 4701068 }, - { url = "https://files.pythonhosted.org/packages/47/57/66f061ee118f413cd22a656de622925097170b9380b30091b78ea0c6ea75/uvloop-0.21.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd53ecc9a0f3d87ab847503c2e1552b690362e005ab54e8a48ba97da3924c0dc", size = 4454428 }, - { url = "https://files.pythonhosted.org/packages/63/9a/0962b05b308494e3202d3f794a6e85abe471fe3cafdbcf95c2e8c713aabd/uvloop-0.21.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a5c39f217ab3c663dc699c04cbd50c13813e31d917642d459fdcec07555cc553", size = 4660018 }, ] [[package]] @@ -5173,18 +4796,6 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/f1/47/143c92418e30cb9348a4387bfa149c8e0e404a7c5b0585d46d2f7031b4b9/watchfiles-1.0.4-cp312-cp312-win32.whl", hash = "sha256:b045c800d55bc7e2cadd47f45a97c7b29f70f08a7c2fa13241905010a5493f94", size = 271822 }, { url = "https://files.pythonhosted.org/packages/ea/94/b0165481bff99a64b29e46e07ac2e0df9f7a957ef13bec4ceab8515f44e3/watchfiles-1.0.4-cp312-cp312-win_amd64.whl", hash = "sha256:c2acfa49dd0ad0bf2a9c0bb9a985af02e89345a7189be1efc6baa085e0f72d7c", size = 285441 }, { url = "https://files.pythonhosted.org/packages/11/de/09fe56317d582742d7ca8c2ca7b52a85927ebb50678d9b0fa8194658f536/watchfiles-1.0.4-cp312-cp312-win_arm64.whl", hash = "sha256:22bb55a7c9e564e763ea06c7acea24fc5d2ee5dfc5dafc5cfbedfe58505e9f90", size = 277141 }, - { url = "https://files.pythonhosted.org/packages/08/98/f03efabec64b5b1fa58c0daab25c68ef815b0f320e54adcacd0d6847c339/watchfiles-1.0.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:8012bd820c380c3d3db8435e8cf7592260257b378b649154a7948a663b5f84e9", size = 390954 }, - { url = "https://files.pythonhosted.org/packages/16/09/4dd49ba0a32a45813debe5fb3897955541351ee8142f586303b271a02b40/watchfiles-1.0.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:aa216f87594f951c17511efe5912808dfcc4befa464ab17c98d387830ce07b60", size = 381133 }, - { url = "https://files.pythonhosted.org/packages/76/59/5aa6fc93553cd8d8ee75c6247763d77c02631aed21551a97d94998bf1dae/watchfiles-1.0.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62c9953cf85529c05b24705639ffa390f78c26449e15ec34d5339e8108c7c407", size = 449516 }, - { url = "https://files.pythonhosted.org/packages/4c/aa/df4b6fe14b6317290b91335b23c96b488d365d65549587434817e06895ea/watchfiles-1.0.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7cf684aa9bba4cd95ecb62c822a56de54e3ae0598c1a7f2065d51e24637a3c5d", size = 454820 }, - { url = 
"https://files.pythonhosted.org/packages/5e/71/185f8672f1094ce48af33252c73e39b48be93b761273872d9312087245f6/watchfiles-1.0.4-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f44a39aee3cbb9b825285ff979ab887a25c5d336e5ec3574f1506a4671556a8d", size = 481550 }, - { url = "https://files.pythonhosted.org/packages/85/d7/50ebba2c426ef1a5cb17f02158222911a2e005d401caf5d911bfca58f4c4/watchfiles-1.0.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a38320582736922be8c865d46520c043bff350956dfc9fbaee3b2df4e1740a4b", size = 518647 }, - { url = "https://files.pythonhosted.org/packages/f0/7a/4c009342e393c545d68987e8010b937f72f47937731225b2b29b7231428f/watchfiles-1.0.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:39f4914548b818540ef21fd22447a63e7be6e24b43a70f7642d21f1e73371590", size = 497547 }, - { url = "https://files.pythonhosted.org/packages/0f/7c/1cf50b35412d5c72d63b2bf9a4fffee2e1549a245924960dd087eb6a6de4/watchfiles-1.0.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f12969a3765909cf5dc1e50b2436eb2c0e676a3c75773ab8cc3aa6175c16e902", size = 452179 }, - { url = "https://files.pythonhosted.org/packages/d6/a9/3db1410e1c1413735a9a472380e4f431ad9a9e81711cda2aaf02b7f62693/watchfiles-1.0.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:0986902677a1a5e6212d0c49b319aad9cc48da4bd967f86a11bde96ad9676ca1", size = 614125 }, - { url = "https://files.pythonhosted.org/packages/f2/e1/0025d365cf6248c4d1ee4c3d2e3d373bdd3f6aff78ba4298f97b4fad2740/watchfiles-1.0.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:308ac265c56f936636e3b0e3f59e059a40003c655228c131e1ad439957592303", size = 611911 }, - { url = "https://files.pythonhosted.org/packages/55/55/035838277d8c98fc8c917ac9beeb0cd6c59d675dc2421df5f9fcf44a0070/watchfiles-1.0.4-cp313-cp313-win32.whl", hash = "sha256:aee397456a29b492c20fda2d8961e1ffb266223625346ace14e4b6d861ba9c80", size = 271152 }, - { url = 
"https://files.pythonhosted.org/packages/f0/e5/96b8e55271685ddbadc50ce8bc53aa2dff278fb7ac4c2e473df890def2dc/watchfiles-1.0.4-cp313-cp313-win_amd64.whl", hash = "sha256:d6097538b0ae5c1b88c3b55afa245a66793a8fec7ada6755322e465fb1a0e8cc", size = 285216 }, ] [[package]] @@ -5233,17 +4844,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/cf/53/1bf0c06618b5ac35f1d7906444b9958f8485682ab0ea40dee7b17a32da1e/websockets-14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:eb6d38971c800ff02e4a6afd791bbe3b923a9a57ca9aeab7314c21c84bf9ff05", size = 168712 }, { url = "https://files.pythonhosted.org/packages/e5/22/5ec2f39fff75f44aa626f86fa7f20594524a447d9c3be94d8482cd5572ef/websockets-14.1-cp312-cp312-win32.whl", hash = "sha256:1d045cbe1358d76b24d5e20e7b1878efe578d9897a25c24e6006eef788c0fdf0", size = 162838 }, { url = "https://files.pythonhosted.org/packages/74/27/28f07df09f2983178db7bf6c9cccc847205d2b92ced986cd79565d68af4f/websockets-14.1-cp312-cp312-win_amd64.whl", hash = "sha256:90f4c7a069c733d95c308380aae314f2cb45bd8a904fb03eb36d1a4983a4993f", size = 163277 }, - { url = "https://files.pythonhosted.org/packages/34/77/812b3ba5110ed8726eddf9257ab55ce9e85d97d4aa016805fdbecc5e5d48/websockets-14.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:3630b670d5057cd9e08b9c4dab6493670e8e762a24c2c94ef312783870736ab9", size = 161966 }, - { url = "https://files.pythonhosted.org/packages/8d/24/4fcb7aa6986ae7d9f6d083d9d53d580af1483c5ec24bdec0978307a0f6ac/websockets-14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:36ebd71db3b89e1f7b1a5deaa341a654852c3518ea7a8ddfdf69cc66acc2db1b", size = 159625 }, - { url = "https://files.pythonhosted.org/packages/f8/47/2a0a3a2fc4965ff5b9ce9324d63220156bd8bedf7f90824ab92a822e65fd/websockets-14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5b918d288958dc3fa1c5a0b9aa3256cb2b2b84c54407f4813c45d52267600cd3", size = 159857 }, - { url = 
"https://files.pythonhosted.org/packages/dd/c8/d7b425011a15e35e17757e4df75b25e1d0df64c0c315a44550454eaf88fc/websockets-14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00fe5da3f037041da1ee0cf8e308374e236883f9842c7c465aa65098b1c9af59", size = 169635 }, - { url = "https://files.pythonhosted.org/packages/93/39/6e3b5cffa11036c40bd2f13aba2e8e691ab2e01595532c46437b56575678/websockets-14.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8149a0f5a72ca36720981418eeffeb5c2729ea55fa179091c81a0910a114a5d2", size = 168578 }, - { url = "https://files.pythonhosted.org/packages/cf/03/8faa5c9576299b2adf34dcccf278fc6bbbcda8a3efcc4d817369026be421/websockets-14.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77569d19a13015e840b81550922056acabc25e3f52782625bc6843cfa034e1da", size = 169018 }, - { url = "https://files.pythonhosted.org/packages/8c/05/ea1fec05cc3a60defcdf0bb9f760c3c6bd2dd2710eff7ac7f891864a22ba/websockets-14.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cf5201a04550136ef870aa60ad3d29d2a59e452a7f96b94193bee6d73b8ad9a9", size = 169383 }, - { url = "https://files.pythonhosted.org/packages/21/1d/eac1d9ed787f80754e51228e78855f879ede1172c8b6185aca8cef494911/websockets-14.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:88cf9163ef674b5be5736a584c999e98daf3aabac6e536e43286eb74c126b9c7", size = 168773 }, - { url = "https://files.pythonhosted.org/packages/0e/1b/e808685530185915299740d82b3a4af3f2b44e56ccf4389397c7a5d95d39/websockets-14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:836bef7ae338a072e9d1863502026f01b14027250a4545672673057997d5c05a", size = 168757 }, - { url = "https://files.pythonhosted.org/packages/b6/19/6ab716d02a3b068fbbeb6face8a7423156e12c446975312f1c7c0f4badab/websockets-14.1-cp313-cp313-win32.whl", hash = "sha256:0d4290d559d68288da9f444089fd82490c8d2744309113fc26e2da6e48b65da6", 
size = 162834 }, - { url = "https://files.pythonhosted.org/packages/6c/fd/ab6b7676ba712f2fc89d1347a4b5bdc6aa130de10404071f2b2606450209/websockets-14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8621a07991add373c3c5c2cf89e1d277e49dc82ed72c75e3afc74bd0acc446f0", size = 163277 }, { url = "https://files.pythonhosted.org/packages/b0/0b/c7e5d11020242984d9d37990310520ed663b942333b83a033c2f20191113/websockets-14.1-py3-none-any.whl", hash = "sha256:4d4fc827a20abe6d544a119896f6b78ee13fe81cbfef416f3f2ddf09a03f0e2e", size = 156277 }, ] @@ -5293,28 +4893,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/80/4e/eb8b353e36711347893f502ce91c770b0b0929f8f0bed2670a6856e667a9/wrapt-1.17.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e8b2816ebef96d83657b56306152a93909a83f23994f4b30ad4573b00bd11bb9", size = 87567 }, { url = "https://files.pythonhosted.org/packages/17/27/4fe749a54e7fae6e7146f1c7d914d28ef599dacd4416566c055564080fe2/wrapt-1.17.2-cp312-cp312-win32.whl", hash = "sha256:468090021f391fe0056ad3e807e3d9034e0fd01adcd3bdfba977b6fdf4213ea9", size = 36672 }, { url = "https://files.pythonhosted.org/packages/15/06/1dbf478ea45c03e78a6a8c4be4fdc3c3bddea5c8de8a93bc971415e47f0f/wrapt-1.17.2-cp312-cp312-win_amd64.whl", hash = "sha256:ec89ed91f2fa8e3f52ae53cd3cf640d6feff92ba90d62236a81e4e563ac0e991", size = 38865 }, - { url = "https://files.pythonhosted.org/packages/ce/b9/0ffd557a92f3b11d4c5d5e0c5e4ad057bd9eb8586615cdaf901409920b14/wrapt-1.17.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6ed6ffac43aecfe6d86ec5b74b06a5be33d5bb9243d055141e8cabb12aa08125", size = 53800 }, - { url = "https://files.pythonhosted.org/packages/c0/ef/8be90a0b7e73c32e550c73cfb2fa09db62234227ece47b0e80a05073b375/wrapt-1.17.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:35621ae4c00e056adb0009f8e86e28eb4a41a4bfa8f9bfa9fca7d343fe94f998", size = 38824 }, - { url = 
"https://files.pythonhosted.org/packages/36/89/0aae34c10fe524cce30fe5fc433210376bce94cf74d05b0d68344c8ba46e/wrapt-1.17.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a604bf7a053f8362d27eb9fefd2097f82600b856d5abe996d623babd067b1ab5", size = 38920 }, - { url = "https://files.pythonhosted.org/packages/3b/24/11c4510de906d77e0cfb5197f1b1445d4fec42c9a39ea853d482698ac681/wrapt-1.17.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cbabee4f083b6b4cd282f5b817a867cf0b1028c54d445b7ec7cfe6505057cf8", size = 88690 }, - { url = "https://files.pythonhosted.org/packages/71/d7/cfcf842291267bf455b3e266c0c29dcb675b5540ee8b50ba1699abf3af45/wrapt-1.17.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49703ce2ddc220df165bd2962f8e03b84c89fee2d65e1c24a7defff6f988f4d6", size = 80861 }, - { url = "https://files.pythonhosted.org/packages/d5/66/5d973e9f3e7370fd686fb47a9af3319418ed925c27d72ce16b791231576d/wrapt-1.17.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8112e52c5822fc4253f3901b676c55ddf288614dc7011634e2719718eaa187dc", size = 89174 }, - { url = "https://files.pythonhosted.org/packages/a7/d3/8e17bb70f6ae25dabc1aaf990f86824e4fd98ee9cadf197054e068500d27/wrapt-1.17.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9fee687dce376205d9a494e9c121e27183b2a3df18037f89d69bd7b35bcf59e2", size = 86721 }, - { url = "https://files.pythonhosted.org/packages/6f/54/f170dfb278fe1c30d0ff864513cff526d624ab8de3254b20abb9cffedc24/wrapt-1.17.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:18983c537e04d11cf027fbb60a1e8dfd5190e2b60cc27bc0808e653e7b218d1b", size = 79763 }, - { url = "https://files.pythonhosted.org/packages/4a/98/de07243751f1c4a9b15c76019250210dd3486ce098c3d80d5f729cba029c/wrapt-1.17.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:703919b1633412ab54bcf920ab388735832fdcb9f9a00ae49387f0fe67dad504", size = 87585 }, 
- { url = "https://files.pythonhosted.org/packages/f9/f0/13925f4bd6548013038cdeb11ee2cbd4e37c30f8bfd5db9e5a2a370d6e20/wrapt-1.17.2-cp313-cp313-win32.whl", hash = "sha256:abbb9e76177c35d4e8568e58650aa6926040d6a9f6f03435b7a522bf1c487f9a", size = 36676 }, - { url = "https://files.pythonhosted.org/packages/bf/ae/743f16ef8c2e3628df3ddfd652b7d4c555d12c84b53f3d8218498f4ade9b/wrapt-1.17.2-cp313-cp313-win_amd64.whl", hash = "sha256:69606d7bb691b50a4240ce6b22ebb319c1cfb164e5f6569835058196e0f3a845", size = 38871 }, - { url = "https://files.pythonhosted.org/packages/3d/bc/30f903f891a82d402ffb5fda27ec1d621cc97cb74c16fea0b6141f1d4e87/wrapt-1.17.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:4a721d3c943dae44f8e243b380cb645a709ba5bd35d3ad27bc2ed947e9c68192", size = 56312 }, - { url = "https://files.pythonhosted.org/packages/8a/04/c97273eb491b5f1c918857cd26f314b74fc9b29224521f5b83f872253725/wrapt-1.17.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:766d8bbefcb9e00c3ac3b000d9acc51f1b399513f44d77dfe0eb026ad7c9a19b", size = 40062 }, - { url = "https://files.pythonhosted.org/packages/4e/ca/3b7afa1eae3a9e7fefe499db9b96813f41828b9fdb016ee836c4c379dadb/wrapt-1.17.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e496a8ce2c256da1eb98bd15803a79bee00fc351f5dfb9ea82594a3f058309e0", size = 40155 }, - { url = "https://files.pythonhosted.org/packages/89/be/7c1baed43290775cb9030c774bc53c860db140397047cc49aedaf0a15477/wrapt-1.17.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d615e4fe22f4ad3528448c193b218e077656ca9ccb22ce2cb20db730f8d306", size = 113471 }, - { url = "https://files.pythonhosted.org/packages/32/98/4ed894cf012b6d6aae5f5cc974006bdeb92f0241775addad3f8cd6ab71c8/wrapt-1.17.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a5aaeff38654462bc4b09023918b7f21790efb807f54c000a39d41d69cf552cb", size = 101208 }, - { url = 
"https://files.pythonhosted.org/packages/ea/fd/0c30f2301ca94e655e5e057012e83284ce8c545df7661a78d8bfca2fac7a/wrapt-1.17.2-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a7d15bbd2bc99e92e39f49a04653062ee6085c0e18b3b7512a4f2fe91f2d681", size = 109339 }, - { url = "https://files.pythonhosted.org/packages/75/56/05d000de894c4cfcb84bcd6b1df6214297b8089a7bd324c21a4765e49b14/wrapt-1.17.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:e3890b508a23299083e065f435a492b5435eba6e304a7114d2f919d400888cc6", size = 110232 }, - { url = "https://files.pythonhosted.org/packages/53/f8/c3f6b2cf9b9277fb0813418e1503e68414cd036b3b099c823379c9575e6d/wrapt-1.17.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:8c8b293cd65ad716d13d8dd3624e42e5a19cc2a2f1acc74b30c2c13f15cb61a6", size = 100476 }, - { url = "https://files.pythonhosted.org/packages/a7/b1/0bb11e29aa5139d90b770ebbfa167267b1fc548d2302c30c8f7572851738/wrapt-1.17.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4c82b8785d98cdd9fed4cac84d765d234ed3251bd6afe34cb7ac523cb93e8b4f", size = 106377 }, - { url = "https://files.pythonhosted.org/packages/6a/e1/0122853035b40b3f333bbb25f1939fc1045e21dd518f7f0922b60c156f7c/wrapt-1.17.2-cp313-cp313t-win32.whl", hash = "sha256:13e6afb7fe71fe7485a4550a8844cc9ffbe263c0f1a1eea569bc7091d4898555", size = 37986 }, - { url = "https://files.pythonhosted.org/packages/09/5e/1655cf481e079c1f22d0cabdd4e51733679932718dc23bf2db175f329b76/wrapt-1.17.2-cp313-cp313t-win_amd64.whl", hash = "sha256:eaf675418ed6b3b31c7a989fd007fa7c3be66ce14e5c3b27336383604c9da85c", size = 40750 }, { url = "https://files.pythonhosted.org/packages/2d/82/f56956041adef78f849db6b289b282e72b55ab8045a75abad81898c28d19/wrapt-1.17.2-py3-none-any.whl", hash = "sha256:b18f2d1533a71f069c7f82d524a52599053d4c7166e9dd374ae2136b7f40f7c8", size = 23594 }, ] @@ -5393,21 +4971,6 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/78/e3/dd76659b2811b3fd06892a8beb850e1996b63e9235af5a86ea348f053e9e/xxhash-3.5.0-cp312-cp312-win32.whl", hash = "sha256:f7b58d1fd3551b8c80a971199543379be1cee3d0d409e1f6d8b01c1a2eebf1f8", size = 30170 }, { url = "https://files.pythonhosted.org/packages/d9/6b/1c443fe6cfeb4ad1dcf231cdec96eb94fb43d6498b4469ed8b51f8b59a37/xxhash-3.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:fa0cafd3a2af231b4e113fba24a65d7922af91aeb23774a8b78228e6cd785e3e", size = 30040 }, { url = "https://files.pythonhosted.org/packages/0f/eb/04405305f290173acc0350eba6d2f1a794b57925df0398861a20fbafa415/xxhash-3.5.0-cp312-cp312-win_arm64.whl", hash = "sha256:586886c7e89cb9828bcd8a5686b12e161368e0064d040e225e72607b43858ba2", size = 26796 }, - { url = "https://files.pythonhosted.org/packages/c9/b8/e4b3ad92d249be5c83fa72916c9091b0965cb0faeff05d9a0a3870ae6bff/xxhash-3.5.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:37889a0d13b0b7d739cfc128b1c902f04e32de17b33d74b637ad42f1c55101f6", size = 31795 }, - { url = "https://files.pythonhosted.org/packages/fc/d8/b3627a0aebfbfa4c12a41e22af3742cf08c8ea84f5cc3367b5de2d039cce/xxhash-3.5.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:97a662338797c660178e682f3bc180277b9569a59abfb5925e8620fba00b9fc5", size = 30792 }, - { url = "https://files.pythonhosted.org/packages/c3/cc/762312960691da989c7cd0545cb120ba2a4148741c6ba458aa723c00a3f8/xxhash-3.5.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f85e0108d51092bdda90672476c7d909c04ada6923c14ff9d913c4f7dc8a3bc", size = 220950 }, - { url = "https://files.pythonhosted.org/packages/fe/e9/cc266f1042c3c13750e86a535496b58beb12bf8c50a915c336136f6168dc/xxhash-3.5.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd2fd827b0ba763ac919440042302315c564fdb797294d86e8cdd4578e3bc7f3", size = 199980 }, - { url = 
"https://files.pythonhosted.org/packages/bf/85/a836cd0dc5cc20376de26b346858d0ac9656f8f730998ca4324921a010b9/xxhash-3.5.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:82085c2abec437abebf457c1d12fccb30cc8b3774a0814872511f0f0562c768c", size = 428324 }, - { url = "https://files.pythonhosted.org/packages/b4/0e/15c243775342ce840b9ba34aceace06a1148fa1630cd8ca269e3223987f5/xxhash-3.5.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07fda5de378626e502b42b311b049848c2ef38784d0d67b6f30bb5008642f8eb", size = 194370 }, - { url = "https://files.pythonhosted.org/packages/87/a1/b028bb02636dfdc190da01951d0703b3d904301ed0ef6094d948983bef0e/xxhash-3.5.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c279f0d2b34ef15f922b77966640ade58b4ccdfef1c4d94b20f2a364617a493f", size = 207911 }, - { url = "https://files.pythonhosted.org/packages/80/d5/73c73b03fc0ac73dacf069fdf6036c9abad82de0a47549e9912c955ab449/xxhash-3.5.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:89e66ceed67b213dec5a773e2f7a9e8c58f64daeb38c7859d8815d2c89f39ad7", size = 216352 }, - { url = "https://files.pythonhosted.org/packages/b6/2a/5043dba5ddbe35b4fe6ea0a111280ad9c3d4ba477dd0f2d1fe1129bda9d0/xxhash-3.5.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:bcd51708a633410737111e998ceb3b45d3dbc98c0931f743d9bb0a209033a326", size = 203410 }, - { url = "https://files.pythonhosted.org/packages/a2/b2/9a8ded888b7b190aed75b484eb5c853ddd48aa2896e7b59bbfbce442f0a1/xxhash-3.5.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3ff2c0a34eae7df88c868be53a8dd56fbdf592109e21d4bfa092a27b0bf4a7bf", size = 210322 }, - { url = "https://files.pythonhosted.org/packages/98/62/440083fafbc917bf3e4b67c2ade621920dd905517e85631c10aac955c1d2/xxhash-3.5.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:4e28503dccc7d32e0b9817aa0cbfc1f45f563b2c995b7a66c4c8a0d232e840c7", size = 414725 }, - { url = 
"https://files.pythonhosted.org/packages/75/db/009206f7076ad60a517e016bb0058381d96a007ce3f79fa91d3010f49cc2/xxhash-3.5.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a6c50017518329ed65a9e4829154626f008916d36295b6a3ba336e2458824c8c", size = 192070 }, - { url = "https://files.pythonhosted.org/packages/1f/6d/c61e0668943a034abc3a569cdc5aeae37d686d9da7e39cf2ed621d533e36/xxhash-3.5.0-cp313-cp313-win32.whl", hash = "sha256:53a068fe70301ec30d868ece566ac90d873e3bb059cf83c32e76012c889b8637", size = 30172 }, - { url = "https://files.pythonhosted.org/packages/96/14/8416dce965f35e3d24722cdf79361ae154fa23e2ab730e5323aa98d7919e/xxhash-3.5.0-cp313-cp313-win_amd64.whl", hash = "sha256:80babcc30e7a1a484eab952d76a4f4673ff601f54d5142c26826502740e70b43", size = 30041 }, - { url = "https://files.pythonhosted.org/packages/27/ee/518b72faa2073f5aa8e3262408d284892cb79cf2754ba0c3a5870645ef73/xxhash-3.5.0-cp313-cp313-win_arm64.whl", hash = "sha256:4811336f1ce11cac89dcbd18f3a25c527c16311709a89313c3acaf771def2d4b", size = 26801 }, ] [[package]] @@ -5453,22 +5016,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/97/8a/568d07c5d4964da5b02621a517532adb8ec5ba181ad1687191fffeda0ab6/yarl-1.18.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ce1af883b94304f493698b00d0f006d56aea98aeb49d75ec7d98cd4a777e9285", size = 357861 }, { url = "https://files.pythonhosted.org/packages/7d/e3/924c3f64b6b3077889df9a1ece1ed8947e7b61b0a933f2ec93041990a677/yarl-1.18.3-cp312-cp312-win32.whl", hash = "sha256:f91c4803173928a25e1a55b943c81f55b8872f0018be83e3ad4938adffb77dd2", size = 84097 }, { url = "https://files.pythonhosted.org/packages/34/45/0e055320daaabfc169b21ff6174567b2c910c45617b0d79c68d7ab349b02/yarl-1.18.3-cp312-cp312-win_amd64.whl", hash = "sha256:7e2ee16578af3b52ac2f334c3b1f92262f47e02cc6193c598502bd46f5cd1477", size = 90399 }, - { url = 
"https://files.pythonhosted.org/packages/30/c7/c790513d5328a8390be8f47be5d52e141f78b66c6c48f48d241ca6bd5265/yarl-1.18.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:90adb47ad432332d4f0bc28f83a5963f426ce9a1a8809f5e584e704b82685dcb", size = 140789 }, - { url = "https://files.pythonhosted.org/packages/30/aa/a2f84e93554a578463e2edaaf2300faa61c8701f0898725842c704ba5444/yarl-1.18.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:913829534200eb0f789d45349e55203a091f45c37a2674678744ae52fae23efa", size = 94144 }, - { url = "https://files.pythonhosted.org/packages/c6/fc/d68d8f83714b221a85ce7866832cba36d7c04a68fa6a960b908c2c84f325/yarl-1.18.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:ef9f7768395923c3039055c14334ba4d926f3baf7b776c923c93d80195624782", size = 91974 }, - { url = "https://files.pythonhosted.org/packages/56/4e/d2563d8323a7e9a414b5b25341b3942af5902a2263d36d20fb17c40411e2/yarl-1.18.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88a19f62ff30117e706ebc9090b8ecc79aeb77d0b1f5ec10d2d27a12bc9f66d0", size = 333587 }, - { url = "https://files.pythonhosted.org/packages/25/c9/cfec0bc0cac8d054be223e9f2c7909d3e8442a856af9dbce7e3442a8ec8d/yarl-1.18.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e17c9361d46a4d5addf777c6dd5eab0715a7684c2f11b88c67ac37edfba6c482", size = 344386 }, - { url = "https://files.pythonhosted.org/packages/ab/5d/4c532190113b25f1364d25f4c319322e86232d69175b91f27e3ebc2caf9a/yarl-1.18.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1a74a13a4c857a84a845505fd2d68e54826a2cd01935a96efb1e9d86c728e186", size = 345421 }, - { url = "https://files.pythonhosted.org/packages/23/d1/6cdd1632da013aa6ba18cee4d750d953104a5e7aac44e249d9410a972bf5/yarl-1.18.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41f7ce59d6ee7741af71d82020346af364949314ed3d87553763a2df1829cc58", size = 339384 }, - { url = 
"https://files.pythonhosted.org/packages/9a/c4/6b3c39bec352e441bd30f432cda6ba51681ab19bb8abe023f0d19777aad1/yarl-1.18.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f52a265001d830bc425f82ca9eabda94a64a4d753b07d623a9f2863fde532b53", size = 326689 }, - { url = "https://files.pythonhosted.org/packages/23/30/07fb088f2eefdc0aa4fc1af4e3ca4eb1a3aadd1ce7d866d74c0f124e6a85/yarl-1.18.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:82123d0c954dc58db301f5021a01854a85bf1f3bb7d12ae0c01afc414a882ca2", size = 345453 }, - { url = "https://files.pythonhosted.org/packages/63/09/d54befb48f9cd8eec43797f624ec37783a0266855f4930a91e3d5c7717f8/yarl-1.18.3-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:2ec9bbba33b2d00999af4631a3397d1fd78290c48e2a3e52d8dd72db3a067ac8", size = 341872 }, - { url = "https://files.pythonhosted.org/packages/91/26/fd0ef9bf29dd906a84b59f0cd1281e65b0c3e08c6aa94b57f7d11f593518/yarl-1.18.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:fbd6748e8ab9b41171bb95c6142faf068f5ef1511935a0aa07025438dd9a9bc1", size = 347497 }, - { url = "https://files.pythonhosted.org/packages/d9/b5/14ac7a256d0511b2ac168d50d4b7d744aea1c1aa20c79f620d1059aab8b2/yarl-1.18.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:877d209b6aebeb5b16c42cbb377f5f94d9e556626b1bfff66d7b0d115be88d0a", size = 359981 }, - { url = "https://files.pythonhosted.org/packages/ca/b3/d493221ad5cbd18bc07e642894030437e405e1413c4236dd5db6e46bcec9/yarl-1.18.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:b464c4ab4bfcb41e3bfd3f1c26600d038376c2de3297760dfe064d2cb7ea8e10", size = 366229 }, - { url = "https://files.pythonhosted.org/packages/04/56/6a3e2a5d9152c56c346df9b8fb8edd2c8888b1e03f96324d457e5cf06d34/yarl-1.18.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8d39d351e7faf01483cc7ff7c0213c412e38e5a340238826be7e0e4da450fdc8", size = 360383 }, - { url = 
"https://files.pythonhosted.org/packages/fd/b7/4b3c7c7913a278d445cc6284e59b2e62fa25e72758f888b7a7a39eb8423f/yarl-1.18.3-cp313-cp313-win32.whl", hash = "sha256:61ee62ead9b68b9123ec24bc866cbef297dd266175d53296e2db5e7f797f902d", size = 310152 }, - { url = "https://files.pythonhosted.org/packages/f5/d5/688db678e987c3e0fb17867970700b92603cadf36c56e5fb08f23e822a0c/yarl-1.18.3-cp313-cp313-win_amd64.whl", hash = "sha256:578e281c393af575879990861823ef19d66e2b1d0098414855dd367e234f5b3c", size = 315723 }, { url = "https://files.pythonhosted.org/packages/f5/4b/a06e0ec3d155924f77835ed2d167ebd3b211a7b0853da1cf8d8414d784ef/yarl-1.18.3-py3-none-any.whl", hash = "sha256:b57f4f58099328dfb26c6a771d09fb20dbbae81d20cfb66141251ea063bd101b", size = 45109 }, ] From 1a2d81bc01ecfbd0578902196834ab317f1e8d5a Mon Sep 17 00:00:00 2001 From: silentoplayz <50341825+silentoplayz@users.noreply.github.com> Date: Fri, 14 Feb 2025 12:48:33 -0500 Subject: [PATCH 011/623] Update main.py Change ASCII font for "Open WebUI" from "Standard" to "ANSI Shadow" --- backend/open_webui/main.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/backend/open_webui/main.py b/backend/open_webui/main.py index a363231512b..71142978c92 100644 --- a/backend/open_webui/main.py +++ b/backend/open_webui/main.py @@ -348,12 +348,12 @@ async def get_response(self, path: str, scope): print( rf""" - ___ __ __ _ _ _ ___ - / _ \ _ __ ___ _ __ \ \ / /__| |__ | | | |_ _| -| | | | '_ \ / _ \ '_ \ \ \ /\ / / _ \ '_ \| | | || | -| |_| | |_) | __/ | | | \ V V / __/ |_) | |_| || | - \___/| .__/ \___|_| |_| \_/\_/ \___|_.__/ \___/|___| - |_| + ██████╗ ██████╗ ███████╗███╗ ██╗ ██╗ ██╗███████╗██████╗ ██╗ ██╗██╗ +██╔═══██╗██╔══██╗██╔════╝████╗ ██║ ██║ ██║██╔════╝██╔══██╗██║ ██║██║ +██║ ██║██████╔╝█████╗ ██╔██╗ ██║ ██║ █╗ ██║█████╗ ██████╔╝██║ ██║██║ +██║ ██║██╔═══╝ ██╔══╝ ██║╚██╗██║ ██║███╗██║██╔══╝ ██╔══██╗██║ ██║██║ +╚██████╔╝██║ ███████╗██║ ╚████║ ╚███╔███╔╝███████╗██████╔╝╚██████╔╝██║ + ╚═════╝ ╚═╝ 
╚══════╝╚═╝ ╚═══╝ ╚══╝╚══╝ ╚══════╝╚═════╝ ╚═════╝ ╚═╝ v{VERSION} - building the best open-source AI user interface. From cdc13e9960842b93e16fb7fb3c1d4f1cf5681829 Mon Sep 17 00:00:00 2001 From: hurxxxx Date: Tue, 18 Feb 2025 23:15:57 +0900 Subject: [PATCH 012/623] chore: Fix unnecessary whitespace issues --- backend/open_webui/utils/middleware.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/backend/open_webui/utils/middleware.py b/backend/open_webui/utils/middleware.py index 93edc8f729d..0a1a9ee7f4a 100644 --- a/backend/open_webui/utils/middleware.py +++ b/backend/open_webui/utils/middleware.py @@ -360,9 +360,7 @@ async def chat_web_search_handler( }, } ) - files = form_data.get("files", []) - if request.app.state.config.RAG_WEB_SEARCH_FULL_CONTEXT: files.append( { From a381ffc58518cec12a13630c4c0f277122ceabb5 Mon Sep 17 00:00:00 2001 From: hurxxxx Date: Tue, 18 Feb 2025 23:17:06 +0900 Subject: [PATCH 013/623] revert --- backend/open_webui/utils/middleware.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/backend/open_webui/utils/middleware.py b/backend/open_webui/utils/middleware.py index 0a1a9ee7f4a..93edc8f729d 100644 --- a/backend/open_webui/utils/middleware.py +++ b/backend/open_webui/utils/middleware.py @@ -360,7 +360,9 @@ async def chat_web_search_handler( }, } ) + files = form_data.get("files", []) + if request.app.state.config.RAG_WEB_SEARCH_FULL_CONTEXT: files.append( { From 4a2a12fd21c15f1671f82315b7897a0f9af4f6f3 Mon Sep 17 00:00:00 2001 From: dannyl1u Date: Wed, 19 Feb 2025 10:33:49 -0800 Subject: [PATCH 014/623] feat: scaffolding for logit_bias --- backend/open_webui/utils/payload.py | 1 + .../Settings/Advanced/AdvancedParams.svelte | 44 +++++++++++++++++++ .../components/chat/Settings/General.svelte | 3 ++ 3 files changed, 48 insertions(+) diff --git a/backend/open_webui/utils/payload.py b/backend/open_webui/utils/payload.py index 5eb040434b2..d078362ee54 100644 --- a/backend/open_webui/utils/payload.py +++ 
b/backend/open_webui/utils/payload.py @@ -61,6 +61,7 @@ def apply_model_params_to_body_openai(params: dict, form_data: dict) -> dict: "reasoning_effort": str, "seed": lambda x: x, "stop": lambda x: [bytes(s, "utf-8").decode("unicode_escape") for s in x], + "logit_bias": lambda x: x, } return apply_model_params_to_body(params, form_data, mappings) diff --git a/src/lib/components/chat/Settings/Advanced/AdvancedParams.svelte b/src/lib/components/chat/Settings/Advanced/AdvancedParams.svelte index d0648bba568..5e53b999e48 100644 --- a/src/lib/components/chat/Settings/Advanced/AdvancedParams.svelte +++ b/src/lib/components/chat/Settings/Advanced/AdvancedParams.svelte @@ -17,6 +17,7 @@ stop: null, temperature: null, reasoning_effort: null, + logit_bias: null, frequency_penalty: null, repeat_last_n: null, mirostat: null, @@ -298,6 +299,49 @@ {/if} +
+ +
+
+ {$i18n.t('Logit Bias')} +
+ +
+
+ + {#if (params?.logit_bias ?? null) !== null} +
+
+ +
+
+ {/if} +
+
Date: Wed, 19 Feb 2025 16:23:58 -0800 Subject: [PATCH 015/623] include logit_bias in form_data --- backend/open_webui/utils/middleware.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/backend/open_webui/utils/middleware.py b/backend/open_webui/utils/middleware.py index ba55c095edb..359ef775c8c 100644 --- a/backend/open_webui/utils/middleware.py +++ b/backend/open_webui/utils/middleware.py @@ -591,6 +591,11 @@ def apply_params_to_form_data(form_data, model): if "reasoning_effort" in params: form_data["reasoning_effort"] = params["reasoning_effort"] + if "logit_bias" in params: + try: + form_data["logit_bias"] = json.loads(params["logit_bias"]) + except json.JSONDecodeError: + print("Invalid JSON format for logit_bias") return form_data From 39c6ecb50d79111048fef7710edd935a6b98aea4 Mon Sep 17 00:00:00 2001 From: Victor Ribeiro Date: Thu, 20 Feb 2025 11:15:38 -0300 Subject: [PATCH 016/623] feat: add audit logger middleware and custom logger using loguru --- backend/open_webui/env.py | 22 +++ backend/open_webui/main.py | 20 +++ backend/open_webui/utils/audit.py | 249 +++++++++++++++++++++++++++++ backend/open_webui/utils/logger.py | 140 ++++++++++++++++ backend/requirements.txt | 3 + 5 files changed, 434 insertions(+) create mode 100644 backend/open_webui/utils/audit.py create mode 100644 backend/open_webui/utils/logger.py diff --git a/backend/open_webui/env.py b/backend/open_webui/env.py index 96e288d7779..ba546a2eb5a 100644 --- a/backend/open_webui/env.py +++ b/backend/open_webui/env.py @@ -419,3 +419,25 @@ def parse_section(section): if OFFLINE_MODE: os.environ["HF_HUB_OFFLINE"] = "1" + +#################################### +# AUDIT LOGGING +#################################### +ENABLE_AUDIT_LOGS = os.getenv("ENABLE_AUDIT_LOGS", "false").lower() == "true" +# Where to store log file +AUDIT_LOGS_FILE_PATH = f"{DATA_DIR}/audit.log" +# Maximum size of a file before rotating into a new log file +AUDIT_LOG_FILE_ROTATION_SIZE = 
os.getenv("AUDIT_LOG_FILE_ROTATION_SIZE", "10MB") +# METADATA | REQUEST | REQUEST_RESPONSE +AUDIT_LOG_LEVEL = os.getenv("AUDIT_LOG_LEVEL", "REQUEST_RESPONSE").upper() +try: + MAX_BODY_LOG_SIZE = int(os.environ.get("MAX_BODY_LOG_SIZE") or 2048) +except ValueError: + MAX_BODY_LOG_SIZE = 2048 + +# Comma separated list for urls to exclude from audit +AUDIT_EXCLUDED_PATHS = os.getenv("AUDIT_EXCLUDED_PATHS", "/chats,/chat,/folders").split( + "," +) +AUDIT_EXCLUDED_PATHS = [path.strip() for path in AUDIT_EXCLUDED_PATHS] +AUDIT_EXCLUDED_PATHS = [path.lstrip("/") for path in AUDIT_EXCLUDED_PATHS] diff --git a/backend/open_webui/main.py b/backend/open_webui/main.py index 346d28d6c3d..e2f97dddaf7 100644 --- a/backend/open_webui/main.py +++ b/backend/open_webui/main.py @@ -45,6 +45,9 @@ from starlette.responses import Response, StreamingResponse +from open_webui.utils import logger +from open_webui.utils.audit import AuditLevel, AuditLoggingMiddleware +from open_webui.utils.logger import start_logger from open_webui.socket.main import ( app as socket_app, periodic_usage_pool_cleanup, @@ -298,8 +301,11 @@ reset_config, ) from open_webui.env import ( + AUDIT_EXCLUDED_PATHS, + AUDIT_LOG_LEVEL, CHANGELOG, GLOBAL_LOG_LEVEL, + MAX_BODY_LOG_SIZE, SAFE_MODE, SRC_LOG_LEVELS, VERSION, @@ -384,6 +390,7 @@ async def get_response(self, path: str, scope): @asynccontextmanager async def lifespan(app: FastAPI): + start_logger() if RESET_CONFIG_ON_START: reset_config() @@ -879,6 +886,19 @@ async def inspect_websocket(request: Request, call_next): app.include_router(utils.router, prefix="/api/v1/utils", tags=["utils"]) +try: + audit_level = AuditLevel(AUDIT_LOG_LEVEL) +except ValueError as e: + logger.error(f"Invalid audit level: {AUDIT_LOG_LEVEL}. 
Error: {e}") + audit_level = AuditLevel.NONE + +if audit_level != AuditLevel.NONE: + app.add_middleware( + AuditLoggingMiddleware, + audit_level=audit_level, + excluded_paths=AUDIT_EXCLUDED_PATHS, + max_body_size=MAX_BODY_LOG_SIZE, + ) ################################## # # Chat Endpoints diff --git a/backend/open_webui/utils/audit.py b/backend/open_webui/utils/audit.py new file mode 100644 index 00000000000..95c0745a981 --- /dev/null +++ b/backend/open_webui/utils/audit.py @@ -0,0 +1,249 @@ +from contextlib import asynccontextmanager +from dataclasses import asdict, dataclass +from enum import Enum +import re +from typing import ( + TYPE_CHECKING, + Any, + AsyncGenerator, + Dict, + MutableMapping, + Optional, + cast, +) +import uuid + +from asgiref.typing import ( + ASGI3Application, + ASGIReceiveCallable, + ASGIReceiveEvent, + ASGISendCallable, + ASGISendEvent, + Scope as ASGIScope, +) +from loguru import logger +from starlette.requests import Request + +from open_webui.env import AUDIT_LOG_LEVEL, MAX_BODY_LOG_SIZE +from open_webui.utils.auth import get_current_user, get_http_authorization_cred +from open_webui.models.users import UserModel + + +if TYPE_CHECKING: + from loguru import Logger + + +@dataclass(frozen=True) +class AuditLogEntry: + # `Metadata` audit level properties + id: str + user: dict[str, Any] + audit_level: str + verb: str + request_uri: str + user_agent: Optional[str] = None + source_ip: Optional[str] = None + # `Request` audit level properties + request_object: Any = None + # `Request Response` level + response_object: Any = None + response_status_code: Optional[int] = None + + +class AuditLevel(str, Enum): + NONE = "NONE" + METADATA = "METADATA" + REQUEST = "REQUEST" + REQUEST_RESPONSE = "REQUEST_RESPONSE" + + +class AuditLogger: + """ + A helper class that encapsulates audit logging functionality. It uses Loguru’s logger with an auditable binding to ensure that audit log entries are filtered correctly. 
+ + Parameters: + logger (Logger): An instance of Loguru’s logger. + """ + + def __init__(self, logger: "Logger"): + self.logger = logger.bind(auditable=True) + + def write( + self, + audit_entry: AuditLogEntry, + *, + log_level: str = "INFO", + extra: Optional[dict] = None, + ): + + entry = asdict(audit_entry) + + if extra: + entry["extra"] = extra + + self.logger.log( + log_level, + "", + **entry, + ) + + +class AuditContext: + """ + Captures and aggregates the HTTP request and response bodies during the processing of a request. It ensures that only a configurable maximum amount of data is stored to prevent excessive memory usage. + + Attributes: + request_body (bytearray): Accumulated request payload. + response_body (bytearray): Accumulated response payload. + max_body_size (int): Maximum number of bytes to capture. + metadata (Dict[str, Any]): A dictionary to store additional audit metadata (user, http verb, user agent, etc.). + """ + + def __init__(self, max_body_size: int = MAX_BODY_LOG_SIZE): + self.request_body = bytearray() + self.response_body = bytearray() + self.max_body_size = max_body_size + self.metadata: Dict[str, Any] = {} + + def add_request_chunk(self, chunk: bytes): + if len(self.request_body) < self.max_body_size: + self.request_body.extend( + chunk[: self.max_body_size - len(self.request_body)] + ) + + def add_response_chunk(self, chunk: bytes): + if len(self.response_body) < self.max_body_size: + self.response_body.extend( + chunk[: self.max_body_size - len(self.response_body)] + ) + + +class AuditLoggingMiddleware: + """ + ASGI middleware that intercepts HTTP requests and responses to perform audit logging. It captures request/response bodies (depending on audit level), headers, HTTP methods, and user information, then logs a structured audit entry at the end of the request cycle. 
+ """ + + AUDITED_METHODS = {"PUT", "PATCH", "DELETE", "POST"} + + def __init__( + self, + app: ASGI3Application, + *, + excluded_paths: Optional[list[str]] = None, + max_body_size: int = MAX_BODY_LOG_SIZE, + audit_level: AuditLevel = AuditLevel.NONE, + ) -> None: + self.app = app + self.audit_logger = AuditLogger(logger) + self.excluded_paths = excluded_paths or [] + self.max_body_size = max_body_size + self.audit_level = audit_level + + async def __call__( + self, + scope: ASGIScope, + receive: ASGIReceiveCallable, + send: ASGISendCallable, + ) -> None: + if scope["type"] != "http": + return await self.app(scope, receive, send) + + request = Request(scope=cast(MutableMapping, scope)) + + if self._should_skip_auditing(request): + return await self.app(scope, receive, send) + + async with self._audit_context(request) as context: + + async def send_wrapper(message: ASGISendEvent) -> None: + if self.audit_level == AuditLevel.REQUEST_RESPONSE: + await self._capture_response(message, context) + + await send(message) + + original_receive = receive + + async def receive_wrapper() -> ASGIReceiveEvent: + nonlocal original_receive + message = await original_receive() + + if self.audit_level in ( + AuditLevel.REQUEST, + AuditLevel.REQUEST_RESPONSE, + ): + await self._capture_request(message, context) + + return message + + await self.app(scope, receive_wrapper, send_wrapper) + + @asynccontextmanager + async def _audit_context( + self, request: Request + ) -> AsyncGenerator[AuditContext, None]: + """ + async context manager that ensures that an audit log entry is recorded after the request is processed. 
+ """ + context = AuditContext() + try: + yield context + finally: + await self._log_audit_entry(request, context) + + async def _get_authenticated_user(self, request: Request) -> UserModel: + + auth_header = request.headers.get("Authorization") + assert auth_header + user = get_current_user(request, get_http_authorization_cred(auth_header)) + + return user + + def _should_skip_auditing(self, request: Request) -> bool: + if ( + request.method not in {"POST", "PUT", "PATCH", "DELETE"} + or AUDIT_LOG_LEVEL == "NONE" + or not request.headers.get("authorization") + ): + return True + # match either /api//...(for the endpoint /api/chat case) or /api/v1//... + pattern = re.compile( + r"^/api(?:/v1)?/(" + "|".join(self.excluded_paths) + r")\b" + ) + if pattern.match(request.url.path): + return True + + return False + + async def _capture_request(self, message: ASGIReceiveEvent, context: AuditContext): + if message["type"] == "http.request": + body = message.get("body", b"") + context.add_request_chunk(body) + + async def _capture_response(self, message: ASGISendEvent, context: AuditContext): + if message["type"] == "http.response.start": + context.metadata["response_status_code"] = message["status"] + + elif message["type"] == "http.response.body": + body = message.get("body", b"") + context.add_response_chunk(body) + + async def _log_audit_entry(self, request: Request, context: AuditContext): + try: + user = await self._get_authenticated_user(request) + + entry = AuditLogEntry( + id=str(uuid.uuid4()), + user=user.model_dump(include={"id", "name", "email", "role"}), + audit_level=self.audit_level.value, + verb=request.method, + request_uri=str(request.url), + response_status_code=context.metadata.get("response_status_code", None), + source_ip=request.client.host if request.client else None, + user_agent=request.headers.get("user-agent"), + request_object=context.request_body.decode("utf-8", errors="replace"), + response_object=context.response_body.decode("utf-8", 
errors="replace"), + ) + + self.audit_logger.write(entry) + except Exception as e: + logger.error(f"Failed to log audit entry: {str(e)}") diff --git a/backend/open_webui/utils/logger.py b/backend/open_webui/utils/logger.py new file mode 100644 index 00000000000..2557610060f --- /dev/null +++ b/backend/open_webui/utils/logger.py @@ -0,0 +1,140 @@ +import json +import logging +import sys +from typing import TYPE_CHECKING + +from loguru import logger + +from open_webui.env import ( + AUDIT_LOG_FILE_ROTATION_SIZE, + AUDIT_LOG_LEVEL, + AUDIT_LOGS_FILE_PATH, + GLOBAL_LOG_LEVEL, +) + + +if TYPE_CHECKING: + from loguru import Record + + +def stdout_format(record: "Record") -> str: + """ + Generates a formatted string for log records that are output to the console. This format includes a timestamp, log level, source location (module, function, and line), the log message, and any extra data (serialized as JSON). + + Parameters: + record (Record): A Loguru record that contains logging details including time, level, name, function, line, message, and any extra context. + Returns: + str: A formatted log string intended for stdout. + """ + record["extra"]["extra_json"] = json.dumps(record["extra"]) + return ( + "{time:YYYY-MM-DD HH:mm:ss.SSS} | " + "{level: <8} | " + "{name}:{function}:{line} - " + "{message} - {extra[extra_json]}" + "\n{exception}" + ) + + +class InterceptHandler(logging.Handler): + """ + Intercepts log records from Python's standard logging module + and redirects them to Loguru's logger. + """ + + def emit(self, record): + """ + Called by the standard logging module for each log event. + It transforms the standard `LogRecord` into a format compatible with Loguru + and passes it to Loguru's logger. 
+ """ + try: + level = logger.level(record.levelname).name + except ValueError: + level = record.levelno + + frame, depth = sys._getframe(6), 6 + while frame and frame.f_code.co_filename == logging.__file__: + frame = frame.f_back + depth += 1 + + logger.opt(depth=depth, exception=record.exc_info).log( + level, record.getMessage() + ) + + +def file_format(record: "Record"): + """ + Formats audit log records into a structured JSON string for file output. + + Parameters: + record (Record): A Loguru record containing extra audit data. + Returns: + str: A JSON-formatted string representing the audit data. + """ + + audit_data = { + "id": record["extra"].get("id", ""), + "timestamp": int(record["time"].timestamp()), + "user": record["extra"].get("user", dict()), + "audit_level": record["extra"].get("audit_level", ""), + "verb": record["extra"].get("verb", ""), + "request_uri": record["extra"].get("request_uri", ""), + "response_status_code": record["extra"].get("response_status_code", 0), + "source_ip": record["extra"].get("source_ip", ""), + "user_agent": record["extra"].get("user_agent", ""), + "request_object": record["extra"].get("request_object", b""), + "response_object": record["extra"].get("response_object", b""), + "extra": record["extra"].get("extra", {}), + } + + record["extra"]["file_extra"] = json.dumps(audit_data, default=str) + return "{extra[file_extra]}\n" + + +def start_logger(): + """ + Initializes and configures Loguru's logger with distinct handlers: + + A console (stdout) handler for general log messages (excluding those marked as auditable). + An optional file handler for audit logs if audit logging is enabled. + Additionally, this function reconfigures Python’s standard logging to route through Loguru and adjusts logging levels for Uvicorn. + + Parameters: + enable_audit_logging (bool): Determines whether audit-specific log entries should be recorded to file. 
+ """ + logger.remove() + + logger.add( + sys.stdout, + level=GLOBAL_LOG_LEVEL, + format=stdout_format, + filter=lambda record: "auditable" not in record["extra"], + ) + + if AUDIT_LOG_LEVEL != "NONE": + try: + logger.add( + AUDIT_LOGS_FILE_PATH, + level="INFO", + rotation=AUDIT_LOG_FILE_ROTATION_SIZE, + compression="zip", + format=file_format, + filter=lambda record: record["extra"].get("auditable") is True, + ) + except Exception as e: + logger.error(f"Failed to initialize audit log file handler: {str(e)}") + + logging.basicConfig( + handlers=[InterceptHandler()], level=GLOBAL_LOG_LEVEL, force=True + ) + for uvicorn_logger_name in ["uvicorn", "uvicorn.error"]: + uvicorn_logger = logging.getLogger(uvicorn_logger_name) + uvicorn_logger.setLevel(GLOBAL_LOG_LEVEL) + uvicorn_logger.handlers = [] + for uvicorn_logger_name in ["uvicorn.access"]: + uvicorn_logger = logging.getLogger(uvicorn_logger_name) + uvicorn_logger.setLevel(GLOBAL_LOG_LEVEL) + uvicorn_logger.handlers = [InterceptHandler()] + + logger.info(f"GLOBAL_LOG_LEVEL: {GLOBAL_LOG_LEVEL}") diff --git a/backend/requirements.txt b/backend/requirements.txt index 965741f7812..a04f4910517 100644 --- a/backend/requirements.txt +++ b/backend/requirements.txt @@ -31,6 +31,9 @@ APScheduler==3.10.4 RestrictedPython==8.0 +loguru==0.7.2 +asgiref==3.8.1 + # AI libraries openai anthropic From 08df5f6d7cc43d478d96fd00c801db773e0ebe3d Mon Sep 17 00:00:00 2001 From: Aleix Dorca Date: Thu, 20 Feb 2025 20:24:58 +0100 Subject: [PATCH 017/623] Update catalan translation.json --- src/lib/i18n/locales/ca-ES/translation.json | 100 ++++++++++---------- 1 file changed, 50 insertions(+), 50 deletions(-) diff --git a/src/lib/i18n/locales/ca-ES/translation.json b/src/lib/i18n/locales/ca-ES/translation.json index e99cbbce4d1..963fdcd609f 100644 --- a/src/lib/i18n/locales/ca-ES/translation.json +++ b/src/lib/i18n/locales/ca-ES/translation.json @@ -20,7 +20,7 @@ "Account Activation Pending": "Activació del compte pendent", "Accurate 
information": "Informació precisa", "Actions": "Accions", - "Activate": "", + "Activate": "Activar", "Activate this command by typing \"/{{COMMAND}}\" to chat input.": "Activa aquest comanda escrivint \"{{COMMAND}}\" en el xat", "Active Users": "Usuaris actius", "Add": "Afegir", @@ -100,7 +100,7 @@ "Audio": "Àudio", "August": "Agost", "Authenticate": "Autenticar", - "Authentication": "", + "Authentication": "Autenticació", "Auto-Copy Response to Clipboard": "Copiar la resposta automàticament al porta-retalls", "Auto-playback response": "Reproduir la resposta automàticament", "Autocomplete Generation": "Generació automàtica", @@ -124,11 +124,11 @@ "Beta": "Beta", "Bing Search V7 Endpoint": "Punt de connexió a Bing Search V7", "Bing Search V7 Subscription Key": "Clau de subscripció a Bing Search V7", - "Bocha Search API Key": "", + "Bocha Search API Key": "Clau API de Bocha Search", "Brave Search API Key": "Clau API de Brave Search", "By {{name}}": "Per {{name}}", "Bypass SSL verification for Websites": "Desactivar la verificació SSL per a l'accés a Internet", - "Calendar": "", + "Calendar": "Calendari", "Call": "Trucada", "Call feature is not supported when using Web STT engine": "La funció de trucada no s'admet quan s'utilitza el motor Web STT", "Camera": "Càmera", @@ -180,13 +180,13 @@ "Clone of {{TITLE}}": "Clon de {{TITLE}}", "Close": "Tancar", "Code execution": "Execució de codi", - "Code Execution": "", - "Code Execution Engine": "", - "Code Execution Timeout": "", + "Code Execution": "Excució de Codi", + "Code Execution Engine": "Motor d'execució de codi", + "Code Execution Timeout": "Temps màxim d'execució de codi", "Code formatted successfully": "Codi formatat correctament", "Code Interpreter": "Intèrpret de codi", - "Code Interpreter Engine": "", - "Code Interpreter Prompt Template": "", + "Code Interpreter Engine": "Motor de l'intèrpret de codi", + "Code Interpreter Prompt Template": "Plantilla de la indicació de l'intèrpret de codi", "Collection": 
"Col·lecció", "Color": "Color", "ComfyUI": "ComfyUI", @@ -203,7 +203,7 @@ "Confirm Password": "Confirmar la contrasenya", "Confirm your action": "Confirma la teva acció", "Confirm your new password": "Confirma la teva nova contrasenya", - "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAI compatible API endpoints.": "Connecta als teus propis punts de connexió de l'API compatible amb OpenAI", "Connections": "Connexions", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "Restringeix l'esforç de raonament dels models de raonament. Només aplicable a models de raonament de proveïdors específics que donen suport a l'esforç de raonament. (Per defecte: mitjà)", "Contact Admin for WebUI Access": "Posat en contacte amb l'administrador per accedir a WebUI", @@ -215,7 +215,7 @@ "Continue with Email": "Continuar amb el correu", "Continue with LDAP": "Continuar amb LDAP", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Controlar com es divideix el text del missatge per a les sol·licituds TTS. 'Puntuació' divideix en frases, 'paràgrafs' divideix en paràgrafs i 'cap' manté el missatge com una cadena única.", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "Controlar la repetició de seqüències de tokens en el text generat. Un valor més alt (p. 
ex., 1,5) penalitzarà les repeticions amb més força, mentre que un valor més baix (p. ex., 1,1) serà més indulgent. A l'1, està desactivat. (Per defecte: 1.1)", "Controls": "Controls", "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "Controlar l'equilibri entre la coherència i la diversitat de la sortida. Un valor més baix donarà lloc a un text més enfocat i coherent. (Per defecte: 5.0)", "Copied": "Copiat", @@ -227,7 +227,7 @@ "Copy Link": "Copiar l'enllaç", "Copy to clipboard": "Copiar al porta-retalls", "Copying to clipboard was successful!": "La còpia al porta-retalls s'ha realitzat correctament", - "CORS must be properly configured by the provider to allow requests from Open WebUI.": "", + "CORS must be properly configured by the provider to allow requests from Open WebUI.": "CORS ha de ser configurat correctament pel proveïdor per permetre les sol·licituds d'Open WebUI", "Create": "Crear", "Create a knowledge base": "Crear una base de coneixement", "Create a model": "Crear un model", @@ -271,7 +271,7 @@ "Delete folder?": "Eliminar la carpeta?", "Delete function?": "Eliminar funció?", "Delete Message": "Eleiminar el missatge", - "Delete message?": "", + "Delete message?": "Eliminar el missatge?", "Delete prompt?": "Eliminar indicació?", "delete this link": "Eliminar aquest enllaç", "Delete tool?": "Eliminar eina?", @@ -282,15 +282,15 @@ "Describe your knowledge base and objectives": "Descriu la teva base de coneixement i objectius", "Description": "Descripció", "Didn't fully follow instructions": "No s'han seguit les instruccions completament", - "Direct Connections": "", - "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", - "Direct Connections settings updated": "", + "Direct Connections": "Connexions directes", + "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "Les 
connexions directes permeten als usuaris connectar-se als seus propis endpoints d'API compatibles amb OpenAI.", + "Direct Connections settings updated": "Configuració de les connexions directes actualitzada", "Disabled": "Deshabilitat", "Discover a function": "Descobrir una funció", "Discover a model": "Descobrir un model", "Discover a prompt": "Descobrir una indicació", "Discover a tool": "Descobrir una eina", - "Discover how to use Open WebUI and seek support from the community.": "", + "Discover how to use Open WebUI and seek support from the community.": "Descobreix com utilitzar Open WebUI i demana suport a la comunitat.", "Discover wonders": "Descobrir meravelles", "Discover, download, and explore custom functions": "Descobrir, descarregar i explorar funcions personalitzades", "Discover, download, and explore custom prompts": "Descobrir, descarregar i explorar indicacions personalitzades", @@ -315,14 +315,14 @@ "Don't like the style": "No t'agrada l'estil?", "Done": "Fet", "Download": "Descarregar", - "Download as SVG": "", + "Download as SVG": "Descarrega com a SVG", "Download canceled": "Descàrrega cancel·lada", "Download Database": "Descarregar la base de dades", "Drag and drop a file to upload or select a file to view": "Arrossegar un arxiu per pujar o escull un arxiu a veure", "Draw": "Dibuixar", "Drop any files here to add to the conversation": "Deixa qualsevol arxiu aquí per afegir-lo a la conversa", "e.g. '30s','10m'. Valid time units are 's', 'm', 'h'.": "p. ex. '30s','10m'. Les unitats de temps vàlides són 's', 'm', 'h'.", - "e.g. 60": "", + "e.g. 60": "p. ex. 60", "e.g. A filter to remove profanity from text": "p. ex. Un filtre per eliminar paraules malsonants del text", "e.g. My Filter": "p. ex. El meu filtre", "e.g. My Tools": "p. ex. 
Les meves eines", @@ -346,7 +346,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Model d'incrustació configurat a \"{{embedding_model}}\"", "Enable API Key": "Activar la Clau API", "Enable autocomplete generation for chat messages": "Activar la generació automàtica per als missatges del xat", - "Enable Code Interpreter": "", + "Enable Code Interpreter": "Activar l'intèrpret de codi", "Enable Community Sharing": "Activar l'ús compartit amb la comunitat", "Enable Google Drive": "Activar Google Drive", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Activar el bloqueig de memòria (mlock) per evitar que les dades del model s'intercanviïn fora de la memòria RAM. Aquesta opció bloqueja el conjunt de pàgines de treball del model a la memòria RAM, assegurant-se que no s'intercanviaran al disc. Això pot ajudar a mantenir el rendiment evitant errors de pàgina i garantint un accés ràpid a les dades.", @@ -365,7 +365,7 @@ "Enter Application DN Password": "Introdueix la contrasenya del DN d'aplicació", "Enter Bing Search V7 Endpoint": "Introdueix el punt de connexió de Bing Search V7", "Enter Bing Search V7 Subscription Key": "Introdueix la clau de subscripció de Bing Search V7", - "Enter Bocha Search API Key": "", + "Enter Bocha Search API Key": "Introdueix la clau API de Bocha Search", "Enter Brave Search API Key": "Introdueix la clau API de Brave Search", "Enter certificate path": "Introdueix el camí del certificat", "Enter CFG Scale (e.g. 7.0)": "Entra l'escala CFG (p.ex. 7.0)", @@ -379,9 +379,9 @@ "Enter Google PSE Engine Id": "Introdueix l'identificador del motor PSE de Google", "Enter Image Size (e.g. 512x512)": "Introdueix la mida de la imatge (p. ex. 
512x512)", "Enter Jina API Key": "Introdueix la clau API de Jina", - "Enter Jupyter Password": "", - "Enter Jupyter Token": "", - "Enter Jupyter URL": "", + "Enter Jupyter Password": "Introdueix la contrasenya de Jupyter", + "Enter Jupyter Token": "Introdueix el token de Jupyter", + "Enter Jupyter URL": "Introdueix la URL de Jupyter", "Enter Kagi Search API Key": "Introdueix la clau API de Kagi Search", "Enter language codes": "Introdueix els codis de llenguatge", "Enter Model ID": "Introdueix l'identificador del model", @@ -397,8 +397,8 @@ "Enter SearchApi Engine": "Introdueix el motor SearchApi", "Enter Searxng Query URL": "Introdueix l'URL de consulta de Searxng", "Enter Seed": "Introdueix la llavor", - "Enter SerpApi API Key": "", - "Enter SerpApi Engine": "", + "Enter SerpApi API Key": "Introdueix la clau API SerpApi", + "Enter SerpApi Engine": "Introdueix el motor API SerpApi", "Enter Serper API Key": "Introdueix la clau API Serper", "Enter Serply API Key": "Introdueix la clau API Serply", "Enter Serpstack API Key": "Introdueix la clau API Serpstack", @@ -410,7 +410,7 @@ "Enter Tavily API Key": "Introdueix la clau API de Tavily", "Enter the public URL of your WebUI. This URL will be used to generate links in the notifications.": "Entra la URL pública de WebUI. Aquesta URL s'utilitzarà per generar els enllaços en les notificacions.", "Enter Tika Server URL": "Introdueix l'URL del servidor Tika", - "Enter timeout in seconds": "", + "Enter timeout in seconds": "Entra el temps màxim en segons", "Enter Top K": "Introdueix Top K", "Enter URL (e.g. http://127.0.0.1:7860/)": "Introdueix l'URL (p. ex. http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "Introdueix l'URL (p. ex. 
http://localhost:11434)", @@ -458,7 +458,7 @@ "Failed to save models configuration": "No s'ha pogut desar la configuració dels models", "Failed to update settings": "No s'han pogut actualitzar les preferències", "Failed to upload file.": "No s'ha pogut pujar l'arxiu.", - "Features": "", + "Features": "Característiques", "Features Permissions": "Permisos de les característiques", "February": "Febrer", "Feedback History": "Històric de comentaris", @@ -488,7 +488,7 @@ "Form": "Formulari", "Format your variables using brackets like this:": "Formata les teves variables utilitzant claudàtors així:", "Frequency Penalty": "Penalització per freqüència", - "Full Context Mode": "", + "Full Context Mode": "Mode de context complert", "Function": "Funció", "Function Calling": "Crida a funcions", "Function created successfully": "La funció s'ha creat correctament", @@ -503,9 +503,9 @@ "Functions allow arbitrary code execution": "Les funcions permeten l'execució de codi arbitrari", "Functions allow arbitrary code execution.": "Les funcions permeten l'execució de codi arbitrari.", "Functions imported successfully": "Les funcions s'han importat correctament", - "Gemini": "", - "Gemini API Config": "", - "Gemini API Key is required.": "", + "Gemini": "Gemini", + "Gemini API Config": "Configuració de Gemini API", + "Gemini API Key is required.": "La clau API de Gemini és necessària", "General": "General", "General Settings": "Preferències generals", "Generate an image": "Generar una imatge", @@ -532,7 +532,7 @@ "Hex Color": "Color hexadecimal", "Hex Color - Leave empty for default color": "Color hexadecimal - Deixar buit per a color per defecte", "Hide": "Amaga", - "Home": "", + "Home": "Inici", "Host": "Servidor", "How can I help you today?": "Com et puc ajudar avui?", "How would you rate this response?": "Com avaluaries aquesta resposta?", @@ -576,8 +576,8 @@ "JSON Preview": "Vista prèvia del document JSON", "July": "Juliol", "June": "Juny", - "Jupyter Auth": "", - "Jupyter URL": 
"", + "Jupyter Auth": "Autenticació Jupyter", + "Jupyter URL": "URL de Jupyter", "JWT Expiration": "Caducitat del JWT", "JWT Token": "Token JWT", "Kagi Search API Key": "Clau API de Kagi Search", @@ -607,12 +607,12 @@ "Leave empty to include all models or select specific models": "Deixa-ho en blanc per incloure tots els models o selecciona models específics", "Leave empty to use the default prompt, or enter a custom prompt": "Deixa-ho en blanc per utilitzar la indicació predeterminada o introdueix una indicació personalitzada", "Leave model field empty to use the default model.": "Deixa el camp de model buit per utilitzar el model per defecte.", - "License": "", + "License": "Llicència", "Light": "Clar", "Listening...": "Escoltant...", "Llama.cpp": "Llama.cpp", "LLMs can make mistakes. Verify important information.": "Els models de llenguatge poden cometre errors. Verifica la informació important.", - "Loading Kokoro.js...": "", + "Loading Kokoro.js...": "Carregant Kokoro.js", "Local": "Local", "Local Models": "Models locals", "Lost": "Perdut", @@ -622,7 +622,7 @@ "Make sure to export a workflow.json file as API format from ComfyUI.": "Assegura't d'exportar un fitxer workflow.json com a format API des de ComfyUI.", "Manage": "Gestionar", "Manage Arena Models": "Gestionar els models de l'Arena", - "Manage Direct Connections": "", + "Manage Direct Connections": "Gestionar les connexions directes", "Manage Models": "Gestionar els models", "Manage Ollama": "Gestionar Ollama", "Manage Ollama API Connections": "Gestionar les connexions a l'API d'Ollama", @@ -766,7 +766,7 @@ "Plain text (.txt)": "Text pla (.txt)", "Playground": "Zona de jocs", "Please carefully review the following warnings:": "Si us plau, revisa els següents avisos amb cura:", - "Please do not close the settings page while loading the model.": "", + "Please do not close the settings page while loading the model.": "No tanquis la pàgina de configuració mentre carregues el model.", "Please enter a prompt": 
"Si us plau, entra una indicació", "Please fill in all fields.": "Emplena tots els camps, si us plau.", "Please select a model first.": "Si us plau, selecciona un model primer", @@ -776,7 +776,7 @@ "Positive attitude": "Actitud positiva", "Prefix ID": "Identificador del prefix", "Prefix ID is used to avoid conflicts with other connections by adding a prefix to the model IDs - leave empty to disable": "L'identificador de prefix s'utilitza per evitar conflictes amb altres connexions afegint un prefix als ID de model; deixa'l en blanc per desactivar-lo.", - "Presence Penalty": "", + "Presence Penalty": "Penalització de presència", "Previous 30 days": "30 dies anteriors", "Previous 7 days": "7 dies anteriors", "Profile Image": "Imatge de perfil", @@ -813,7 +813,7 @@ "Rename": "Canviar el nom", "Reorder Models": "Reordenar els models", "Repeat Last N": "Repeteix els darrers N", - "Repeat Penalty (Ollama)": "", + "Repeat Penalty (Ollama)": "Penalització per repetició (Ollama)", "Reply in Thread": "Respondre al fil", "Request Mode": "Mode de sol·licitud", "Reranking Model": "Model de reavaluació", @@ -876,7 +876,7 @@ "Select a pipeline": "Seleccionar una Pipeline", "Select a pipeline url": "Seleccionar l'URL d'una Pipeline", "Select a tool": "Seleccionar una eina", - "Select an auth method": "", + "Select an auth method": "Seleccionar un mètode d'autenticació", "Select an Ollama instance": "Seleccionar una instància d'Ollama", "Select Engine": "Seleccionar el motor", "Select Knowledge": "Seleccionar coneixement", @@ -889,8 +889,8 @@ "Send message": "Enviar missatge", "Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "Envia `stream_options: { include_usage: true }` a la sol·licitud.\nEls proveïdors compatibles retornaran la informació d'ús del token a la resposta quan s'estableixi.", "September": "Setembre", - "SerpApi API Key": "", - "SerpApi Engine": "", + "SerpApi API 
Key": "Clau API de SerpApi", + "SerpApi Engine": "Motor de SerpApi", "Serper API Key": "Clau API de Serper", "Serply API Key": "Clau API de Serply", "Serpstack API Key": "Clau API de Serpstack", @@ -910,8 +910,8 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Establir el nombre de fils de treball utilitzats per al càlcul. Aquesta opció controla quants fils s'utilitzen per processar les sol·licituds entrants simultàniament. Augmentar aquest valor pot millorar el rendiment amb càrregues de treball de concurrència elevada, però també pot consumir més recursos de CPU.", "Set Voice": "Establir la veu", "Set whisper model": "Establir el model whisper", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "Estableix un biaix pla contra tokens que han aparegut almenys una vegada. Un valor més alt (p. ex., 1,5) penalitzarà les repeticions amb més força, mentre que un valor més baix (p. ex., 0,9) serà més indulgent. A 0, està desactivat. (Per defecte: 0)", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. 
A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "Estableix un biaix d'escala contra tokens per penalitzar les repeticions, en funció de quantes vegades han aparegut. Un valor més alt (p. ex., 1,5) penalitzarà les repeticions amb més força, mentre que un valor més baix (p. ex., 0,9) serà més indulgent. A 0, està desactivat. (Per defecte: 1.1)", "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "Establir fins a quin punt el model mira enrere per evitar la repetició. (Per defecte: 64, 0 = desactivat, -1 = num_ctx)", "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "Establir la llavor del nombre aleatori que s'utilitzarà per a la generació. Establir-ho a un número específic farà que el model generi el mateix text per a la mateixa sol·licitud. (Per defecte: aleatori)", "Sets the size of the context window used to generate the next token. (Default: 2048)": "Estableix la mida de la finestra de context utilitzada per generar el següent token. (Per defecte: 2048)", @@ -958,7 +958,7 @@ "Tags Generation Prompt": "Indicació per a la generació d'etiquetes", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "El mostreig sense cua s'utilitza per reduir l'impacte de tokens menys probables de la sortida. Un valor més alt (p. ex., 2,0) reduirà més l'impacte, mentre que un valor d'1,0 desactiva aquesta configuració. 
(per defecte: 1)", "Tap to interrupt": "Prem per interrompre", - "Tasks": "", + "Tasks": "Tasques", "Tavily API Key": "Clau API de Tavily", "Tell us more:": "Dona'ns més informació:", "Temperature": "Temperatura", @@ -1005,7 +1005,7 @@ "Title (e.g. Tell me a fun fact)": "Títol (p. ex. Digues-me quelcom divertit)", "Title Auto-Generation": "Generació automàtica de títol", "Title cannot be an empty string.": "El títol no pot ser una cadena buida.", - "Title Generation": "", + "Title Generation": "Generació de títols", "Title Generation Prompt": "Indicació de generació de títol", "TLS": "TLS", "To access the available model names for downloading,": "Per accedir als noms dels models disponibles per descarregar,", @@ -1062,7 +1062,7 @@ "Updated": "Actualitzat", "Updated at": "Actualitzat el", "Updated At": "Actualitzat el", - "Upgrade to a licensed plan for enhanced capabilities, including custom theming and branding, and dedicated support.": "", + "Upgrade to a licensed plan for enhanced capabilities, including custom theming and branding, and dedicated support.": "Actualitzar a un pla amb llicència per obtenir capacitats millorades, com ara la temàtica personalitzada i la marca, i assistència dedicada.", "Upload": "Pujar", "Upload a GGUF model": "Pujar un model GGUF", "Upload directory": "Pujar directori", @@ -1101,7 +1101,7 @@ "Warning:": "Avís:", "Warning: Enabling this will allow users to upload arbitrary code on the server.": "Avís: Habilitar això permetrà als usuaris penjar codi arbitrari al servidor.", "Warning: If you update or change your embedding model, you will need to re-import all documents.": "Avís: Si s'actualitza o es canvia el model d'incrustació, s'hauran de tornar a importar tots els documents.", - "Warning: Jupyter execution enables arbitrary code execution, posing severe security risks—proceed with extreme caution.": "", + "Warning: Jupyter execution enables arbitrary code execution, posing severe security risks—proceed with extreme caution.": 
"Avís: l'execució de Jupyter permet l'execució de codi arbitrari, la qual cosa comporta greus riscos de seguretat; procediu amb extrema precaució.", "Web": "Web", "Web API": "Web API", "Web Loader Settings": "Preferències del carregador web", From a8859a81454a55f80c15a72a3c87507ef23459a0 Mon Sep 17 00:00:00 2001 From: Simone Date: Thu, 20 Feb 2025 21:25:32 +0100 Subject: [PATCH 018/623] Fix on ollama to openai conversion - stream can return a single message with content --- backend/open_webui/utils/response.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/backend/open_webui/utils/response.py b/backend/open_webui/utils/response.py index bc47e1e1361..8c3f1a58ebc 100644 --- a/backend/open_webui/utils/response.py +++ b/backend/open_webui/utils/response.py @@ -104,7 +104,7 @@ async def convert_streaming_response_ollama_to_openai(ollama_streaming_response) data = json.loads(data) model = data.get("model", "ollama") - message_content = data.get("message", {}).get("content", "") + message_content = data.get("message", {}).get("content", None) tool_calls = data.get("message", {}).get("tool_calls", None) openai_tool_calls = None @@ -118,7 +118,7 @@ async def convert_streaming_response_ollama_to_openai(ollama_streaming_response) usage = convert_ollama_usage_to_openai(data) data = openai_chat_chunk_message_template( - model, message_content if not done else None, openai_tool_calls, usage + model, message_content, openai_tool_calls, usage ) line = f"data: {json.dumps(data)}\n\n" From 1332a0d3815c46bff5d4bf55c6ce1f6373cdd5f2 Mon Sep 17 00:00:00 2001 From: "Richard (Huangrui) Chu" <65276824+HuangruiChu@users.noreply.github.com> Date: Thu, 20 Feb 2025 16:07:32 -0500 Subject: [PATCH 019/623] Update zh-CN translation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 1. remove the typo ”‘’" for -1 表示无限制,正整数表示具体限制” 2. Token should be kept rather than translated as "标记" 3. 
Max Tokens (num_predict) should be "最大Token数量 (num_predict)" 4. "Enter Jupyter Token" here “Token" could be translated as ”令牌“ just as "JWT Token": "JWT 令牌", (line 582) 5. "TLS" which means "Transport Layer Security" should be translated to "传输层安全协议" 6. "Tokens To Keep On Context Refresh (num_keep)" "在语境刷新时需保留的 Token 数量" 7. change token to "Token" in the Chinese translation. --- src/lib/i18n/locales/zh-CN/translation.json | 22 ++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/src/lib/i18n/locales/zh-CN/translation.json b/src/lib/i18n/locales/zh-CN/translation.json index 66f6a591392..6bf556aa4e0 100644 --- a/src/lib/i18n/locales/zh-CN/translation.json +++ b/src/lib/i18n/locales/zh-CN/translation.json @@ -1,5 +1,5 @@ { - "-1 for no limit, or a positive integer for a specific limit": "-1 表示无限制,正整数表示具体限制”", + "-1 for no limit, or a positive integer for a specific limit": "-1 表示无限制,正整数表示具体限制", "'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.": "'s', 'm', 'h', 'd', 'w' 或 '-1' 表示无过期时间。", "(e.g. `sh webui.sh --api --api-auth username_password`)": "(例如 `sh webui.sh --api --api-auth username_password`)", "(e.g. `sh webui.sh --api`)": "(例如 `sh webui.sh --api`)", @@ -63,7 +63,7 @@ "Allow Voice Interruption in Call": "允许通话中的打断语音", "Allowed Endpoints": "允许的端点", "Already have an account?": "已经拥有账号了?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "top_p的替代方法,目标是在质量和多样性之间取得平衡。参数p表示一个token相对于最有可能的token所需的最低概率。比如,当p=0.05且最有可能的token概率为0.9时,概率低于0.045的logits会被排除。(默认值:0.0)", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. 
The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "top_p的替代方法,目标是在质量和多样性之间取得平衡。参数p表示一个Token相对于最有可能的Token所需的最低概率。比如,当p=0.05且最有可能的Token概率为0.9时,概率低于0.045的logits会被排除。(默认值:0.0)", "Always": "保持", "Amazing": "很棒", "an assistant": "一个助手", @@ -380,7 +380,7 @@ "Enter Image Size (e.g. 512x512)": "输入图像分辨率 (例如:512x512)", "Enter Jina API Key": "输入 Jina API 密钥", "Enter Jupyter Password": "输入 Jupyter 密码", - "Enter Jupyter Token": "输入 Jupyter Token", + "Enter Jupyter Token": "输入 Jupyter 令牌", "Enter Jupyter URL": "输入 Jupyter URL", "Enter Kagi Search API Key": "输入 Kagi Search API 密钥", "Enter language codes": "输入语言代码", @@ -629,7 +629,7 @@ "Manage OpenAI API Connections": "管理OpenAI API连接", "Manage Pipelines": "管理 Pipeline", "March": "三月", - "Max Tokens (num_predict)": "最多 Token (num_predict)", + "Max Tokens (num_predict)": "最大Token数量 (num_predict)", "Max Upload Count": "最大上传数量", "Max Upload Size": "最大上传大小", "Maximum of 3 models can be downloaded simultaneously. Please try again later.": "最多可以同时下载 3 个模型,请稍后重试。", @@ -910,14 +910,14 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "设置用于计算的工作线程数量。该选项可控制并发处理传入请求的线程数量。增加该值可以提高高并发工作负载下的性能,但也可能消耗更多的 CPU 资源。", "Set Voice": "设置音色", "Set whisper model": "设置 whisper 模型", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. 
(Default: 0)": "这个设置项用于调整对重复 tokens 的抑制强度。当某个 token 至少出现过一次后,系统会通过 flat bias 参数施加惩罚力度:数值越大(如 1.5),抑制重复的效果越强烈;数值较小(如 0.9)则相对宽容。当设为 0 时,系统会完全关闭这个重复抑制功能(默认值为 0)。", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "这个参数用于通过 scaling bias 机制抑制重复内容:当某些 tokens 重复出现时,系统会根据它们已出现的次数自动施加惩罚。数值越大(如 1.5)惩罚力度越强,能更有效减少重复;数值较小(如 0.9)则允许更多重复。当设为 0 时完全关闭该功能,默认值设置为 1.1 保持适度抑制。", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "这个设置项用于调整对重复 Token 的抑制强度。当某个 Token 至少出现过一次后,系统会通过 flat bias 参数施加惩罚力度:数值越大(如 1.5),抑制重复的效果越强烈;数值较小(如 0.9)则相对宽容。当设为 0 时,系统会完全关闭这个重复抑制功能(默认值为 0)。", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "这个参数用于通过 scaling bias 机制抑制重复内容:当某些 Token 重复出现时,系统会根据它们已出现的次数自动施加惩罚。数值越大(如 1.5)惩罚力度越强,能更有效减少重复;数值较小(如 0.9)则允许更多重复。当设为 0 时完全关闭该功能,默认值设置为 1.1 保持适度抑制。", "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "设置模型回溯多远以防止重复。(默认值:64,0 = 禁用,-1 = num_ctx)", "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "设置 random number seed 可以控制模型生成文本的随机起点。如果指定一个具体数字,当输入相同的提示语时,模型每次都会生成完全相同的文本内容(默认是随机选取 seed)。", "Sets the size of the context window used to generate the next token. (Default: 2048)": "设置用于生成下一个 Token 的上下文大小。(默认值:2048)", "Sets the stop sequences to use. 
When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "设置要使用的停止序列。遇到这种模式时,大语言模型将停止生成文本并返回。可以通过在模型文件中指定多个单独的停止参数来设置多个停止模式。", "Settings": "设置", - "Settings saved successfully!": "设置已保存", + "Settings saved successfully!": "设置已成功保存!", "Share": "分享", "Share Chat": "分享对话", "Share to Open WebUI Community": "分享到 OpenWebUI 社区", @@ -956,7 +956,7 @@ "System Prompt": "系统提示词 (System Prompt)", "Tags Generation": "标签生成", "Tags Generation Prompt": "标签生成提示词", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Tail free sampling 用于减少输出中可能性较低的标记的影响。数值越大(如 2.0),影响就越小,而数值为 1.0 则会禁用此设置。(默认值:1)", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Tail free sampling 用于减少输出中可能性较低的Token的影响。数值越大(如 2.0),影响就越小,而数值为 1.0 则会禁用此设置。(默认值:1)", "Tap to interrupt": "点击以中断", "Tasks": "任务", "Tavily API Key": "Tavily API 密钥", @@ -985,7 +985,7 @@ "This action cannot be undone. Do you wish to continue?": "此操作无法撤销。是否确认继续?", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "这将确保您的宝贵对话被安全地保存到后台数据库中。感谢!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "这是一个实验功能,可能不会如预期那样工作,而且可能随时发生变化。", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. 
(Default: 24)": "该选项控制刷新上下文时保留多少标记。例如,如果设置为 2,就会保留对话上下文的最后 2 个标记。保留上下文有助于保持对话的连续性,但可能会降低回复新话题的能力。(默认值:24)", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "该选项控制刷新上下文时保留多少Token。例如,如果设置为 2,就会保留对话上下文的最后 2 个Token。保留上下文有助于保持对话的连续性,但可能会降低回复新话题的能力。(默认值:24)", "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "此选项设置了模型在回答中可以生成的最大 Token 数。增加这个限制可以让模型提供更长的答案,但也可能增加生成无用或不相关内容的可能性。 (默认值:128)", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "此选项将会删除文件集中所有文件,并用新上传的文件替换。", "This response was generated by \"{{model}}\"": "此回复由 \"{{model}}\" 生成", @@ -1007,7 +1007,7 @@ "Title cannot be an empty string.": "标题不能为空。", "Title Generation": "标题生成", "Title Generation Prompt": "用于自动生成标题的提示词", - "TLS": "TLS", + "TLS": "传输层安全协议", "To access the available model names for downloading,": "要访问可下载的模型名称,", "To access the GGUF models available for downloading,": "要访问可下载的 GGUF 模型,", "To access the WebUI, please reach out to the administrator. 
Admins can manage user statuses from the Admin Panel.": "请联系管理员以访问。管理员可以在后台管理面板中管理用户状态。", @@ -1022,7 +1022,7 @@ "Toggle settings": "切换设置", "Toggle sidebar": "切换侧边栏", "Token": "Token", - "Tokens To Keep On Context Refresh (num_keep)": "在语境刷新时需保留的 Tokens", + "Tokens To Keep On Context Refresh (num_keep)": "在语境刷新时需保留的 Token 数量", "Too verbose": "过于冗长", "Tool created successfully": "工具创建成功", "Tool deleted successfully": "工具删除成功", From e31f680788910b04a1709271979c1b416f1b839a Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Thu, 20 Feb 2025 20:46:00 -0800 Subject: [PATCH 020/623] refac --- backend/open_webui/routers/tasks.py | 46 +++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/backend/open_webui/routers/tasks.py b/backend/open_webui/routers/tasks.py index 0328cefe049..b63c9732af9 100644 --- a/backend/open_webui/routers/tasks.py +++ b/backend/open_webui/routers/tasks.py @@ -20,6 +20,10 @@ from open_webui.constants import TASKS from open_webui.routers.pipelines import process_pipeline_inlet_filter +from open_webui.utils.filter import ( + get_sorted_filter_ids, + process_filter_functions, +) from open_webui.utils.task import get_task_model_id from open_webui.config import ( @@ -221,6 +225,12 @@ async def generate_title( }, } + # Process the payload through the pipeline + try: + payload = await process_pipeline_inlet_filter(request, payload, user, models) + except Exception as e: + raise e + try: return await generate_chat_completion(request, form_data=payload, user=user) except Exception as e: @@ -290,6 +300,12 @@ async def generate_chat_tags( }, } + # Process the payload through the pipeline + try: + payload = await process_pipeline_inlet_filter(request, payload, user, models) + except Exception as e: + raise e + try: return await generate_chat_completion(request, form_data=payload, user=user) except Exception as e: @@ -356,6 +372,12 @@ async def generate_image_prompt( }, } + # Process the payload through the pipeline + try: + payload 
= await process_pipeline_inlet_filter(request, payload, user, models) + except Exception as e: + raise e + try: return await generate_chat_completion(request, form_data=payload, user=user) except Exception as e: @@ -433,6 +455,12 @@ async def generate_queries( }, } + # Process the payload through the pipeline + try: + payload = await process_pipeline_inlet_filter(request, payload, user, models) + except Exception as e: + raise e + try: return await generate_chat_completion(request, form_data=payload, user=user) except Exception as e: @@ -514,6 +542,12 @@ async def generate_autocompletion( }, } + # Process the payload through the pipeline + try: + payload = await process_pipeline_inlet_filter(request, payload, user, models) + except Exception as e: + raise e + try: return await generate_chat_completion(request, form_data=payload, user=user) except Exception as e: @@ -584,6 +618,12 @@ async def generate_emoji( }, } + # Process the payload through the pipeline + try: + payload = await process_pipeline_inlet_filter(request, payload, user, models) + except Exception as e: + raise e + try: return await generate_chat_completion(request, form_data=payload, user=user) except Exception as e: @@ -644,6 +684,12 @@ async def generate_moa_response( }, } + # Process the payload through the pipeline + try: + payload = await process_pipeline_inlet_filter(request, payload, user, models) + except Exception as e: + raise e + try: return await generate_chat_completion(request, form_data=payload, user=user) except Exception as e: From 68e33b53867de331ed71682d2dff869c55fb0856 Mon Sep 17 00:00:00 2001 From: Karl Lee <61072264+KarlLee830@users.noreply.github.com> Date: Fri, 21 Feb 2025 14:58:40 +0800 Subject: [PATCH 021/623] i18n: Update Chinese Translation --- src/lib/i18n/locales/zh-CN/translation.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib/i18n/locales/zh-CN/translation.json b/src/lib/i18n/locales/zh-CN/translation.json index 
6bf556aa4e0..b0db571bace 100644 --- a/src/lib/i18n/locales/zh-CN/translation.json +++ b/src/lib/i18n/locales/zh-CN/translation.json @@ -995,8 +995,8 @@ "This will delete all models including custom models and cannot be undone.": "这将删除所有模型,包括自定义模型,且无法撤销。", "This will reset the knowledge base and sync all files. Do you wish to continue?": "这将重置知识库并替换所有文件为目录下文件。确认继续?", "Thorough explanation": "解释较为详细", - "Thought for {{DURATION}}": "已推理 持续 {{DURATION}}", - "Thought for {{DURATION}} seconds": "已推理 持续 {{DURATION}} 秒", + "Thought for {{DURATION}}": "思考用时 {{DURATION}}", + "Thought for {{DURATION}} seconds": "思考用时 {{DURATION}} 秒", "Tika": "Tika", "Tika Server URL required.": "请输入 Tika 服务器地址。", "Tiktoken": "Tiktoken", From ab1f2ae914301dfa080a3495813c98901e0c870c Mon Sep 17 00:00:00 2001 From: Olof Larsson Date: Fri, 21 Feb 2025 08:24:48 +0100 Subject: [PATCH 022/623] feat/async-pipes --- backend/open_webui/functions.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/backend/open_webui/functions.py b/backend/open_webui/functions.py index 274be56ec09..2f94f701e94 100644 --- a/backend/open_webui/functions.py +++ b/backend/open_webui/functions.py @@ -2,6 +2,7 @@ import sys import inspect import json +import asyncio from pydantic import BaseModel from typing import AsyncGenerator, Generator, Iterator @@ -76,11 +77,13 @@ async def get_function_models(request): if hasattr(function_module, "pipes"): sub_pipes = [] - # Check if pipes is a function or a list - + # Handle pipes being a list, sync function, or async function try: if callable(function_module.pipes): - sub_pipes = function_module.pipes() + if asyncio.iscoroutinefunction(function_module.pipes): + sub_pipes = await function_module.pipes() + else: + sub_pipes = function_module.pipes() else: sub_pipes = function_module.pipes except Exception as e: From 2993332b387c4230a4f5676c178bf4ece4384cdf Mon Sep 17 00:00:00 2001 From: huanght Date: Fri, 21 Feb 2025 16:10:11 +0800 Subject: [PATCH 023/623] 
fix:Quick selection tool lost --- src/lib/components/chat/Chat.svelte | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/lib/components/chat/Chat.svelte b/src/lib/components/chat/Chat.svelte index fcd5177d753..98be06f5cd6 100644 --- a/src/lib/components/chat/Chat.svelte +++ b/src/lib/components/chat/Chat.svelte @@ -187,15 +187,20 @@ setToolIds(); } + $: if (atSelectedModel || selectedModels) { + setToolIds(); + } + const setToolIds = async () => { if (!$tools) { tools.set(await getTools(localStorage.token)); } - if (selectedModels.length !== 1) { + if (selectedModels.length !== 1 && !atSelectedModel) { return; } - const model = $models.find((m) => m.id === selectedModels[0]); + + const model = atSelectedModel ?? $models.find((m) => m.id === selectedModels[0]); if (model) { selectedToolIds = (model?.info?.meta?.toolIds ?? []).filter((id) => $tools.find((t) => t.id === id) From cdf620e6eedd797a01a984e510bc67761a051b14 Mon Sep 17 00:00:00 2001 From: Coleton M Date: Fri, 21 Feb 2025 04:41:45 -0600 Subject: [PATCH 024/623] Update audio.py to fetch custom URL voices and models --- backend/open_webui/routers/audio.py | 40 ++++++++++++++++++++++++++--- 1 file changed, 37 insertions(+), 3 deletions(-) diff --git a/backend/open_webui/routers/audio.py b/backend/open_webui/routers/audio.py index a970366d1ed..a8cd3d14a55 100644 --- a/backend/open_webui/routers/audio.py +++ b/backend/open_webui/routers/audio.py @@ -679,8 +679,21 @@ def transcription( def get_available_models(request: Request) -> list[dict]: available_models = [] + """if request.app.state.config.TTS_ENGINE == "openai": + available_models = [{"id": "tts-1"}, {"id": "tts-1-hd"}]""" if request.app.state.config.TTS_ENGINE == "openai": - available_models = [{"id": "tts-1"}, {"id": "tts-1-hd"}] + # Use custom endpoint if not using the official OpenAI API URL + if not request.app.state.config.TTS_OPENAI_API_BASE_URL.startswith("https://api.openai.com"): + try: + response = 
requests.get(f"{request.app.state.config.TTS_OPENAI_API_BASE_URL}/audio/models") + response.raise_for_status() + data = response.json() + available_models = data.get("models", []) + except Exception as e: + log.error(f"Error fetching models from custom endpoint: {str(e)}") + available_models = [] + else: + available_models = [{"id": "tts-1"}, {"id": "tts-1-hd"}] elif request.app.state.config.TTS_ENGINE == "elevenlabs": try: response = requests.get( @@ -710,7 +723,7 @@ async def get_models(request: Request, user=Depends(get_verified_user)): def get_available_voices(request) -> dict: """Returns {voice_id: voice_name} dict""" available_voices = {} - if request.app.state.config.TTS_ENGINE == "openai": + """if request.app.state.config.TTS_ENGINE == "openai": available_voices = { "alloy": "alloy", "echo": "echo", @@ -718,7 +731,28 @@ def get_available_voices(request) -> dict: "onyx": "onyx", "nova": "nova", "shimmer": "shimmer", - } + }""" + if request.app.state.config.TTS_ENGINE == "openai": + # Use custom endpoint if not using the official OpenAI API URL + if not request.app.state.config.TTS_OPENAI_API_BASE_URL.startswith("https://api.openai.com"): + try: + response = requests.get(f"{request.app.state.config.TTS_OPENAI_API_BASE_URL}/audio/voices") + response.raise_for_status() + data = response.json() + voices_list = data.get("voices", []) + available_voices = {voice["id"]: voice["name"] for voice in voices_list} + except Exception as e: + log.error(f"Error fetching voices from custom endpoint: {str(e)}") + available_voices = {} + else: + available_voices = { + "alloy": "alloy", + "echo": "echo", + "fable": "fable", + "onyx": "onyx", + "nova": "nova", + "shimmer": "shimmer", + } elif request.app.state.config.TTS_ENGINE == "elevenlabs": try: available_voices = get_elevenlabs_voices( From f789ad59a9e57b15e8218e7b5d36293051fbf44f Mon Sep 17 00:00:00 2001 From: Synergyst Date: Fri, 21 Feb 2025 04:47:46 -0600 Subject: [PATCH 025/623] Update audio.py Removed original code 
that was commented out --- backend/open_webui/routers/audio.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/backend/open_webui/routers/audio.py b/backend/open_webui/routers/audio.py index a8cd3d14a55..12c9dbc6d0d 100644 --- a/backend/open_webui/routers/audio.py +++ b/backend/open_webui/routers/audio.py @@ -679,8 +679,6 @@ def transcription( def get_available_models(request: Request) -> list[dict]: available_models = [] - """if request.app.state.config.TTS_ENGINE == "openai": - available_models = [{"id": "tts-1"}, {"id": "tts-1-hd"}]""" if request.app.state.config.TTS_ENGINE == "openai": # Use custom endpoint if not using the official OpenAI API URL if not request.app.state.config.TTS_OPENAI_API_BASE_URL.startswith("https://api.openai.com"): @@ -723,15 +721,6 @@ async def get_models(request: Request, user=Depends(get_verified_user)): def get_available_voices(request) -> dict: """Returns {voice_id: voice_name} dict""" available_voices = {} - """if request.app.state.config.TTS_ENGINE == "openai": - available_voices = { - "alloy": "alloy", - "echo": "echo", - "fable": "fable", - "onyx": "onyx", - "nova": "nova", - "shimmer": "shimmer", - }""" if request.app.state.config.TTS_ENGINE == "openai": # Use custom endpoint if not using the official OpenAI API URL if not request.app.state.config.TTS_OPENAI_API_BASE_URL.startswith("https://api.openai.com"): From c4b441de6518d1c8bf0fa59b2c837984158fb9a8 Mon Sep 17 00:00:00 2001 From: Bob McElrath Date: Fri, 21 Feb 2025 09:12:34 -0500 Subject: [PATCH 026/623] Support thinking tags used by Openthinker --- backend/open_webui/utils/middleware.py | 53 ++++++++++++++++++-------- 1 file changed, 37 insertions(+), 16 deletions(-) diff --git a/backend/open_webui/utils/middleware.py b/backend/open_webui/utils/middleware.py index 7ec764fc010..8c82b7074b6 100644 --- a/backend/open_webui/utils/middleware.py +++ b/backend/open_webui/utils/middleware.py @@ -1127,12 +1127,12 @@ def serialize_content_blocks(content_blocks, 
raw=False): if reasoning_duration is not None: if raw: - content = f'{content}\n<{block["tag"]}>{block["content"]}\n' + content = f'{content}\n<{block["start_tag"]}>{block["content"]}<{block["end_tag"]}>\n' else: content = f'{content}\n
\nThought for {reasoning_duration} seconds\n{reasoning_display_content}\n
\n' else: if raw: - content = f'{content}\n<{block["tag"]}>{block["content"]}\n' + content = f'{content}\n<{block["start_tag"]}>{block["content"]}<{block["end_tag"]}>\n' else: content = f'{content}\n
\nThinking…\n{reasoning_display_content}\n
\n' @@ -1228,9 +1228,9 @@ def extract_attributes(tag_content): return attributes if content_blocks[-1]["type"] == "text": - for tag in tags: + for start_tag, end_tag in tags: # Match start tag e.g., or - start_tag_pattern = rf"<{tag}(\s.*?)?>" + start_tag_pattern = rf"<{re.escape(start_tag)}(\s.*?)?>" match = re.search(start_tag_pattern, content) if match: attr_content = ( @@ -1263,7 +1263,8 @@ def extract_attributes(tag_content): content_blocks.append( { "type": content_type, - "tag": tag, + "start_tag": start_tag, + "end_tag": end_tag, "attributes": attributes, "content": "", "started_at": time.time(), @@ -1275,9 +1276,10 @@ def extract_attributes(tag_content): break elif content_blocks[-1]["type"] == content_type: - tag = content_blocks[-1]["tag"] + start_tag = content_blocks[-1]["start_tag"] + end_tag = content_blocks[-1]["end_tag"] # Match end tag e.g., - end_tag_pattern = rf"" + end_tag_pattern = rf"<{re.escape(end_tag)}>" # Check if the content has the end tag if re.search(end_tag_pattern, content): @@ -1285,7 +1287,7 @@ def extract_attributes(tag_content): block_content = content_blocks[-1]["content"] # Strip start and end tags from the content - start_tag_pattern = rf"<{tag}(.*?)>" + start_tag_pattern = rf"<{re.escape(start_tag)}(.*?)>" block_content = re.sub( start_tag_pattern, "", block_content ).strip() @@ -1350,7 +1352,7 @@ def extract_attributes(tag_content): # Clean processed content content = re.sub( - rf"<{tag}(.*?)>(.|\n)*?", + rf"<{re.escape(start_tag)}(.*?)>(.|\n)*?<{re.escape(end_tag)}>", "", content, flags=re.DOTALL, @@ -1388,19 +1390,28 @@ def extract_attributes(tag_content): # We might want to disable this by default DETECT_REASONING = True + DETECT_SOLUTION = True DETECT_CODE_INTERPRETER = metadata.get("features", {}).get( "code_interpreter", False ) reasoning_tags = [ - "think", - "thinking", - "reason", - "reasoning", - "thought", - "Thought", + ("think", "/think"), + ("thinking", "/thinking"), + ("reason", "/reason"), + ("reasoning", 
"/reasoning"), + ("thought", "/thought"), + ("Thought", "/Thought"), + ("|begin_of_thought|", "|end_of_thought|") + ] + + code_interpreter_tags = [ + ("code_interpreter", "/code_interpreter") + ] + + solution_tags = [ + ("|begin_of_solution|", "|end_of_solution|") ] - code_interpreter_tags = ["code_interpreter"] try: for event in events: @@ -1533,6 +1544,16 @@ async def stream_body_handler(response): if end: break + if DETECT_SOLUTION: + content, content_blocks, _ = ( + tag_content_handler( + "solution", + solution_tags, + content, + content_blocks, + ) + ) + if ENABLE_REALTIME_CHAT_SAVE: # Save message in the database Chats.upsert_message_to_chat_by_id_and_message_id( From fb3886cf04d1431e7dae6968f607634999404b45 Mon Sep 17 00:00:00 2001 From: Simone Date: Fri, 21 Feb 2025 18:46:10 +0100 Subject: [PATCH 027/623] Added support for stop parameter --- backend/open_webui/utils/payload.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/backend/open_webui/utils/payload.py b/backend/open_webui/utils/payload.py index 51e8d50ccb1..4c1bbad9a9c 100644 --- a/backend/open_webui/utils/payload.py +++ b/backend/open_webui/utils/payload.py @@ -230,6 +230,12 @@ def convert_payload_openai_to_ollama(openai_payload: dict) -> dict: "system" ] # To prevent Ollama warning of invalid option provided + # If there is the "stop" parameter in the openai_payload, remap it to the ollama_payload.options + if "stop" in openai_payload: + ollama_options = ollama_payload.get("options", {}) + ollama_options["stop"] = openai_payload.get("stop") + ollama_payload["options"] = ollama_options + if "metadata" in openai_payload: ollama_payload["metadata"] = openai_payload["metadata"] From 613a087387c094e71ee91d29c015195ef401e160 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Fri, 21 Feb 2025 10:55:03 -0800 Subject: [PATCH 028/623] refac --- backend/open_webui/routers/audio.py | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) diff --git 
a/backend/open_webui/routers/audio.py b/backend/open_webui/routers/audio.py index 12c9dbc6d0d..8eb11120599 100644 --- a/backend/open_webui/routers/audio.py +++ b/backend/open_webui/routers/audio.py @@ -681,15 +681,19 @@ def get_available_models(request: Request) -> list[dict]: available_models = [] if request.app.state.config.TTS_ENGINE == "openai": # Use custom endpoint if not using the official OpenAI API URL - if not request.app.state.config.TTS_OPENAI_API_BASE_URL.startswith("https://api.openai.com"): + if not request.app.state.config.TTS_OPENAI_API_BASE_URL.startswith( + "https://api.openai.com" + ): try: - response = requests.get(f"{request.app.state.config.TTS_OPENAI_API_BASE_URL}/audio/models") + response = requests.get( + f"{request.app.state.config.TTS_OPENAI_API_BASE_URL}/audio/models" + ) response.raise_for_status() data = response.json() available_models = data.get("models", []) except Exception as e: log.error(f"Error fetching models from custom endpoint: {str(e)}") - available_models = [] + available_models = [{"id": "tts-1"}, {"id": "tts-1-hd"}] else: available_models = [{"id": "tts-1"}, {"id": "tts-1-hd"}] elif request.app.state.config.TTS_ENGINE == "elevenlabs": @@ -723,16 +727,27 @@ def get_available_voices(request) -> dict: available_voices = {} if request.app.state.config.TTS_ENGINE == "openai": # Use custom endpoint if not using the official OpenAI API URL - if not request.app.state.config.TTS_OPENAI_API_BASE_URL.startswith("https://api.openai.com"): + if not request.app.state.config.TTS_OPENAI_API_BASE_URL.startswith( + "https://api.openai.com" + ): try: - response = requests.get(f"{request.app.state.config.TTS_OPENAI_API_BASE_URL}/audio/voices") + response = requests.get( + f"{request.app.state.config.TTS_OPENAI_API_BASE_URL}/audio/voices" + ) response.raise_for_status() data = response.json() voices_list = data.get("voices", []) available_voices = {voice["id"]: voice["name"] for voice in voices_list} except Exception as e: log.error(f"Error 
fetching voices from custom endpoint: {str(e)}") - available_voices = {} + available_voices = { + "alloy": "alloy", + "echo": "echo", + "fable": "fable", + "onyx": "onyx", + "nova": "nova", + "shimmer": "shimmer", + } else: available_voices = { "alloy": "alloy", From a7d8ed0c6dc6b66ee9d37dc3a7d9812da71a7d7f Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Fri, 21 Feb 2025 12:11:21 -0800 Subject: [PATCH 029/623] refac --- src/lib/components/chat/Chat.svelte | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/lib/components/chat/Chat.svelte b/src/lib/components/chat/Chat.svelte index 98be06f5cd6..7de69f6932f 100644 --- a/src/lib/components/chat/Chat.svelte +++ b/src/lib/components/chat/Chat.svelte @@ -1493,7 +1493,10 @@ params?.system ?? $settings?.system ?? '', $user.name, $settings?.userLocation - ? await getAndUpdateUserLocation(localStorage.token) + ? await getAndUpdateUserLocation(localStorage.token).catch((err) => { + console.error(err); + return undefined; + }) : undefined )}${ (responseMessage?.userContext ?? null) @@ -1578,7 +1581,12 @@ variables: { ...getPromptVariables( $user.name, - $settings?.userLocation ? await getAndUpdateUserLocation(localStorage.token) : undefined + $settings?.userLocation + ? 
await getAndUpdateUserLocation(localStorage.token).catch((err) => { + console.error(err); + return undefined; + }) + : undefined ) }, model_item: $models.find((m) => m.id === model.id), From 642dcd4b702a8e189c0d04e6899218797763a083 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Fri, 21 Feb 2025 13:19:11 -0800 Subject: [PATCH 030/623] fix: model import --- .../components/workspace/Models/ModelEditor.svelte | 1 - .../(app)/workspace/models/create/+page.svelte | 12 ++++++++++-- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/src/lib/components/workspace/Models/ModelEditor.svelte b/src/lib/components/workspace/Models/ModelEditor.svelte index 34b5a4b7b33..170c37f228b 100644 --- a/src/lib/components/workspace/Models/ModelEditor.svelte +++ b/src/lib/components/workspace/Models/ModelEditor.svelte @@ -180,7 +180,6 @@ } if (model) { - console.log(model); name = model.name; await tick(); diff --git a/src/routes/(app)/workspace/models/create/+page.svelte b/src/routes/(app)/workspace/models/create/+page.svelte index fddf8277be9..48fd5ccab2a 100644 --- a/src/routes/(app)/workspace/models/create/+page.svelte +++ b/src/routes/(app)/workspace/models/create/+page.svelte @@ -62,9 +62,17 @@ !['https://openwebui.com', 'https://www.openwebui.com', 'http://localhost:5173'].includes( event.origin ) - ) + ) { return; - model = JSON.parse(event.data); + } + + let data = JSON.parse(event.data); + + if (data?.info) { + data = data.info; + } + + model = data; }); if (window.opener ?? false) { From d50098b62272203b5999edff735e18fff91a2446 Mon Sep 17 00:00:00 2001 From: Jeannot Damoiseaux <62134006+jeannotdamoiseaux@users.noreply.github.com> Date: Fri, 21 Feb 2025 22:25:22 +0100 Subject: [PATCH 031/623] Fix: Ensure `user_oauth_groups` defaults to an empty list to prevent TypeError When the OAuth groups claim does not yield a list, `user_oauth_groups` was previously set to None, causing a TypeError during membership checks. 
Changed this default to an empty list (`[]`) to ensure the variable is always iterable, preventing errors for non-admin users while logging in. This fix ensures stability in the `update_user_groups` function. --- backend/open_webui/utils/oauth.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/open_webui/utils/oauth.py b/backend/open_webui/utils/oauth.py index 13835e78471..0b68be2de6c 100644 --- a/backend/open_webui/utils/oauth.py +++ b/backend/open_webui/utils/oauth.py @@ -146,7 +146,7 @@ def update_user_groups(self, user, user_data, default_permissions): nested_claims = oauth_claim.split(".") for nested_claim in nested_claims: claim_data = claim_data.get(nested_claim, {}) - user_oauth_groups = claim_data if isinstance(claim_data, list) else None + user_oauth_groups = claim_data if isinstance(claim_data, list) else [] user_current_groups: list[GroupModel] = Groups.get_groups_by_member_id(user.id) all_available_groups: list[GroupModel] = Groups.get_groups() From b14e75dd6cd4887202940bb99f649540ab3d7a1f Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Fri, 21 Feb 2025 13:40:11 -0800 Subject: [PATCH 032/623] feat: added Trust Proxy Environment switch in Web Search admin settings tab. 
Co-Authored-By: harry zhou <67385896+harryzhou2000@users.noreply.github.com> --- backend/open_webui/routers/retrieval.py | 1 + src/lib/components/admin/Settings/WebSearch.svelte | 13 +++++++++++++ 2 files changed, 14 insertions(+) diff --git a/backend/open_webui/routers/retrieval.py b/backend/open_webui/routers/retrieval.py index 9611051b3fa..c2cb68c5d66 100644 --- a/backend/open_webui/routers/retrieval.py +++ b/backend/open_webui/routers/retrieval.py @@ -403,6 +403,7 @@ async def get_rag_config(request: Request, user=Depends(get_admin_user)): "bing_search_v7_subscription_key": request.app.state.config.BING_SEARCH_V7_SUBSCRIPTION_KEY, "exa_api_key": request.app.state.config.EXA_API_KEY, "result_count": request.app.state.config.RAG_WEB_SEARCH_RESULT_COUNT, + "trust_env": request.app.state.config.RAG_WEB_SEARCH_TRUST_ENV, "concurrent_requests": request.app.state.config.RAG_WEB_SEARCH_CONCURRENT_REQUESTS, "domain_filter_list": request.app.state.config.RAG_WEB_SEARCH_DOMAIN_FILTER_LIST, }, diff --git a/src/lib/components/admin/Settings/WebSearch.svelte b/src/lib/components/admin/Settings/WebSearch.svelte index 84e9d0e5a8e..84729117b7e 100644 --- a/src/lib/components/admin/Settings/WebSearch.svelte +++ b/src/lib/components/admin/Settings/WebSearch.svelte @@ -130,6 +130,19 @@
+
+
{$i18n.t('Trust Proxy Environment')}
+
+ + + +
+
+ {#if webConfig.search.engine !== ''}
{#if webConfig.search.engine === 'searxng'} From 9bada6421e0e2f06a396ffbec501471aed3cc81f Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Fri, 21 Feb 2025 16:39:56 -0800 Subject: [PATCH 033/623] refac: code block image styling --- src/lib/components/chat/Messages/CodeBlock.svelte | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib/components/chat/Messages/CodeBlock.svelte b/src/lib/components/chat/Messages/CodeBlock.svelte index a5d08356ffb..06743265a55 100644 --- a/src/lib/components/chat/Messages/CodeBlock.svelte +++ b/src/lib/components/chat/Messages/CodeBlock.svelte @@ -514,7 +514,7 @@
{#each files as file} {#if file.type.startsWith('image')} - Output + Output {/if} {/each}
From 7bfa29fa815a8707f011377576c77daa74fa0db4 Mon Sep 17 00:00:00 2001 From: hopeless <99332743+pwnless@users.noreply.github.com> Date: Sat, 22 Feb 2025 12:13:14 +0800 Subject: [PATCH 034/623] Update payload.py Fixes ollama native tool calling because native tool calling content will be str '', and tool call processing will be completely ignored. --- backend/open_webui/utils/payload.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/open_webui/utils/payload.py b/backend/open_webui/utils/payload.py index 4c1bbad9a9c..869e708955c 100644 --- a/backend/open_webui/utils/payload.py +++ b/backend/open_webui/utils/payload.py @@ -124,7 +124,7 @@ def convert_messages_openai_to_ollama(messages: list[dict]) -> list[dict]: tool_call_id = message.get("tool_call_id", None) # Check if the content is a string (just a simple message) - if isinstance(content, str): + if isinstance(content, str) and not tool_calls: # If the content is a string, it's pure text new_message["content"] = content From 50dec1207299c04717b83e7df8826155b01f2a6f Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Fri, 21 Feb 2025 22:15:22 -0800 Subject: [PATCH 035/623] refac --- backend/open_webui/routers/auths.py | 13 ------------- backend/open_webui/utils/oauth.py | 9 --------- 2 files changed, 22 deletions(-) diff --git a/backend/open_webui/routers/auths.py b/backend/open_webui/routers/auths.py index 3fa2ffe2e9d..b5a5a645a57 100644 --- a/backend/open_webui/routers/auths.py +++ b/backend/open_webui/routers/auths.py @@ -252,14 +252,6 @@ async def ldap_auth(request: Request, response: Response, form_data: LdapForm): if not user: try: user_count = Users.get_num_users() - if ( - request.app.state.USER_COUNT - and user_count >= request.app.state.USER_COUNT - ): - raise HTTPException( - status.HTTP_403_FORBIDDEN, - detail=ERROR_MESSAGES.ACCESS_PROHIBITED, - ) role = ( "admin" @@ -439,11 +431,6 @@ async def signup(request: Request, response: Response, form_data: SignupForm): ) 
user_count = Users.get_num_users() - if request.app.state.USER_COUNT and user_count >= request.app.state.USER_COUNT: - raise HTTPException( - status.HTTP_403_FORBIDDEN, detail=ERROR_MESSAGES.ACCESS_PROHIBITED - ) - if not validate_email_format(form_data.email.lower()): raise HTTPException( status.HTTP_400_BAD_REQUEST, detail=ERROR_MESSAGES.INVALID_EMAIL_FORMAT diff --git a/backend/open_webui/utils/oauth.py b/backend/open_webui/utils/oauth.py index 0b68be2de6c..2af54c19d7c 100644 --- a/backend/open_webui/utils/oauth.py +++ b/backend/open_webui/utils/oauth.py @@ -315,15 +315,6 @@ async def handle_callback(self, request, provider, response): if not user: user_count = Users.get_num_users() - if ( - request.app.state.USER_COUNT - and user_count >= request.app.state.USER_COUNT - ): - raise HTTPException( - 403, - detail=ERROR_MESSAGES.ACCESS_PROHIBITED, - ) - # If the user does not exist, check if signups are enabled if auth_manager_config.ENABLE_OAUTH_SIGNUP: # Check if an existing user with the same email already exists From 667d26ca12e9e640759e94f5fe69a8f88464cf16 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Sat, 22 Feb 2025 01:16:58 -0800 Subject: [PATCH 036/623] refac --- .../components/admin/Functions/FunctionEditor.svelte | 4 ++-- src/lib/components/chat/Messages/CodeBlock.svelte | 4 ++-- src/lib/components/common/CodeEditor.svelte | 11 ++++++++--- .../components/workspace/Tools/ToolkitEditor.svelte | 4 ++-- 4 files changed, 14 insertions(+), 9 deletions(-) diff --git a/src/lib/components/admin/Functions/FunctionEditor.svelte b/src/lib/components/admin/Functions/FunctionEditor.svelte index cbdec242570..fe9b62053f6 100644 --- a/src/lib/components/admin/Functions/FunctionEditor.svelte +++ b/src/lib/components/admin/Functions/FunctionEditor.svelte @@ -371,10 +371,10 @@ class Pipe: value={content} lang="python" {boilerplate} - on:change={(e) => { + onChange={(e) => { _content = e.detail.value; }} - on:save={async () => { + onSave={async () => { if 
(formElement) { formElement.requestSubmit(); } diff --git a/src/lib/components/chat/Messages/CodeBlock.svelte b/src/lib/components/chat/Messages/CodeBlock.svelte index 06743265a55..4cfaff3e520 100644 --- a/src/lib/components/chat/Messages/CodeBlock.svelte +++ b/src/lib/components/chat/Messages/CodeBlock.svelte @@ -468,10 +468,10 @@ value={code} {id} {lang} - on:save={() => { + onSave={() => { saveCode(); }} - on:change={(e) => { + onChange={(e) => { _code = e.detail.value; }} /> diff --git a/src/lib/components/common/CodeEditor.svelte b/src/lib/components/common/CodeEditor.svelte index 7d9f3a55a63..d45c9eb272a 100644 --- a/src/lib/components/common/CodeEditor.svelte +++ b/src/lib/components/common/CodeEditor.svelte @@ -21,6 +21,10 @@ export let boilerplate = ''; export let value = ''; + + export let onSave = () => {}; + export let onChange = () => {}; + let _value = ''; $: if (value) { @@ -75,7 +79,7 @@ }); _value = formattedCode; - dispatch('change', { value: _value }); + onChange({ value: _value }); await tick(); toast.success($i18n.t('Code formatted successfully')); @@ -94,7 +98,7 @@ EditorView.updateListener.of((e) => { if (e.docChanged) { _value = e.state.doc.toString(); - dispatch('change', { value: _value }); + onChange({ value: _value }); } }), editorTheme.of([]), @@ -170,7 +174,8 @@ const keydownHandler = async (e) => { if ((e.ctrlKey || e.metaKey) && e.key === 's') { e.preventDefault(); - dispatch('save'); + + onSave(); } // Format code when Ctrl + Shift + F is pressed diff --git a/src/lib/components/workspace/Tools/ToolkitEditor.svelte b/src/lib/components/workspace/Tools/ToolkitEditor.svelte index 60d23176349..40d4715c172 100644 --- a/src/lib/components/workspace/Tools/ToolkitEditor.svelte +++ b/src/lib/components/workspace/Tools/ToolkitEditor.svelte @@ -284,10 +284,10 @@ class Tools: value={content} {boilerplate} lang="python" - on:change={(e) => { + onChange={(e) => { _content = e.detail.value; }} - on:save={() => { + onSave={() => { if (formElement) 
{ formElement.requestSubmit(); } From 794919e91d24200f8911d307dc597a57f06843b5 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Sat, 22 Feb 2025 01:22:17 -0800 Subject: [PATCH 037/623] refac --- .../components/chat/Messages/CodeBlock.svelte | 21 +++++++------------ .../Messages/Markdown/MarkdownTokens.svelte | 4 ++-- 2 files changed, 9 insertions(+), 16 deletions(-) diff --git a/src/lib/components/chat/Messages/CodeBlock.svelte b/src/lib/components/chat/Messages/CodeBlock.svelte index 4cfaff3e520..02879a67439 100644 --- a/src/lib/components/chat/Messages/CodeBlock.svelte +++ b/src/lib/components/chat/Messages/CodeBlock.svelte @@ -1,18 +1,9 @@ @@ -802,10 +815,11 @@
{#if !edit} - {#if message.done || siblings.length > 1} -
+
+ {#if message.done || siblings.length > 1} {#if siblings.length > 1}
{/if}
From 7a63947b945a1f53c060d6da55f16c05fc2cbfcd Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Sun, 23 Feb 2025 18:41:33 -0800 Subject: [PATCH 052/623] fix: tailwind v4 ios 16.4 compatibility issue --- src/tailwind.css | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/tailwind.css b/src/tailwind.css index f4e0c0cdd9f..2b672c5ccd5 100644 --- a/src/tailwind.css +++ b/src/tailwind.css @@ -35,6 +35,11 @@ button { @apply cursor-pointer; } + + input::placeholder, + textarea::placeholder { + color: theme(--color-gray-400); + } } @custom-variant hover (&:hover); From 802f8809dd603b047bfe1e6107e120e4c0d6ac02 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Sun, 23 Feb 2025 21:39:34 -0800 Subject: [PATCH 053/623] fix: codeblock --- src/lib/components/chat/Messages/CodeBlock.svelte | 4 ++-- .../components/chat/Messages/Markdown/MarkdownTokens.svelte | 6 +++--- src/lib/components/common/CodeEditor.svelte | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/lib/components/chat/Messages/CodeBlock.svelte b/src/lib/components/chat/Messages/CodeBlock.svelte index 6e5e2359afe..f92c66210aa 100644 --- a/src/lib/components/chat/Messages/CodeBlock.svelte +++ b/src/lib/components/chat/Messages/CodeBlock.svelte @@ -464,8 +464,8 @@ onSave={() => { saveCode(); }} - onChange={(e) => { - _code = e; + onChange={(value) => { + _code = value; }} /> diff --git a/src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte b/src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte index ae5020f10f2..18a4585c647 100644 --- a/src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte +++ b/src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte @@ -88,14 +88,14 @@ code={token?.text ?? 
''} {attributes} {save} - onCode={(e) => { - dispatch('code', e); + onCode={(value) => { + dispatch('code', value); }} onSave={(e) => { dispatch('update', { raw: token.raw, oldContent: token.text, - newContent: e + newContent: value }); }} /> diff --git a/src/lib/components/common/CodeEditor.svelte b/src/lib/components/common/CodeEditor.svelte index d45c9eb272a..0c4a008f1ef 100644 --- a/src/lib/components/common/CodeEditor.svelte +++ b/src/lib/components/common/CodeEditor.svelte @@ -79,7 +79,7 @@ }); _value = formattedCode; - onChange({ value: _value }); + onChange(_value); await tick(); toast.success($i18n.t('Code formatted successfully')); @@ -98,7 +98,7 @@ EditorView.updateListener.of((e) => { if (e.docChanged) { _value = e.state.doc.toString(); - onChange({ value: _value }); + onChange(_value); } }), editorTheme.of([]), From dcaab9d6b57721a77e0b448c1189a95cdba62b28 Mon Sep 17 00:00:00 2001 From: Ekaterine Papava Date: Mon, 24 Feb 2025 09:07:45 +0100 Subject: [PATCH 054/623] Update Georgian translation --- src/lib/i18n/locales/ka-GE/translation.json | 994 ++++++++++---------- 1 file changed, 497 insertions(+), 497 deletions(-) diff --git a/src/lib/i18n/locales/ka-GE/translation.json b/src/lib/i18n/locales/ka-GE/translation.json index 63527dc05f7..0ad6aa76b45 100644 --- a/src/lib/i18n/locales/ka-GE/translation.json +++ b/src/lib/i18n/locales/ka-GE/translation.json @@ -1,179 +1,179 @@ { - "-1 for no limit, or a positive integer for a specific limit": "", - "'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.": "'s', 'm', 'h', 'd', 'w' ან '-1' ვადის გასვლისთვის.", - "(e.g. `sh webui.sh --api --api-auth username_password`)": "", - "(e.g. `sh webui.sh --api`)": "(მაგ. `sh webui.sh --api`)", + "-1 for no limit, or a positive integer for a specific limit": "-1 ლიმიტის გამოსართავად, ან დადებითი მთელი რიცხვი კონკრეტული ლიმიტისთვის", + "'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.": "'s', 'm', 'h', 'd', 'w' ან '-1' - უვადოსთვის.", + "(e.g. 
`sh webui.sh --api --api-auth username_password`)": "(მაგ: `sh webui.sh --api --api-auth username_password`)", + "(e.g. `sh webui.sh --api`)": "(მაგ: `sh webui.sh --api`)", "(latest)": "(უახლესი)", - "{{ models }}": "{{ models }}", - "{{COUNT}} Replies": "", + "{{ models }}": "{{ მოდელები }}", + "{{COUNT}} Replies": "{{COUNT}} პასუხი", "{{user}}'s Chats": "{{user}}-ის ჩათები", - "{{webUIName}} Backend Required": "{{webUIName}} საჭიროა ბექენდი", + "{{webUIName}} Backend Required": "{{webUIName}} საჭიროა უკანაბოლო", "*Prompt node ID(s) are required for image generation": "", - "A new version (v{{LATEST_VERSION}}) is now available.": "", + "A new version (v{{LATEST_VERSION}}) is now available.": "ხელმისაწვდომია ახალი ვერსია (v{{LATEST_VERSION}}).", "A task model is used when performing tasks such as generating titles for chats and web search queries": "დავალების მოდელი გამოიყენება ისეთი ამოცანების შესრულებისას, როგორიცაა ჩეთების სათაურების გენერირება და ვებ – ძიების მოთხოვნები", "a user": "მომხმარებელი", "About": "შესახებ", - "Access": "", - "Access Control": "", - "Accessible to all users": "", + "Access": "წვდომა", + "Access Control": "წვდომის კონტროლი", + "Accessible to all users": "ხელმისაწვდომია ყველა მომხმარებლისთვის", "Account": "ანგარიში", - "Account Activation Pending": "", - "Accurate information": "დიდი ინფორმაცია", - "Actions": "", - "Activate": "", + "Account Activation Pending": "დარჩენილი ანგარიშის აქტივაცია", + "Accurate information": "სწორი ინფორმაცია", + "Actions": "ქმედებები", + "Activate": "აქტივაცია", "Activate this command by typing \"/{{COMMAND}}\" to chat input.": "", - "Active Users": "", + "Active Users": "აქტიური მომხმარებლები", "Add": "დამატება", - "Add a model ID": "", + "Add a model ID": "მოდელის ID-ის დამატება", "Add a short description about what this model does": "დაამატეთ მოკლე აღწერა იმის შესახებ, თუ რას აკეთებს ეს მოდელი", - "Add a tag": "დაამატე ტეგი", - "Add Arena Model": "", - "Add Connection": "", - "Add Content": "", - "Add 
content here": "", - "Add custom prompt": "პირველადი მოთხოვნის დამატება", + "Add a tag": "ჭდის დამატება", + "Add Arena Model": "არენის მოდელის დამატება", + "Add Connection": "შეერთების დამატება", + "Add Content": "შემცველობის დამატება", + "Add content here": "შემცველობის აქ დამატება", + "Add custom prompt": "მორგებული მოთხოვნის დამატება", "Add Files": "ფაილების დამატება", - "Add Group": "", - "Add Memory": "მემორიის დამატება", + "Add Group": "ჯგუფის დამატება", + "Add Memory": "მეხსიერების დამატება", "Add Model": "მოდელის დამატება", - "Add Reaction": "", - "Add Tag": "", - "Add Tags": "ტეგების დამატება", - "Add text content": "", + "Add Reaction": "რეაქციის დამატება", + "Add Tag": "ჭდის დამატება", + "Add Tags": "ჭდეების დამატება", + "Add text content": "ტექსტური შემცველობის დამატება", "Add User": "მომხმარებლის დამატება", - "Add User Group": "", - "Adjusting these settings will apply changes universally to all users.": "ამ პარამეტრების რეგულირება ცვლილებებს უნივერსალურად გამოიყენებს ყველა მომხმარებლისთვის", + "Add User Group": "მომხმარებლის ჯგუფის დამატება", + "Adjusting these settings will apply changes universally to all users.": "ამ პარამეტრების რეგულირება ცვლილებებს უნივერსალურად გამოიყენებს ყველა მომხმარებლისთვის.", "admin": "ადმინისტრატორი", - "Admin": "", - "Admin Panel": "ადმინ პანელი", - "Admin Settings": "ადმინისტრატორის ხელსაწყოები", + "Admin": "ადმინი", + "Admin Panel": "ადმინისტრატორის პანელი", + "Admin Settings": "ადმინისტრატორის მორგება", "Admins have access to all tools at all times; users need tools assigned per model in the workspace.": "", "Advanced Parameters": "დამატებითი პარამეტრები", - "Advanced Params": "მოწინავე პარამები", + "Advanced Params": "დამატებითი პარამეტრები", "All Documents": "ყველა დოკუმენტი", - "All models deleted successfully": "", - "Allow Chat Controls": "", - "Allow Chat Delete": "", - "Allow Chat Deletion": "მიმოწერის წაშლის დაშვება", - "Allow Chat Edit": "", - "Allow File Upload": "", - "Allow non-local voices": "", - "Allow 
Temporary Chat": "", - "Allow User Location": "", + "All models deleted successfully": "ყველა მოდელი წარმატებით წაიშალა", + "Allow Chat Controls": "ჩატის კონტროლის ელემენტების დაშვება", + "Allow Chat Delete": "ჩატის წაშლის დაშვება", + "Allow Chat Deletion": "ჩატის წაშლის დაშვება", + "Allow Chat Edit": "ჩატის ჩასწორების დაშვება", + "Allow File Upload": "ფაილის ატვირთვის დაშვება", + "Allow non-local voices": "არალოკალური ხმების დაშვება", + "Allow Temporary Chat": "დროებითი ჩატის დაშვება", + "Allow User Location": "მომხმარებლის მდებარეობის დაშვება", "Allow Voice Interruption in Call": "", - "Allowed Endpoints": "", - "Already have an account?": "უკვე გაქვს ანგარიში?", + "Allowed Endpoints": "დაშვებული ბოლოწერტილები", + "Already have an account?": "უკვე გაქვთ ანგარიში?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. 
(Default: 0.0)": "", - "Always": "", - "Amazing": "", - "an assistant": "ასისტენტი", - "Analyzed": "", - "Analyzing...": "", + "Always": "ყოველთვის", + "Amazing": "გადასარევია", + "an assistant": "დამხმარე", + "Analyzed": "გაანაზლიებულია", + "Analyzing...": "ანალიზი...", "and": "და", - "and {{COUNT}} more": "", - "and create a new shared link.": "და შექმენით ახალი გაზიარებული ბმული.", - "API Base URL": "API საბაზისო URL", + "and {{COUNT}} more": "და კიდევ {{COUNT}}", + "and create a new shared link.": "და ახალი გაზიარებული ბმულის შექმნა.", + "API Base URL": "API-ის საბაზისო URL", "API Key": "API გასაღები", - "API Key created.": "API გასაღები შექმნილია.", + "API Key created.": "API გასაღები შეიქმნა.", "API Key Endpoint Restrictions": "", - "API keys": "API გასაღები", - "Application DN": "", - "Application DN Password": "", + "API keys": "API გასაღებები", + "Application DN": "აპლიკაციის DN", + "Application DN Password": "აპლიკაციის DN-ის პაროლი", "applies to all users with the \"user\" role": "", "April": "აპრილი", - "Archive": "არქივი", - "Archive All Chats": "არქივი ყველა ჩატი", - "Archived Chats": "ჩატის ისტორიის არქივი", + "Archive": "დაარქივება", + "Archive All Chats": "ყველა ჩატის დაარქივება", + "Archived Chats": "დაარქივებული ჩატები", "archived-chat-export": "", "Are you sure you want to delete this channel?": "", "Are you sure you want to delete this message?": "", "Are you sure you want to unarchive all archived chats?": "", - "Are you sure?": "დარწმუნებული ხარ?", - "Arena Models": "", - "Artifacts": "", - "Ask a question": "", - "Assistant": "", - "Attach file": "ფაილის ჩაწერა", - "Attention to detail": "დეტალური მიმართვა", - "Attribute for Mail": "", - "Attribute for Username": "", - "Audio": "ხმოვანი", + "Are you sure?": "დარწმუნებული ბრძანდებით?", + "Arena Models": "არენის მოდელები", + "Artifacts": "არტეფაქტები", + "Ask a question": "კითხვის დასმა", + "Assistant": "დამხმარე", + "Attach file": "ფაილის მიმაგრება", + "Attention to detail": "ყურადღებით 
დეტალებთან", + "Attribute for Mail": "ატრიბუტი ფოსტისთვის", + "Attribute for Username": "ატრიბუტი მომხმარებლის სახელისთვის", + "Audio": "აუდიო", "August": "აგვისტო", - "Authenticate": "", - "Authentication": "", + "Authenticate": "ავთენტიკაცია", + "Authentication": "ავთენტიკაცია", "Auto-Copy Response to Clipboard": "პასუხის ავტომატური კოპირება ბუფერში", "Auto-playback response": "ავტომატური დაკვრის პასუხი", "Autocomplete Generation": "", "Autocomplete Generation Input Max Length": "", - "Automatic1111": "", + "Automatic1111": "Automatic1111", "AUTOMATIC1111 Api Auth String": "", "AUTOMATIC1111 Base URL": "AUTOMATIC1111 საბაზისო მისამართი", - "AUTOMATIC1111 Base URL is required.": "AUTOMATIC1111 საბაზისო მისამართი აუცილებელია", - "Available list": "", + "AUTOMATIC1111 Base URL is required.": "AUTOMATIC1111 საბაზისო მისამართი აუცილებელია.", + "Available list": "ხელმისაწვდომი სია", "available!": "ხელმისაწვდომია!", - "Awful": "", + "Awful": "საშინელი", "Azure AI Speech": "", - "Azure Region": "", + "Azure Region": "Azure-ის რეგიონი", "Back": "უკან", - "Bad Response": "ხარვეზი", - "Banners": "რეკლამა", - "Base Model (From)": "საბაზო მოდელი (-დან)", + "Bad Response": "არასწორი პასუხი", + "Banners": "ბანერები", + "Base Model (From)": "საბაზისო მოდელი (საიდან)", "Batch Size (num_batch)": "", - "before": "ადგილზე", - "Being lazy": "ჩაიტყვევა", - "Beta": "", + "before": "მითითებულ დრომდე", + "Being lazy": "ზარმაცობა", + "Beta": "ბეტა", "Bing Search V7 Endpoint": "", "Bing Search V7 Subscription Key": "", "Bocha Search API Key": "", - "Brave Search API Key": "Brave Search API გასაღები", - "By {{name}}": "", + "Brave Search API Key": "Brave Search API-ის გასაღები", + "By {{name}}": "ავტორი {{name}}", "Bypass SSL verification for Websites": "SSL-ის ვერიფიკაციის გააუქმება ვებსაიტებზე", - "Calendar": "", - "Call": "", + "Calendar": "კალენდარი", + "Call": "ზარი", "Call feature is not supported when using Web STT engine": "", - "Camera": "", + "Camera": "კამერა", "Cancel": 
"გაუქმება", "Capabilities": "შესაძლებლობები", - "Capture": "", - "Certificate Path": "", + "Capture": "ჩაჭერა", + "Certificate Path": "სერტიფიკატის ბილიკი", "Change Password": "პაროლის შეცვლა", - "Channel Name": "", - "Channels": "", - "Character": "", + "Channel Name": "არხის სახელი", + "Channels": "არხები", + "Character": "სიმბოლო", "Character limit for autocomplete generation input": "", "Chart new frontiers": "", - "Chat": "მიმოწერა", + "Chat": "ჩატი", "Chat Background Image": "", - "Chat Bubble UI": "ჩატის ბულბი", + "Chat Bubble UI": "ჩატის ბუშტის ინტერფეისი", "Chat Controls": "", "Chat direction": "ჩატის მიმართულება", "Chat Overview": "", "Chat Permissions": "", "Chat Tags Auto-Generation": "", - "Chats": "მიმოწერები", + "Chats": "საუბრები", "Check Again": "თავიდან შემოწმება", - "Check for updates": "განახლებების ძიება", - "Checking for updates...": "მიმდინარეობს განახლებების ძიება...", + "Check for updates": "განახლებების შემოწმება", + "Checking for updates...": "განახლებების შემოწმება...", "Choose a model before saving...": "აირჩიეთ მოდელი შენახვამდე...", - "Chunk Overlap": "გადახურვა ფრაგმენტულია", - "Chunk Params": "გადახურვის პარამეტრები", - "Chunk Size": "გადახურვის ზომა", + "Chunk Overlap": "ფრაგმენტის გადაფარვა", + "Chunk Params": "ფრაგმენტის პარამეტრები", + "Chunk Size": "ფრაგმენტის ზომა", "Ciphers": "", "Citation": "ციტატა", "Clear memory": "", - "click here": "", + "click here": "აქ დააწკაპუნეთ", "Click here for filter guides.": "", - "Click here for help.": "დახმარებისთვის, დააკლიკე აქ", - "Click here to": "დააკლიკე აქ", + "Click here for help.": "დახმარებისთვის დააწკაპუნეთ აქ.", + "Click here to": "დააწკაპუნეთ აქ", "Click here to download user import template file.": "", "Click here to learn more about faster-whisper and see the available models.": "", "Click here to see available models.": "", - "Click here to select": "ასარჩევად, დააკლიკე აქ", - "Click here to select a csv file.": "ასარჩევად, დააკლიკე აქ", + "Click here to select": "ასარჩევად 
დააწკაპუნეთ აქ", + "Click here to select a csv file.": "დააწკაპუნეთ აქ csv ფაილის ასარჩევად.", "Click here to select a py file.": "", "Click here to upload a workflow.json file.": "", - "click here.": "დააკლიკე აქ", - "Click on the user role button to change a user's role.": "დააკლიკეთ მომხმარებლის როლის ღილაკს რომ შეცვალოთ მომხმარების როლი", + "click here.": "დააწკაპუნეთ აქ.", + "Click on the user role button to change a user's role.": "მომხმარებლის როლის შესაცვლელად დააწკაპუნეთ მომხმარებლის როლზე.", "Clipboard write permission denied. Please check your browser settings to grant the necessary access.": "", "Clone": "კლონი", "Clone Chat": "", @@ -187,8 +187,8 @@ "Code Interpreter": "", "Code Interpreter Engine": "", "Code Interpreter Prompt Template": "", - "Collection": "ნაკრები", - "Color": "", + "Collection": "კოლექცია", + "Color": "ფერი", "ComfyUI": "ComfyUI", "ComfyUI API Key": "", "ComfyUI Base URL": "ComfyUI საბაზისო URL", @@ -197,17 +197,17 @@ "ComfyUI Workflow Nodes": "", "Command": "ბრძანება", "Completions": "", - "Concurrent Requests": "თანმხლები მოთხოვნები", - "Configure": "", - "Confirm": "", - "Confirm Password": "პაროლის დამოწმება", + "Concurrent Requests": "ერთდროული მოთხოვნები", + "Configure": "მორგება", + "Confirm": "დადასტურება", + "Confirm Password": "გაიმეორეთ პაროლი", "Confirm your action": "", "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "კავშირები", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", "Contact Admin for WebUI Access": "", - "Content": "კონტენტი", + "Content": "შემცველობა", "Content Extraction": "", "Context Length": "კონტექსტის სიგრძე", "Continue Response": "პასუხის გაგრძელება", @@ -216,134 +216,134 @@ "Continue with LDAP": "", "Control how message text is split for TTS requests. 
'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "", "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", - "Controls": "", + "Controls": "მმართველები", "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "", - "Copied": "", - "Copied shared chat URL to clipboard!": "ყავს ჩათის URL-ი კლიპბორდში!", - "Copied to clipboard": "", + "Copied": "დაკოპირდა", + "Copied shared chat URL to clipboard!": "გაზიარებული ჩატის ბმული დაკოპირდა ბუფერში!", + "Copied to clipboard": "დაკოპირდა გაცვლის ბაფერში", "Copy": "კოპირება", - "Copy last code block": "ბოლო ბლოკის კოპირება", + "Copy last code block": "ბოლო კოდის ბლოკის კოპირება", "Copy last response": "ბოლო პასუხის კოპირება", - "Copy Link": "კოპირება", - "Copy to clipboard": "", - "Copying to clipboard was successful!": "კლავიატურაზე კოპირება წარმატებით დასრულდა", + "Copy Link": "ბმულის კოპირება", + "Copy to clipboard": "ბუფერში კოპირება", + "Copying to clipboard was successful!": "გაცვლის ბუფერში კოპირება წარმატებულია!", "CORS must be properly configured by the provider to allow requests from Open WebUI.": "", - "Create": "", + "Create": "შექმნა", "Create a knowledge base": "", - "Create a model": "შექმენით მოდელი", + "Create a model": "მოდელის შექმნა", "Create Account": "ანგარიშის შექმნა", "Create Admin Account": "", - "Create Channel": "", - "Create Group": "", + "Create Channel": "არხის შექმნა", + "Create Group": "ჯგუფის შექმნა", "Create Knowledge": "", - "Create new key": "პირადი ღირებულბრის შექმნა", - "Create new secret key": "პირადი ღირებულბრის შექმნა", - "Created at": "შექმნილია", - "Created At": "შექმნილია", - "Created by": "", - "CSV Import": "", + "Create new key": "ახალი 
გასაღების შექმნა", + "Create new secret key": "ახალი საიდუმლო გასაღების შექმნა", + "Created at": "შექმნის დრო", + "Created At": "შექმნის დრო", + "Created by": "ავტორი", + "CSV Import": "CSV-ის შემოტანა", "Current Model": "მიმდინარე მოდელი", "Current Password": "მიმდინარე პაროლი", - "Custom": "საკუთარი", + "Custom": "ხელით", "Dark": "მუქი", "Database": "მონაცემთა ბაზა", "December": "დეკემბერი", - "Default": "დეფოლტი", + "Default": "ნაგულისხმევი", "Default (Open AI)": "", - "Default (SentenceTransformers)": "დეფოლტ (SentenceTransformers)", + "Default (SentenceTransformers)": "ნაგულისხმევი (SentenceTransformers)", "Default mode works with a wider range of models by calling tools once before execution. Native mode leverages the model’s built-in tool-calling capabilities, but requires the model to inherently support this feature.": "", - "Default Model": "ნაგულისხმები მოდელი", - "Default model updated": "დეფოლტ მოდელი განახლებულია", + "Default Model": "ნაგულისხმევი მოდელი", + "Default model updated": "ნაგულისხმევი მოდელი განახლდა", "Default Models": "", - "Default permissions": "", + "Default permissions": "ნაგულისხმები წვდომები", "Default permissions updated successfully": "", - "Default Prompt Suggestions": "დეფოლტ პრომპტი პირველი პირველი", + "Default Prompt Suggestions": "ნაგულისხმევი მოთხოვნის მინიშნებები", "Default to 389 or 636 if TLS is enabled": "", "Default to ALL": "", - "Default User Role": "მომხმარებლის დეფოლტ როლი", + "Default User Role": "მომხმარებლის ნაგულისხმევი როლი", "Delete": "წაშლა", "Delete a model": "მოდელის წაშლა", "Delete All Chats": "ყველა ჩატის წაშლა", "Delete All Models": "", - "Delete chat": "შეტყობინების წაშლა", - "Delete Chat": "შეტყობინების წაშლა", - "Delete chat?": "", - "Delete folder?": "", - "Delete function?": "", - "Delete Message": "", - "Delete message?": "", - "Delete prompt?": "", - "delete this link": "ბმულის წაშლა", - "Delete tool?": "", + "Delete chat": "საუბრის წაშლა", + "Delete Chat": "საუბრის წაშლა", + "Delete chat?": 
"წავშალო ჩატი?", + "Delete folder?": "წავშალო საქაღალდეები?", + "Delete function?": "წავშალო ფუნქცია?", + "Delete Message": "შეტყობინების წაშლა", + "Delete message?": "წავშალო შეტყობინება?", + "Delete prompt?": "წავშალო მოთხოვნის ზოლი?", + "delete this link": "ამ ბმული წაშლა", + "Delete tool?": "წავშალო ხელსაწყო?", "Delete User": "მომხმარებლის წაშლა", "Deleted {{deleteModelTag}}": "{{deleteModelTag}} წაშლილია", "Deleted {{name}}": "Deleted {{name}}", - "Deleted User": "", + "Deleted User": "წაშლილი მომხმარებელი", "Describe your knowledge base and objectives": "", "Description": "აღწერა", - "Didn't fully follow instructions": "ვერ ყველა ინფორმაციისთვის ვერ ხელახლა ჩაწერე", - "Direct Connections": "", + "Didn't fully follow instructions": "ინსტრუქციებს სრულად არ მივყევი", + "Direct Connections": "პირდაპირი მიერთება", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", - "Disabled": "", + "Disabled": "გამორთული", "Discover a function": "", - "Discover a model": "გაიგეთ მოდელი", + "Discover a model": "აღმოაჩინეთ მოდელი", "Discover a prompt": "აღმოაჩინეთ მოთხოვნა", "Discover a tool": "", "Discover how to use Open WebUI and seek support from the community.": "", "Discover wonders": "", "Discover, download, and explore custom functions": "", - "Discover, download, and explore custom prompts": "აღმოაჩინეთ, ჩამოტვირთეთ და შეისწავლეთ მორგებული მოთხოვნები", + "Discover, download, and explore custom prompts": "აღმოაჩინეთ, გადმოწერეთ და შეისწავლეთ მორგებული მოთხოვნები", "Discover, download, and explore custom tools": "", - "Discover, download, and explore model presets": "აღმოაჩინეთ, ჩამოტვირთეთ და შეისწავლეთ მოდელის წინასწარ პარამეტრები", + "Discover, download, and explore model presets": "აღმოაჩინეთ, გადმოწერეთ და შეისწავლეთ მოდელის პარამეტრები", "Dismissible": "", - "Display": "", + "Display": "ჩვენება", "Display Emoji in Call": "", - "Display the username instead of You in the Chat": 
"ჩატში აჩვენე მომხმარებლის სახელი თქვენს ნაცვლად", + "Display the username instead of You in the Chat": "ჩატში თქვენს მაგიერ მომხმარებლის სახელის ჩვენება", "Displays citations in the response": "", "Dive into knowledge": "", "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", "Document": "დოკუმენტი", - "Documentation": "", + "Documentation": "დოკუმენტაცია", "Documents": "დოკუმენტები", - "does not make any external connections, and your data stays securely on your locally hosted server.": "არ ამყარებს გარე კავშირებს და თქვენი მონაცემები უსაფრთხოდ რჩება თქვენს ადგილობრივ სერვერზე.", + "does not make any external connections, and your data stays securely on your locally hosted server.": "არ ამყარებს გარე კავშირებს და თქვენი მონაცემები უსაფრთხოდ რჩება თქვენს ლოკალურ სერვერზე.", "Domain Filter List": "", - "Don't have an account?": "არ გაქვს ანგარიში?", + "Don't have an account?": "არ გაქვთ ანგარიში?", "don't install random functions from sources you don't trust.": "", "don't install random tools from sources you don't trust.": "", - "Don't like the style": "არ ეთიკურია ფართოდ", - "Done": "", - "Download": "ჩამოტვირთვა გაუქმებულია", + "Don't like the style": "არ მომწონს სტილი", + "Done": "დასრულებული", + "Download": "გადმოწერა", "Download as SVG": "", - "Download canceled": "ჩამოტვირთვა გაუქმებულია", - "Download Database": "გადმოწერე მონაცემთა ბაზა", + "Download canceled": "გადმოწერა გაუქმდა", + "Download Database": "მონაცემთა ბაზის გადმოწერა", "Drag and drop a file to upload or select a file to view": "", - "Draw": "", - "Drop any files here to add to the conversation": "გადაიტანეთ ფაილები აქ, რათა დაამატოთ ისინი მიმოწერაში", - "e.g. '30s','10m'. Valid time units are 's', 'm', 'h'.": "მაგალითად, '30წ', '10მ'. მოქმედი დროის ერთეულები: 'წ', 'წთ', 'სთ'.", - "e.g. 
60": "", + "Draw": "ხატვა", + "Drop any files here to add to the conversation": "დაყარეთ ფაილები აქ მათი საუბარში ჩასამატებლად", + "e.g. '30s','10m'. Valid time units are 's', 'm', 'h'.": "მაგ: '30წ', '10მ'. მოქმედი დროის ერთეულები: 'წ', 'წთ', 'სთ'.", + "e.g. 60": "მაგ: 60", "e.g. A filter to remove profanity from text": "", - "e.g. My Filter": "", - "e.g. My Tools": "", - "e.g. my_filter": "", - "e.g. my_tools": "", + "e.g. My Filter": "მაგ: ჩემი ფილტრი", + "e.g. My Tools": "მაგ: ჩემი ხელსაწყოები", + "e.g. my_filter": "მაგ: ჩემი_ფილტრი", + "e.g. my_tools": "მაგ: ჩემი_ხელსაწყოები", "e.g. Tools for performing various operations": "", - "Edit": "რედაქტირება", - "Edit Arena Model": "", - "Edit Channel": "", - "Edit Connection": "", - "Edit Default Permissions": "", - "Edit Memory": "", - "Edit User": "მომხმარებლის ედიტირება", - "Edit User Group": "", - "ElevenLabs": "", - "Email": "ელ-ფოსტა", + "Edit": "ჩასწორება", + "Edit Arena Model": "არენის მოდელის ჩასწორება", + "Edit Channel": "არხის ჩასწორება", + "Edit Connection": "შეერთების ჩასწორება", + "Edit Default Permissions": "ნაგულისხმევი წვდომების ჩასწორება", + "Edit Memory": "მეხსიერების ჩასწორება", + "Edit User": "მომხმარებლის ჩასწორება", + "Edit User Group": "მომხმარებლის ჯგუფის ჩასწორება", + "ElevenLabs": "ElevenLabs", + "Email": "ელფოსტა", "Embark on adventures": "", "Embedding Batch Size": "", - "Embedding Model": "ჩასმის ძირითადი პროგრამა", - "Embedding Model Engine": "ჩასმის ძირითადი პროგრამა", - "Embedding model set to \"{{embedding_model}}\"": "ჩასმის ძირითადი პროგრამა ჩართულია \"{{embedding_model}}\"", + "Embedding Model": "მოდელის ჩაშენება", + "Embedding Model Engine": "ჩაშენებული მოდელის ძრავა", + "Embedding model set to \"{{embedding_model}}\"": "ჩაშენებული მოდელი დაყენებულია მნიშვნელობაზე \"{{embedding_model}}\"", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", "Enable Code Interpreter": "", @@ -355,27 +355,27 @@ "Enable Mirostat sampling for controlling perplexity. 
(Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", "Enable New Sign Ups": "ახალი რეგისტრაციების ჩართვა", "Enable Web Search": "ვებ ძიების ჩართვა", - "Enabled": "", - "Engine": "", - "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "გთხოვთ, უზრუნველყოთ, რომთქვევის CSV-ფაილი შეიცავს 4 ველი, ჩაწერილი ორივე ველი უდრის პირველი ველით.", + "Enabled": "ჩართულია", + "Engine": "ძრავი", + "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "დარწმუნდით, რომ თქვენი CSV-ფაილი შეიცავს 4 ველს ამ მიმდევრობით: სახელი, ელფოსტა, პაროლი, როლი.", "Enter {{role}} message here": "შეიყვანე {{role}} შეტყობინება აქ", - "Enter a detail about yourself for your LLMs to recall": "შეიყვანე დეტალი ჩემთათვის, რომ ჩვენი LLMs-ს შეიძლოს აღაქვს", + "Enter a detail about yourself for your LLMs to recall": "შეიყვანეთ რამე თქვენს შესახებ, რომ თქვენმა LLM-მა გაიხსენოს", "Enter api auth string (e.g. username:password)": "", "Enter Application DN": "", "Enter Application DN Password": "", "Enter Bing Search V7 Endpoint": "", "Enter Bing Search V7 Subscription Key": "", "Enter Bocha Search API Key": "", - "Enter Brave Search API Key": "შეიყვანეთ Brave Search API გასაღები", + "Enter Brave Search API Key": "შეიყვანეთ Brave Search API-ის გასაღები", "Enter certificate path": "", "Enter CFG Scale (e.g. 
7.0)": "", - "Enter Chunk Overlap": "შეიყვანეთ ნაწილის გადახურვა", - "Enter Chunk Size": "შეიყვანე ბლოკის ზომა", - "Enter description": "", + "Enter Chunk Overlap": "შეიყვანეთ ფრაგმენტის გადაფარვა", + "Enter Chunk Size": "შეიყვანე ფრაგმენტის ზომა", + "Enter description": "შეიყვანეთ აღწერა", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "შეიყვანეთ Github Raw URL", - "Enter Google PSE API Key": "შეიყვანეთ Google PSE API გასაღები", + "Enter Google PSE API Key": "შეიყვანეთ Google PSE API-ის გასაღები", "Enter Google PSE Engine Id": "შეიყვანეთ Google PSE ძრავის ID", "Enter Image Size (e.g. 512x512)": "შეიყვანეთ სურათის ზომა (მაგ. 512x512)", "Enter Jina API Key": "", @@ -383,9 +383,9 @@ "Enter Jupyter Token": "", "Enter Jupyter URL": "", "Enter Kagi Search API Key": "", - "Enter language codes": "შეიყვანეთ ენის კოდი", + "Enter language codes": "შეიყვანეთ ენის კოდები", "Enter Model ID": "", - "Enter model tag (e.g. {{modelTag}})": "შეიყვანეთ მოდელის ტეგი (მაგ. {{modelTag}})", + "Enter model tag (e.g. {{modelTag}})": "შეიყვანეთ მოდელის ჭდე (მაგ: {{modelTag}})", "Enter Mojeek Search API Key": "", "Enter Number of Steps (e.g. 50)": "შეიყვანეთ ნაბიჯების რაოდენობა (მაგ. 50)", "Enter proxy URL (e.g. https://user:password@host:port)": "", @@ -405,26 +405,26 @@ "Enter server host": "", "Enter server label": "", "Enter server port": "", - "Enter stop sequence": "შეიყვანეთ ტოპ თანმიმდევრობა", + "Enter stop sequence": "შეიყვანეთ გაჩერების მიმდევრობა", "Enter system prompt": "", "Enter Tavily API Key": "", "Enter the public URL of your WebUI. This URL will be used to generate links in the notifications.": "", "Enter Tika Server URL": "", "Enter timeout in seconds": "", "Enter Top K": "შეიყვანეთ Top K", - "Enter URL (e.g. http://127.0.0.1:7860/)": "შეიყვანეთ მისამართი (მაგალითად http://127.0.0.1:7860/)", - "Enter URL (e.g. 
http://localhost:11434)": "შეიყვანეთ მისამართი (მაგალითად http://localhost:11434)",
-    "Enter your current password": "",
-    "Enter Your Email": "შეიყვანეთ თქვენი ელ-ფოსტა",
+    "Enter URL (e.g. http://127.0.0.1:7860/)": "შეიყვანეთ ბმული (მაგ: http://127.0.0.1:7860/)",
+    "Enter URL (e.g. http://localhost:11434)": "შეიყვანეთ ბმული (მაგ: http://localhost:11434)",
+    "Enter your current password": "შეიყვანეთ თქვენი მიმდინარე პაროლი",
+    "Enter Your Email": "შეიყვანეთ თქვენი ელფოსტა",
    "Enter Your Full Name": "შეიყვანეთ თქვენი სრული სახელი",
    "Enter your message": "",
-    "Enter your new password": "",
+    "Enter your new password": "შეიყვანეთ თქვენი ახალი პაროლი",
    "Enter Your Password": "შეიყვანეთ თქვენი პაროლი",
    "Enter Your Role": "შეიყვანეთ თქვენი როლი",
    "Enter Your Username": "",
    "Enter your webhook URL": "",
    "Error": "შეცდომა",
-    "ERROR": "",
+    "ERROR": "ERROR",
    "Error accessing Google Drive: {{error}}": "",
    "Error uploading file: {{error}}": "",
    "Evaluations": "",
@@ -434,62 +434,62 @@
    "Example: mail": "",
    "Example: ou=users,dc=foo,dc=example": "",
    "Example: sAMAccountName or uid or userPrincipalName": "",
-    "Exclude": "",
+    "Exclude": "გამორიცხვა",
    "Execute code for analysis": "",
-    "Experimental": "ექსპერიმენტალური",
+    "Experimental": "ექსპერიმენტული",
    "Explore the cosmos": "",
-    "Export": "ექსპორტი",
+    "Export": "გატანა",
    "Export All Archived Chats": "",
-    "Export All Chats (All Users)": "ექსპორტი ყველა ჩათი (ყველა მომხმარებელი)",
+    "Export All Chats (All Users)": "ყველა ჩატის გატანა (ყველა მომხმარებელი)",
    "Export chat (.json)": "",
-    "Export Chats": "მიმოწერის ექსპორტირება",
+    "Export Chats": "ჩატების გატანა",
    "Export Config to JSON File": "",
    "Export Functions": "",
-    "Export Models": "ექსპორტის მოდელები",
-    "Export Presets": "",
-    "Export Prompts": "მოთხოვნების ექსპორტი",
-    "Export to CSV": "",
+    "Export Models": "მოდელების გატანა",
+    "Export Presets": "პრესეტების გატანა",
+    "Export Prompts": "მოთხოვნების გატანა",
+    "Export to CSV": "CSV-ში გატანა",
    "Export Tools": "",
"External Models": "", - "Failed to add file.": "", - "Failed to create API Key.": "API ღილაკის შექმნა ვერ მოხერხდა.", + "Failed to add file.": "ფაილის დამატების შეცდომა.", + "Failed to create API Key.": "API-ის გასაღების შექმნა ჩავარდა.", "Failed to fetch models": "", - "Failed to read clipboard contents": "ბუფერში შიგთავსის წაკითხვა ვერ მოხერხდა", + "Failed to read clipboard contents": "ბუფერის შემცველობის წაკითხვა ჩავარდა", "Failed to save models configuration": "", "Failed to update settings": "", "Failed to upload file.": "", - "Features": "", + "Features": "მახასიათებლები", "Features Permissions": "", "February": "თებერვალი", "Feedback History": "", "Feedbacks": "", - "Feel free to add specific details": "უფასოდ დაამატეთ დეტალები", - "File": "", + "Feel free to add specific details": "სპეციფიკური დეტალების დამატება პრობლემა არაა", + "File": "ფაილი", "File added successfully.": "", "File content updated successfully.": "", - "File Mode": "ფაილური რეჟიმი", - "File not found.": "ფაილი ვერ მოიძებნა", + "File Mode": "ფაილის რეჟიმი", + "File not found.": "ფაილი ნაპოვნი არაა.", "File removed successfully.": "", "File size should not exceed {{maxSize}} MB.": "", "File uploaded successfully": "", - "Files": "", + "Files": "ფაილი", "Filter is now globally disabled": "", "Filter is now globally enabled": "", - "Filters": "", - "Fingerprint spoofing detected: Unable to use initials as avatar. Defaulting to default profile image.": "აღმოჩენილია თითის ანაბეჭდის გაყალბება: ინიციალების გამოყენება ავატარად შეუძლებელია. დეფოლტ პროფილის დეფოლტ სურათი.", - "Fluidly stream large external response chunks": "თხევადი ნაკადი დიდი გარე საპასუხო ნაწილაკების", - "Focus chat input": "ჩეთის შეყვანის ფოკუსი", + "Filters": "ფილტრები", + "Fingerprint spoofing detected: Unable to use initials as avatar. Defaulting to default profile image.": "აღმოჩენილია ანაბეჭდის გაყალბება: ინიციალების გამოყენება ავატარად შეუძლებელია. 
გამოყენებული იქნეა ნაგულისხმევი პროფილის სურათი.", + "Fluidly stream large external response chunks": "დიდი გარე პასუხის ფრაგმენტების გლუვად დასტრიმვა", + "Focus chat input": "ჩატში შეყვანის ფოკუსი", "Folder deleted successfully": "", "Folder name cannot be empty": "", "Folder name cannot be empty.": "", "Folder name updated successfully": "", - "Followed instructions perfectly": "ყველა ინსტრუქცია უზრუნველყოფა", + "Followed instructions perfectly": "ინსტრუქციების ზუსტად მიჰყევით", "Forge new paths": "", - "Form": "", + "Form": "ფორმა", "Format your variables using brackets like this:": "", "Frequency Penalty": "სიხშირის ჯარიმა", "Full Context Mode": "", - "Function": "", + "Function": "ფუნქცია", "Function Calling": "", "Function created successfully": "", "Function deleted successfully": "", @@ -499,128 +499,128 @@ "Function is now globally enabled": "", "Function Name": "", "Function updated successfully": "", - "Functions": "", + "Functions": "ფუნქციები", "Functions allow arbitrary code execution": "", "Functions allow arbitrary code execution.": "", "Functions imported successfully": "", - "Gemini": "", + "Gemini": "მარჩბივი", "Gemini API Config": "", "Gemini API Key is required.": "", "General": "ზოგადი", "General Settings": "ზოგადი პარამეტრები", "Generate an image": "", "Generate Image": "", - "Generating search query": "საძიებო მოთხოვნის გენერირება", - "Get started": "", + "Generating search query": "ძებნის მოთხოვნის გენერაცია", + "Get started": "დაიწყეთ", "Get started with {{WEBUI_NAME}}": "", - "Global": "", - "Good Response": "დიდი პასუხი", - "Google Drive": "", + "Global": "გლობალური", + "Good Response": "კარგი პასუხი", + "Google Drive": "Google Drive", "Google PSE API Key": "Google PSE API გასაღები", "Google PSE Engine Id": "Google PSE ძრავის Id", "Group created successfully": "", "Group deleted successfully": "", "Group Description": "", - "Group Name": "", + "Group Name": "ჯგუფის სახელი", "Group updated successfully": "", - "Groups": "", + "Groups": 
"ჯგუფები", "Haptic Feedback": "", - "has no conversations.": "არა უფლება ჩაწერა", + "has no conversations.": "არ აქვს საუბრები.", "Hello, {{name}}": "გამარჯობა, {{name}}", "Help": "დახმარება", "Help us create the best community leaderboard by sharing your feedback history!": "", - "Hex Color": "", + "Hex Color": "თექვსმეტობითი ფერი", "Hex Color - Leave empty for default color": "", "Hide": "დამალვა", - "Home": "", - "Host": "", - "How can I help you today?": "როგორ შემიძლია დაგეხმარო დღეს?", + "Home": "მთავარი", + "Host": "ჰოსტი", + "How can I help you today?": "რით შემიძლია დაგეხმაროთ დღეს?", "How would you rate this response?": "", "Hybrid Search": "ჰიბრიდური ძებნა", "I acknowledge that I have read and I understand the implications of my action. I am aware of the risks associated with executing arbitrary code and I have verified the trustworthiness of the source.": "", - "ID": "", + "ID": "ID", "Ignite curiosity": "", - "Image": "", + "Image": "გამოსახულება", "Image Compression": "", - "Image Generation": "", - "Image Generation (Experimental)": "სურათების გენერაცია (ექსპერიმენტული)", - "Image Generation Engine": "სურათის გენერაციის ძრავა", + "Image Generation": "გამოსახულების გენერაცია", + "Image Generation (Experimental)": "გამოსახულებების გენერაცია (ექსპერიმენტული)", + "Image Generation Engine": "გამოსახულებების გენერაციის ძრავა", "Image Max Compression Size": "", "Image Prompt Generation": "", "Image Prompt Generation Prompt": "", - "Image Settings": "სურათის პარამეტრები", - "Images": "სურათები", - "Import Chats": "მიმოწერების იმპორტი", + "Image Settings": "გამოსახულების პარამეტრები", + "Images": "გამოსახულებები", + "Import Chats": "ჩატების შემოტანა", "Import Config from JSON File": "", "Import Functions": "", - "Import Models": "იმპორტის მოდელები", - "Import Presets": "", - "Import Prompts": "მოთხოვნების იმპორტი", + "Import Models": "მოდელების შემოტანა", + "Import Presets": "პრესეტების შემოტანა", + "Import Prompts": "მოთხოვნების შემოტანა", "Import Tools": 
"", - "Include": "", + "Include": "ჩართვა", "Include `--api-auth` flag when running stable-diffusion-webui": "", - "Include `--api` flag when running stable-diffusion-webui": "ჩართეთ `--api` დროშა stable-diffusion-webui-ის გაშვებისას", + "Include `--api` flag when running stable-diffusion-webui": "`--api` ალმის ჩასმა stable-diffusion-webui-ის გამოყენებისას", "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "", "Info": "ინფორმაცია", - "Input commands": "შეყვანით ბრძანებებს", - "Install from Github URL": "დააინსტალირეთ Github URL- დან", + "Input commands": "შეიყვანეთ ბრძანებები", + "Install from Github URL": "დაყენება Github-ის ბმულიდან", "Instant Auto-Send After Voice Transcription": "", "Interface": "ინტერფეისი", - "Invalid file format.": "", - "Invalid Tag": "არასწორი ტეგი", + "Invalid file format.": "არასწორი ფაილის ფორმატი.", + "Invalid Tag": "არასწორი ჭდე", "is typing...": "", "January": "იანვარი", "Jina API Key": "", - "join our Discord for help.": "შეუერთდით ჩვენს Discord-ს დახმარებისთვის", + "join our Discord for help.": "დახმარებისთვის შემოდით ჩვენს Discord-ზე.", "JSON": "JSON", "JSON Preview": "JSON გადახედვა", - "July": "ივნისი", - "June": "ივლა", + "July": "ივლისი", + "June": "ივნისი", "Jupyter Auth": "", "Jupyter URL": "", "JWT Expiration": "JWT-ის ვადა", "JWT Token": "JWT ტოკენი", "Kagi Search API Key": "", "Keep Alive": "აქტიურად დატოვება", - "Key": "", + "Key": "გასაღები", "Keyboard shortcuts": "კლავიატურის მალსახმობები", - "Knowledge": "", + "Knowledge": "ცოდნა", "Knowledge Access": "", "Knowledge created successfully.": "", "Knowledge deleted successfully.": "", "Knowledge reset successfully.": "", "Knowledge updated successfully": "", - "Kokoro.js (Browser)": "", - "Kokoro.js Dtype": "", - "Label": "", + "Kokoro.js (Browser)": "Kokoro.js (ბრაუზერი)", + "Kokoro.js Dtype": 
"Kokoro.js Dtype", + "Label": "ჭდე", "Landing Page Mode": "", "Language": "ენა", - "Last Active": "ბოლო აქტიური", - "Last Modified": "", + "Last Active": "ბოლოს აქტიური", + "Last Modified": "ბოლო ცვლილება", "Last reply": "", - "LDAP": "", + "LDAP": "LDAP", "LDAP server updated": "", - "Leaderboard": "", + "Leaderboard": "ლიდერების დაფა", "Leave empty for unlimited": "", "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "", "Leave empty to include all models from \"{{URL}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "", "Leave empty to use the default prompt, or enter a custom prompt": "", "Leave model field empty to use the default model.": "", - "License": "", - "Light": "მსუბუქი", + "License": "ლიცენზია", + "Light": "ღია", "Listening...": "", - "Llama.cpp": "", - "LLMs can make mistakes. Verify important information.": "შესაძლოა LLM-ებმა შეცდომები დაუშვან. გადაამოწმეთ მნიშვნელოვანი ინფორმაცია.", + "Llama.cpp": "Llama.cpp", + "LLMs can make mistakes. Verify important information.": "LLM-ებმა, შეიძლება, შეცდომები დაუშვან. 
გადაამოწმეთ მნიშვნელოვანი ინფორმაცია.",
    "Loading Kokoro.js...": "",
-    "Local": "",
-    "Local Models": "",
-    "Lost": "",
+    "Local": "ლოკალური",
+    "Local Models": "ლოკალური მოდელები",
+    "Lost": "წაგება",
    "LTR": "LTR",
-    "Made by Open WebUI Community": "დამზადებულია OpenWebUI საზოგადოების მიერ",
-    "Make sure to enclose them with": "დარწმუნდით, რომ დაურთეთ ისინი",
+    "Made by Open WebUI Community": "შექმნილია OpenWebUI საზოგადოების მიერ",
+    "Make sure to enclose them with": "დარწმუნდით, რომ ჩასვით ისინი",
    "Make sure to export a workflow.json file as API format from ComfyUI.": "",
-    "Manage": "",
+    "Manage": "მართვა",
    "Manage Arena Models": "",
    "Manage Direct Connections": "",
    "Manage Models": "",
@@ -628,261 +628,261 @@
    "Manage Ollama API Connections": "",
    "Manage OpenAI API Connections": "",
    "Manage Pipelines": "მილსადენების მართვა",
-    "March": "მარტივი",
-    "Max Tokens (num_predict)": "მაქს ტოკენსი (num_predict)",
+    "March": "მარტი",
+    "Max Tokens (num_predict)": "მაქს. ტოკენები (num_predict)",
    "Max Upload Count": "",
    "Max Upload Size": "",
-    "Maximum of 3 models can be downloaded simultaneously. Please try again later.": "მაქსიმუმ 3 მოდელის ჩამოტვირთვა შესაძლებელია ერთდროულად. Გთხოვთ სცადოთ მოგვიანებით.",
-    "May": "მაი",
-    "Memories accessible by LLMs will be shown here.": "ლლმ-ს აქვს ხელმისაწვდომი მემორიები აქ იქნება.",
-    "Memory": "მემორია",
+    "Maximum of 3 models can be downloaded simultaneously. Please try again later.": "ერთდროულად მაქსიმუმ 3 მოდელის ჩამოტვირთვა შესაძლებელია. 
მოგვიანებით სცადეთ.", + "May": "მაისი", + "Memories accessible by LLMs will be shown here.": "LLM-ებისთვის ხელმისაწვდომი მეხსიერებები აქ გამოჩნდება.", + "Memory": "მეხსიერება", "Memory added successfully": "", "Memory cleared successfully": "", "Memory deleted successfully": "", "Memory updated successfully": "", - "Merge Responses": "", + "Merge Responses": "პასუხების შერწყმა", "Message rating should be enabled to use this feature": "", "Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "შეტყობინებები, რომელსაც თქვენ აგზავნით თქვენი ბმულის შექმნის შემდეგ, არ იქნება გაზიარებული. URL– ის მქონე მომხმარებლებს შეეძლებათ ნახონ საერთო ჩატი.", "Min P": "", "Minimum Score": "მინიმალური ქულა", - "Mirostat": "მიროსტატი", - "Mirostat Eta": "მიროსტატი ეტა", - "Mirostat Tau": "მიროსტატი ტაუ", - "Model": "", - "Model '{{modelName}}' has been successfully downloaded.": "მოდელი „{{modelName}}“ წარმატებით ჩამოიტვირთა.", - "Model '{{modelTag}}' is already in queue for downloading.": "მოდელი „{{modelTag}}“ უკვე ჩამოტვირთვის რიგშია.", - "Model {{modelId}} not found": "მოდელი {{modelId}} ვერ მოიძებნა", + "Mirostat": "Mirostat", + "Mirostat Eta": "Mirostat Eta", + "Mirostat Tau": "Mirostat Tau", + "Model": "მოდელი", + "Model '{{modelName}}' has been successfully downloaded.": "მოდელის „{{modelName}}“ გადმოწერა წარმატებით დასრულდა.", + "Model '{{modelTag}}' is already in queue for downloading.": "მოდელი „{{modelTag}}“ უკვე გადმოწერის რიგშია.", + "Model {{modelId}} not found": "მოდელი {{modelId}} აღმოჩენილი არაა", "Model {{modelName}} is not vision capable": "Model {{modelName}} is not vision capable", "Model {{name}} is now {{status}}": "Model {{name}} is now {{status}}", "Model accepts image inputs": "", "Model created successfully!": "", - "Model filesystem path detected. Model shortname is required for update, cannot continue.": "აღმოჩენილია მოდელის ფაილური სისტემის გზა. 
განახლებისთვის საჭიროა მოდელის მოკლე სახელი, გაგრძელება შეუძლებელია.", - "Model Filtering": "", + "Model filesystem path detected. Model shortname is required for update, cannot continue.": "აღმოჩენილია მოდელის ფაილური სისტემის ბილიკი. განახლებისთვის საჭიროა მოდელის მოკლე სახელი, გაგრძელება შეუძლებელია.", + "Model Filtering": "მოდელების გაფილტვრა", "Model ID": "მოდელის ID", - "Model IDs": "", - "Model Name": "", - "Model not selected": "მოდელი არ არის არჩეული", - "Model Params": "მოდელის პარამები", - "Model Permissions": "", + "Model IDs": "მოდელის ID-ები", + "Model Name": "Მოდელის სახელი", + "Model not selected": "მოდელი არჩეული არაა", + "Model Params": "მოდელის პარამეტრები", + "Model Permissions": "მოდელის წვდომები", "Model updated successfully": "", - "Modelfile Content": "მოდელური ფაილის კონტენტი", + "Modelfile Content": "მოდელის ფაილის შემცველობა", "Models": "მოდელები", - "Models Access": "", + "Models Access": "მოდელის წვდომა", "Models configuration saved successfully": "", "Mojeek Search API Key": "", - "more": "", - "More": "ვრცლად", + "more": "მეტი", + "More": "მეტი", "Name": "სახელი", "Name your knowledge base": "", - "Native": "", + "Native": "საკუთარი", "New Chat": "ახალი მიმოწერა", - "New Folder": "", + "New Folder": "ახალი საქაღალდე", "New Password": "ახალი პაროლი", - "new-channel": "", - "No content found": "", - "No content to speak": "", - "No distance available": "", + "new-channel": "new-channel", + "No content found": "შემცველობა აღმოჩენილი არაა", + "No content to speak": "წარმოსათქმელი შემცველობის გარეშე", + "No distance available": "მანძილი ხელმისაწვდომი არაა", "No feedbacks found": "", - "No file selected": "", - "No files found.": "", + "No file selected": "ფაილი არჩეული არაა", + "No files found.": "ფაილები ვერ მოიძებნა.", "No groups with access, add a group to grant access": "", "No HTML, CSS, or JavaScript content found.": "", "No inference engine with management support found": "", "No knowledge found": "", - "No model IDs": "", - "No 
models found": "", - "No models selected": "", - "No results found": "ჩვენ ვერ პოულობით ნაპოვნი ჩაწერები", - "No search query generated": "ძიების მოთხოვნა არ არის გენერირებული", - "No source available": "წყარო არ არის ხელმისაწვდომი", - "No users were found.": "", + "No model IDs": "მოდელის ID-ების გარეშე", + "No models found": "მოდელები აღმოჩენილი არაა", + "No models selected": "მოდელები არჩეული არაა", + "No results found": "შედეგების გარეშე", + "No search query generated": "ძებნის მოთხოვნა არ შექმნილა", + "No source available": "წყარო ხელმისაწვდომი არაა", + "No users were found.": "მომხმარებლები აღმოჩენილი არაა.", "No valves to update": "", "None": "არცერთი", - "Not factually correct": "არ ვეთანხმები პირდაპირ ვერც ვეთანხმები", - "Not helpful": "", + "Not factually correct": "მთლად სწორი არაა", + "Not helpful": "სასარგებლო არაა", "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "შენიშვნა: თუ თქვენ დააყენებთ მინიმალურ ქულას, ძებნა დააბრუნებს მხოლოდ დოკუმენტებს მინიმალური ქულის მეტი ან ტოლი ქულით.", - "Notes": "", - "Notification Sound": "", + "Notes": "შენიშვნები", + "Notification Sound": "გაფრთხილების ხმა", "Notification Webhook": "", - "Notifications": "შეტყობინება", + "Notifications": "გაფრთხილებები", "November": "ნოემბერი", - "num_gpu (Ollama)": "", + "num_gpu (Ollama)": "num_gpu (Ollama)", "num_thread (Ollama)": "num_thread (ოლამა)", - "OAuth ID": "", + "OAuth ID": "OAuth ID", "October": "ოქტომბერი", - "Off": "გამორთვა", - "Okay, Let's Go!": "კარგი, წავედით!", + "Off": "გამორთ", + "Okay, Let's Go!": "აბა, წავედით!", "OLED Dark": "OLED მუქი", "Ollama": "Ollama", "Ollama API": "Ollama API", "Ollama API settings updated": "", "Ollama Version": "Ollama ვერსია", - "On": "ჩართვა", + "On": "ჩართული", "Only alphanumeric characters and hyphens are allowed": "", - "Only alphanumeric characters and hyphens are allowed in the command string.": "ბრძანების სტრიქონში დაშვებულია მხოლოდ 
ალფანუმერული სიმბოლოები და დეფისები.", + "Only alphanumeric characters and hyphens are allowed in the command string.": "ბრძანების სტრიქონში დაშვებულია, მხოლოდ, ალფარიცხვითი სიმბოლოები და ტირეები.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "", "Only select users and groups with permission can access": "", "Oops! Looks like the URL is invalid. Please double-check and try again.": "უი! როგორც ჩანს, მისამართი არასწორია. გთხოვთ, გადაამოწმოთ და ისევ სცადოთ.", "Oops! There are files still uploading. Please wait for the upload to complete.": "", "Oops! There was an error in the previous response.": "", - "Oops! You're using an unsupported method (frontend only). Please serve the WebUI from the backend.": "უპს! თქვენ იყენებთ მხარდაუჭერელ მეთოდს (მხოლოდ frontend). გთხოვთ, მოემსახუროთ WebUI-ს ბექენდიდან", - "Open file": "", - "Open in full screen": "", - "Open new chat": "ახალი მიმოწერის გახსნა", + "Oops! You're using an unsupported method (frontend only). Please serve the WebUI from the backend.": "ვაი! იყენებთ მხარდაუჭერელ მეთოდს (მხოლოდ წინაბოლო). 
შედით WebUI-ზე უკანაბოლოდან.", + "Open file": "ფაილის გახსნა", + "Open in full screen": "მთელ ეკრანზე გახსნა", + "Open new chat": "ახალი ჩატის გახსნა", "Open WebUI uses faster-whisper internally.": "", "Open WebUI uses SpeechT5 and CMU Arctic speaker embeddings.": "", "Open WebUI version (v{{OPEN_WEBUI_VERSION}}) is lower than required version (v{{REQUIRED_VERSION}})": "", "OpenAI": "OpenAI", "OpenAI API": "OpenAI API", "OpenAI API Config": "OpenAI API პარამეტრები", - "OpenAI API Key is required.": "OpenAI API გასაღები აუცილებელია", + "OpenAI API Key is required.": "OpenAI API გასაღები აუცილებელია.", "OpenAI API settings updated": "", - "OpenAI URL/Key required.": "OpenAI URL/Key აუცილებელია", + "OpenAI URL/Key required.": "OpenAI URL/Key აუცილებელია.", "or": "ან", "Organize your users": "", "Other": "სხვა", "OUTPUT": "", - "Output format": "", - "Overview": "", - "page": "", + "Output format": "გამოტანის ფორმატი", + "Overview": "მიმოხილვა", + "page": "პანელი", "Password": "პაროლი", "Paste Large Text as File": "", "PDF document (.pdf)": "PDF დოკუმენტი (.pdf)", "PDF Extract Images (OCR)": "PDF იდან ამოღებული სურათები (OCR)", - "pending": "ლოდინის რეჟიმშია", + "pending": "დარჩენილი", "Permission denied when accessing media devices": "", "Permission denied when accessing microphone": "", "Permission denied when accessing microphone: {{error}}": "ნებართვა უარყოფილია მიკროფონზე წვდომისას: {{error}}", - "Permissions": "", + "Permissions": "ნებართვები", "Personalization": "პერსონალიზაცია", - "Pin": "", - "Pinned": "", + "Pin": "მიმაგრება", + "Pinned": "მიმაგრებულია", "Pioneer insights": "", "Pipeline deleted successfully": "", "Pipeline downloaded successfully": "", "Pipelines": "მილსადენები", "Pipelines Not Detected": "", - "Pipelines Valves": "მილსადენების სარქველები", - "Plain text (.txt)": "ტექსტი (.txt)", - "Playground": "სათამაშო მოედანი", + "Pipelines Valves": "მილსადენის სარქველები", + "Plain text (.txt)": "უბრალო ტექსტი (.txt)", + "Playground": "საცდელი 
ფუნქციები",
    "Please carefully review the following warnings:": "",
    "Please do not close the settings page while loading the model.": "",
    "Please enter a prompt": "",
-    "Please fill in all fields.": "",
-    "Please select a model first.": "",
-    "Please select a model.": "",
-    "Please select a reason": "",
-    "Port": "",
-    "Positive attitude": "პოზიტიური ანგარიში",
-    "Prefix ID": "",
+    "Please fill in all fields.": "შეავსეთ ყველა ველი ბოლომდე.",
+    "Please select a model first.": "გთხოვთ, ჯერ აირჩიეთ მოდელი.",
+    "Please select a model.": "აირჩიეთ მოდელი.",
+    "Please select a reason": "აირჩიეთ მიზეზი",
+    "Port": "პორტი",
+    "Positive attitude": "პოზიტიური დამოკიდებულება",
+    "Prefix ID": "პრეფიქსის ID",
    "Prefix ID is used to avoid conflicts with other connections by adding a prefix to the model IDs - leave empty to disable": "",
    "Presence Penalty": "",
-    "Previous 30 days": "უკან 30 დღე",
-    "Previous 7 days": "უკან 7 დღე",
+    "Previous 30 days": "წინა 30 დღე",
+    "Previous 7 days": "წინა 7 დღე",
    "Profile Image": "პროფილის სურათი",
-    "Prompt": "",
+    "Prompt": "მოთხოვნა",
    "Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (მაგ. 
მითხარი სახალისო ფაქტი რომის იმპერიის შესახებ)", - "Prompt Content": "მოთხოვნის შინაარსი", + "Prompt Content": "მოთხოვნის შემცველობა", "Prompt created successfully": "", "Prompt suggestions": "მოთხოვნის რჩევები", "Prompt updated successfully": "", "Prompts": "მოთხოვნები", "Prompts Access": "", - "Proxy URL": "", - "Pull \"{{searchValue}}\" from Ollama.com": "ჩაიამოვეთ \"{{searchValue}}\" Ollama.com-იდან", - "Pull a model from Ollama.com": "Ollama.com იდან მოდელის გადაწერა ", + "Proxy URL": "პროქსის URL", + "Pull \"{{searchValue}}\" from Ollama.com": "\"{{searchValue}}\"-ის გადმოწერა Ollama.com-იდან", + "Pull a model from Ollama.com": "მოდელის გადმოწერა Ollama.com-დან", "Query Generation Prompt": "", - "Query Params": "პარამეტრების ძიება", + "Query Params": "პარამეტრების მოთხოვნა", "RAG Template": "RAG შაბლონი", - "Rating": "", + "Rating": "ხმის მიცემა", "Re-rank models by topic similarity": "", - "Read": "", - "Read Aloud": "ხმის ჩაწერა", + "Read": "წაკითხვა", + "Read Aloud": "ხმამაღლა წაკითხვა", "Reasoning Effort": "", "Record voice": "ხმის ჩაწერა", - "Redirecting you to Open WebUI Community": "გადამისამართდებით OpenWebUI საზოგადოებაში", + "Redirecting you to Open WebUI Community": "მიმდინარეობს გადამისამართება OpenWebUI-ის საზოგადოების საიტზე", "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. 
(Default: 40)": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "", "References from": "", "Refused when it shouldn't have": "უარა, როგორც უნდა იყოს", - "Regenerate": "ხელახლა გენერირება", - "Release Notes": "Გამოშვების შენიშვნები", - "Relevance": "", - "Remove": "პოპულარობის რაოდენობა", - "Remove Model": "პოპულარობის რაოდენობა", - "Rename": "პოპულარობის რაოდენობა", + "Regenerate": "თავიდან გენერაცია", + "Release Notes": "გამოცემის შენიშვნები", + "Relevance": "შესაბამისობა", + "Remove": "წაშლა", + "Remove Model": "მოდელის წაშლა", + "Rename": "სახელის გადარქმევა", "Reorder Models": "", - "Repeat Last N": "გაიმეორეთ ბოლო N", + "Repeat Last N": "ბოლო N-ის გამეორება", "Repeat Penalty (Ollama)": "", - "Reply in Thread": "", + "Reply in Thread": "ნაკადში პასუხი", "Request Mode": "მოთხოვნის რეჟიმი", - "Reranking Model": "რექვექტირება", - "Reranking model disabled": "რექვექტირება არაა ჩართული", + "Reranking Model": "Reranking მოდელი", + "Reranking model disabled": "Reranking მოდელი გათიშულია", "Reranking model set to \"{{reranking_model}}\"": "Reranking model set to \"{{reranking_model}}\"", - "Reset": "", + "Reset": "ჩამოყრა", "Reset All Models": "", "Reset Upload Directory": "", "Reset Vector Storage/Knowledge": "", "Reset view": "", "Response notifications cannot be activated as the website permissions have been denied. 
Please visit your browser settings to grant the necessary access.": "", "Response splitting": "", - "Result": "", + "Result": "შედეგი", "Retrieval Query Generation": "", "Rich Text Input for Chat": "", - "RK": "", + "RK": "RK", "Role": "როლი", - "Rosé Pine": "ვარდისფერი ფიჭვის ხე", + "Rosé Pine": "ვარდისფერი ფიჭვი", "Rosé Pine Dawn": "ვარდისფერი ფიჭვის გარიჟრაჟი", "RTL": "RTL", - "Run": "", - "Running": "", + "Run": "გაშვება", + "Running": "გაშვებულია", "Save": "შენახვა", - "Save & Create": "დამახსოვრება და შექმნა", - "Save & Update": "დამახსოვრება და განახლება", - "Save As Copy": "", - "Save Tag": "", - "Saved": "", + "Save & Create": "შენახვა და შექმნა", + "Save & Update": "შენახვა და განახლება", + "Save As Copy": "ასლის შენახვა", + "Save Tag": "ჭდის შენახვა", + "Saved": "შენახულია", "Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "ჩეთის ისტორიის შენახვა პირდაპირ თქვენი ბრაუზერის საცავში აღარ არის მხარდაჭერილი. გთხოვთ, დაუთმოთ და წაშალოთ თქვენი ჩატის ჟურნალები ქვემოთ მოცემულ ღილაკზე დაწკაპუნებით. 
არ ინერვიულოთ, თქვენ შეგიძლიათ მარტივად ხელახლა შემოიტანოთ თქვენი ჩეთის ისტორია ბექენდში", "Scroll to bottom when switching between branches": "", - "Search": "ძიება", - "Search a model": "მოდელის ძიება", - "Search Base": "", - "Search Chats": "ჩატების ძებნა", - "Search Collection": "", - "Search Filters": "", - "search for tags": "", - "Search Functions": "", + "Search": "ძებნა", + "Search a model": "მოდელის ძებნა", + "Search Base": "ბაზის ძებნა", + "Search Chats": "ძებნა ჩატებში", + "Search Collection": "კოლექციის ძებნა", + "Search Filters": "ფილტრების ძებნა", + "search for tags": "ჭდეების ძებნა", + "Search Functions": "ფუნქციების ძებნა", "Search Knowledge": "", - "Search Models": "საძიებო მოდელები", - "Search options": "", - "Search Prompts": "მოთხოვნების ძიება", + "Search Models": "მოდელების ძებნა", + "Search options": "ძებნის მორგება", + "Search Prompts": "მოთხოვნების ძებნა", "Search Result Count": "ძიების შედეგების რაოდენობა", - "Search the internet": "", - "Search Tools": "", - "SearchApi API Key": "", - "SearchApi Engine": "", - "Searched {{count}} sites": "", - "Searching \"{{searchQuery}}\"": "", + "Search the internet": "ინტერნეტში ძებნა", + "Search Tools": "ძებნის ხელსაწყოები", + "SearchApi API Key": "SearchApi API-ის გასაღები", + "SearchApi Engine": "ძრავა SearchApi", + "Searched {{count}} sites": "მოძებნილია {{count}} საიტზე", + "Searching \"{{searchQuery}}\"": "მიმდინარეობს ძებნა \"{{searchQuery}}\"", "Searching Knowledge for \"{{searchQuery}}\"": "", "Searxng Query URL": "Searxng Query URL", - "See readme.md for instructions": "იხილეთ readme.md ინსტრუქციებისთვის", - "See what's new": "სიახლეების ნახვა", - "Seed": "სიდი", - "Select a base model": "აირჩიეთ ბაზის მოდელი", - "Select a engine": "", - "Select a function": "", - "Select a group": "", - "Select a model": "მოდელის არჩევა", + "See readme.md for instructions": "ინსტრუქციებისთვის იხილეთ readme.md", + "See what's new": "ნახეთ, რა არის ახალი", + "Seed": "თესლი", + "Select a base model": "აირჩიეთ 
საბაზისო მოდელი", + "Select a engine": "აირჩიეთ ძრავა", + "Select a function": "აირჩიეთ ფუნქცია", + "Select a group": "აირჩიეთ ჯგუფი", + "Select a model": "აირჩიეთ მოდელი", "Select a pipeline": "აირჩიეთ მილსადენი", "Select a pipeline url": "აირჩიეთ მილსადენის url", - "Select a tool": "", + "Select a tool": "აირჩიეთ ხელსაწყო", "Select an auth method": "", "Select an Ollama instance": "", "Select Engine": "", "Select Knowledge": "", "Select model": "მოდელის არჩევა", "Select only one model to call": "", - "Selected model(s) do not support image inputs": "შერჩეული მოდელი (ებ) ი არ უჭერს მხარს გამოსახულების შეყვანას", + "Selected model(s) do not support image inputs": "მონიშნულ მოდელებს გამოსახულების შეყვანის მხარდაჭერა არ გააჩნიათ", "Semantic distance to query": "", "Send": "გაგზავნა", "Send a Message": "შეტყობინების გაგზავნა", @@ -891,21 +891,21 @@ "September": "სექტემბერი", "SerpApi API Key": "", "SerpApi Engine": "", - "Serper API Key": "Serper API Key", + "Serper API Key": "Serper API-ის გასაღები", "Serply API Key": "", - "Serpstack API Key": "Serpstack API Key", - "Server connection verified": "სერვერთან კავშირი დადასტურებულია", - "Set as default": "დეფოლტად დაყენება", + "Serpstack API Key": "Serpstack API-ის გასაღები", + "Server connection verified": "სერვერთან კავშირი გადამოწმებულია", + "Set as default": "ნაგულისხმევად დაყენება", "Set CFG Scale": "", - "Set Default Model": "დეფოლტ მოდელის დაყენება", + "Set Default Model": "ნაგულისხმევი მოდელის დაყენება", "Set embedding model": "", - "Set embedding model (e.g. {{model}})": "ჩვენება მოდელის დაყენება (მაგ. {{model}})", - "Set Image Size": "სურათის ზომის დაყენება", - "Set reranking model (e.g. {{model}})": "რეტარირება მოდელის დაყენება (მაგ. {{model}})", + "Set embedding model (e.g. {{model}})": "ჩვენება ჩაშენებული მოდელის დაყენება (მაგ. {{model}})", + "Set Image Size": "გამოსახულების ზომის დაყენება", + "Set reranking model (e.g. {{model}})": "Reranking მოდელის დაყენება (მაგ. 
{{model}})", "Set Sampler": "", "Set Scheduler": "", "Set Steps": "ნაბიჯების დაყენება", - "Set Task Model": "დააყენეთ სამუშაო მოდელი", + "Set Task Model": "დავალების მოდელის დაყენება", "Set the number of layers, which will be off-loaded to GPU. Increasing this value can significantly improve performance for models that are optimized for GPU acceleration but may also consume more power and GPU resources.": "", "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "ხმის დაყენება", @@ -916,18 +916,18 @@ "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "", "Sets the size of the context window used to generate the next token. (Default: 2048)": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. 
Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "", - "Settings": "ხელსაწყოები", - "Settings saved successfully!": "პარამეტრები წარმატებით განახლდა!", + "Settings": "მორგება", + "Settings saved successfully!": "პარამეტრები შენახვა წარმატებულია!", "Share": "გაზიარება", - "Share Chat": "გაზიარება", - "Share to Open WebUI Community": "გააზიარე OpenWebUI საზოგადოებაში ", + "Share Chat": "ჩატის გაზიარება", + "Share to Open WebUI Community": "გაზიარება Open WebUI-ის საზოგადოებასთან", "Show": "ჩვენება", "Show \"What's New\" modal on login": "", "Show Admin Details in Account Pending Overlay": "", "Show shortcuts": "მალსახმობების ჩვენება", "Show your support!": "", - "Showcased creativity": "ჩვენებული ქონება", - "Sign in": "ავტორიზაცია", + "Showcased creativity": "გამოკვეთილი კრეატიულობა", + "Sign in": "შესვლა", "Sign in to {{WEBUI_NAME}}": "", "Sign in to {{WEBUI_NAME}} with LDAP": "", "Sign Out": "გასვლა", @@ -937,18 +937,18 @@ "sk-1234": "", "Source": "წყარო", "Speech Playback Speed": "", - "Speech recognition error: {{error}}": "მეტყველების ამოცნობის შეცდომა: {{error}}", - "Speech-to-Text Engine": "ხმოვან-ტექსტური ძრავი", - "Stop": "", - "Stop Sequence": "შეჩერების თანმიმდევრობა", + "Speech recognition error: {{error}}": "საუბრის ამოცნობის შეცდომა: {{error}}", + "Speech-to-Text Engine": "საუბრიდან-ტექსტამდე-ის ძრავი", + "Stop": "გაჩერება", + "Stop Sequence": "შეჩერების მიმდევრობა", "Stream Chat Response": "", "STT Model": "", - "STT Settings": "მეტყველების ამოცნობის პარამეტრები", + "STT Settings": "STT-ის მორგება", "Subtitle (e.g. about the Roman Empire)": "სუბტიტრები (მაგ. 
რომის იმპერიის შესახებ)", "Success": "წარმატება", - "Successfully updated.": "წარმატებით განახლდა", - "Suggested": "პირდაპირ პოპულარული", - "Support": "", + "Successfully updated.": "წარმატებით განახლდა.", + "Suggested": "შეთავაზებულია", + "Support": "მხარდაჭერა", "Support this plugin:": "", "Sync directory": "", "System": "სისტემა", @@ -958,11 +958,11 @@ "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", "Tap to interrupt": "", - "Tasks": "", + "Tasks": "ამოცანები", "Tavily API Key": "", - "Tell us more:": "ჩვენთან დავუკავშირდით", + "Tell us more:": "გვითხარით მეტი:", "Temperature": "ტემპერატურა", - "Template": "შაბლონი", + "Template": "ნიმუში", "Temporary Chat": "", "Text Splitter": "", "Text-to-Speech Engine": "ტექსტურ-ხმოვანი ძრავი", @@ -978,12 +978,12 @@ "The leaderboard is currently in beta, and we may adjust the rating calculations as we refine the algorithm.": "", "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "", - "The score should be a value between 0.0 (0%) and 1.0 (100%).": "ქულა 0.0 (0%) და 1.0 (100%) ჩაშენებული უნდა იყოს.", + "The score should be a value between 0.0 (0%) and 1.0 (100%).": "რეიტინგი უნდა იყოს მნიშვნელობ შუალედიდან 0.0 (0%) - 1.0 (100%).", "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "", "Theme": "თემა", "Thinking...": "", "This action cannot be undone. Do you wish to continue?": "", - "This ensures that your valuable conversations are securely saved to your backend database. 
Thank you!": "ეს უზრუნველყოფს, რომ თქვენი ძვირფასი საუბრები უსაფრთხოდ შეინახება თქვენს backend მონაცემთა ბაზაში. Გმადლობთ!", + "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "ეს უზრუნველყოფს, რომ თქვენი ღირებული საუბრები უსაფრთხოდ შეინახება თქვენს უკანაბოლო მონაცემთა ბაზაში. მადლობა!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "", "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "", "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "", @@ -994,7 +994,7 @@ "This will delete all models including custom models": "", "This will delete all models including custom models and cannot be undone.": "", "This will reset the knowledge base and sync all files. Do you wish to continue?": "", - "Thorough explanation": "ვრცლად აღწერა", + "Thorough explanation": "საფუძვლიანი ახსნა", "Thought for {{DURATION}}": "", "Thought for {{DURATION}} seconds": "", "Tika": "", @@ -1002,14 +1002,14 @@ "Tiktoken": "", "Tip: Update multiple variable slots consecutively by pressing the tab key in the chat input after each replacement.": "რჩევა: განაახლეთ რამდენიმე ცვლადი სლოტი თანმიმდევრულად, ყოველი ჩანაცვლების შემდეგ ჩატის ღილაკზე დაჭერით.", "Title": "სათაური", - "Title (e.g. Tell me a fun fact)": "სათაური (მაგ. გაიხსნე რაღაც ხარისხი)", - "Title Auto-Generation": "სათაურის ავტო-გენერაცია", - "Title cannot be an empty string.": "სათაური ცარიელი ველი ვერ უნდა იყოს.", + "Title (e.g. 
Tell me a fun fact)": "სათაური (მაგ. მითხარი რამე სასაცილო)", + "Title Auto-Generation": "სათაურის ავტოგენერაცია", + "Title cannot be an empty string.": "სათაურის ველი ცარიელი სტრიქონი ვერ იქნება.", "Title Generation": "", - "Title Generation Prompt": "სათაურის გენერაციის მოთხოვნა ", - "TLS": "", - "To access the available model names for downloading,": "ჩამოტვირთვისთვის ხელმისაწვდომი მოდელების სახელებზე წვდომისთვის", - "To access the GGUF models available for downloading,": "ჩასატვირთად ხელმისაწვდომი GGUF მოდელებზე წვდომისთვის", + "Title Generation Prompt": "სათაურის შექმნის მოთხოვნა", + "TLS": "TLS", + "To access the available model names for downloading,": "ხელმისაწვდომი მოდელის სახელებთან წვდომისთვის, რომ გადმოწეროთ,", + "To access the GGUF models available for downloading,": "გადმოსაწერად ხელმისაწვდომი GGUF მოდელებზე წვდომისთვის,", "To access the WebUI, please reach out to the administrator. Admins can manage user statuses from the Admin Panel.": "", "To attach knowledge base here, add them to the \"Knowledge\" workspace first.": "", "To learn more about available endpoints, visit our documentation.": "", @@ -1020,8 +1020,8 @@ "Toast notifications for new updates": "", "Today": "დღეს", "Toggle settings": "პარამეტრების გადართვა", - "Toggle sidebar": "გვერდითი ზოლის გადართვა", - "Token": "", + "Toggle sidebar": "გვერდითი ზოლის ჩართ/გამორთ", + "Token": "კოდი", "Tokens To Keep On Context Refresh (num_keep)": "", "Too verbose": "", "Tool created successfully": "", @@ -1029,9 +1029,9 @@ "Tool Description": "", "Tool ID": "", "Tool imported successfully": "", - "Tool Name": "", + "Tool Name": "ხელსაწყოს სახელი", "Tool updated successfully": "", - "Tools": "", + "Tools": "ხელსაწყოები", "Tools Access": "", "Tools are a function calling system with arbitrary code execution": "", "Tools Function Calling Prompt": "", @@ -1045,67 +1045,67 @@ "TTS Settings": "TTS პარამეტრები", "TTS Voice": "", "Type": "ტიპი", - "Type Hugging Face Resolve (Download) URL": "სცადე გადმოწერო 
Hugging Face Resolve URL", + "Type Hugging Face Resolve (Download) URL": "აკრიფეთ HuggingFace-ის ამოხსნის (გადმოწერის) URL", "Uh-oh! There was an issue with the response.": "", - "UI": "", + "UI": "UI", "Unarchive All": "", "Unarchive All Archived Chats": "", "Unarchive Chat": "", "Unlock mysteries": "", - "Unpin": "", + "Unpin": "ჩამოხსნა", "Unravel secrets": "", - "Untagged": "", - "Update": "", + "Untagged": "ჭდის გარეშე", + "Update": "განახლება", "Update and Copy Link": "განახლება და ბმულის კოპირება", "Update for the latest features and improvements.": "", "Update password": "პაროლის განახლება", - "Updated": "", - "Updated at": "", + "Updated": "განახლებულია", + "Updated at": "განახლების დრო", "Updated At": "", "Upgrade to a licensed plan for enhanced capabilities, including custom theming and branding, and dedicated support.": "", - "Upload": "", + "Upload": "ატვირთვა", "Upload a GGUF model": "GGUF მოდელის ატვირთვა", - "Upload directory": "", - "Upload files": "", - "Upload Files": "ატვირთეთ ფაილები", + "Upload directory": "ატვირთვის დირექტორია", + "Upload files": "ფაილების ატვირთვა", + "Upload Files": "ფაილების ატვირთვა", "Upload Pipeline": "", - "Upload Progress": "პროგრესის ატვირთვა", - "URL": "", + "Upload Progress": "ატვირთვის მიმდინარეობა", + "URL": "URL", "URL Mode": "URL რეჟიმი", "Use '#' in the prompt input to load and include your knowledge.": "", - "Use Gravatar": "გამოიყენე Gravatar", + "Use Gravatar": "Gravatar-ის გამოყენება", "Use groups to group your users and assign permissions.": "", - "Use Initials": "გამოიყენე ინიციალები", - "use_mlock (Ollama)": "use_mlock (ოლამა)", - "use_mmap (Ollama)": "use_mmap (ოლამა)", + "Use Initials": "ინიციალების გამოყენება", + "use_mlock (Ollama)": "use_mlock (Ollama)", + "use_mmap (Ollama)": "use_mmap (Ollama)", "user": "მომხმარებელი", - "User": "", + "User": "მომხმარებელი", "User location successfully retrieved.": "", - "Username": "", + "Username": "მომხმარებლის სახელი", "Users": "მომხმარებლები", "Using the 
default arena model with all models. Click the plus button to add custom models.": "", "Utilize": "გამოყენება", - "Valid time units:": "მოქმედი დროის ერთეულები", + "Valid time units:": "სწორი დროის ერთეულები:", "Valves": "", "Valves updated": "", "Valves updated successfully": "", "variable": "ცვლადი", - "variable to have them replaced with clipboard content.": "ცვლადი, რომ შეცვალოს ისინი ბუფერში შიგთავსით.", + "variable to have them replaced with clipboard content.": "ცვლადი მისი ბუფერის მნიშვნელობით ჩასანაცვლებლად.", "Version": "ვერსია", "Version {{selectedVersion}} of {{totalVersions}}": "", "View Replies": "", - "Visibility": "", - "Voice": "", + "Visibility": "ხილვადობა", + "Voice": "ხმა", "Voice Input": "", "Warning": "გაფრთხილება", - "Warning:": "", + "Warning:": "გაფრთხილება:", "Warning: Enabling this will allow users to upload arbitrary code on the server.": "", - "Warning: If you update or change your embedding model, you will need to re-import all documents.": "გაფრთხილება: თუ განაახლებთ ან შეცვლით ჩანერგვის მოდელს, მოგიწევთ ყველა დოკუმენტის ხელახლა იმპორტი.", + "Warning: If you update or change your embedding model, you will need to re-import all documents.": "გაფრთხილება: თუ განაახლებთ ან შეცვლით თქვენს ჩაშენებულ მოდელს, მოგიწევთ ყველა დოკუმენტის ხელახლა შემოტანა.", "Warning: Jupyter execution enables arbitrary code execution, posing severe security risks—proceed with extreme caution.": "", "Web": "ვები", "Web API": "", - "Web Loader Settings": "ვების ჩატარების პარამეტრები", - "Web Search": "ვებ ძებნა", + "Web Loader Settings": "ვებჩამტვირთავის მორგება", + "Web Search": "ვებში ძებნა", "Web Search Engine": "ვებ საძიებო სისტემა", "Web Search in Chat": "", "Web Search Query Generation": "", @@ -1120,29 +1120,29 @@ "When enabled, the model will respond to each chat message in real-time, generating a response as soon as the user sends a message. 
This mode is useful for live chat applications, but may impact performance on slower hardware.": "", "wherever you are": "", "Whisper (Local)": "", - "Why?": "", + "Why?": "რატომ?", "Widescreen Mode": "", - "Won": "", + "Won": "ვონი", "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "", - "Workspace": "ვულერი", + "Workspace": "სამუშაო სივრცე", "Workspace Permissions": "", - "Write": "", + "Write": "ჩაწერა", "Write a prompt suggestion (e.g. Who are you?)": "დაწერეთ მოკლე წინადადება (მაგ. ვინ ხარ?", "Write a summary in 50 words that summarizes [topic or keyword].": "დაწერეთ რეზიუმე 50 სიტყვით, რომელიც აჯამებს [თემას ან საკვანძო სიტყვას].", "Write something...": "", "Write your model template content here": "", - "Yesterday": "აღდგენა", - "You": "ჩემი", + "Yesterday": "გუშინ", + "You": "თქვენ", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "", "You cannot upload an empty file.": "", "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "", - "You have no archived conversations.": "არ ხართ არქივირებული განხილვები.", - "You have shared this chat": "ამ ჩატის გააგზავნა", - "You're a helpful assistant.": "თქვენ სასარგებლო ასისტენტი ხართ.", - "You're now logged in.": "თქვენ შესული ხართ.", + "You have no archived conversations.": "დაარქივებული საუბრები არ გაქვთ.", + "You have shared this chat": "თქვენ გააზიარეთ ეს ჩატი", + "You're a helpful assistant.": "თქვენ სასარგებლო ასისტენტი ბრძანდებით.", + "You're now logged in.": "ახლა შესული ბრძანდებით.", "Your account status is currently pending activation.": "", "Your entire contribution will go directly to the plugin 
developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "", "Youtube": "Youtube", From 0335d479f9b3be4054a022c77ff55db1e4684268 Mon Sep 17 00:00:00 2001 From: Gunwoo Hur Date: Mon, 24 Feb 2025 17:27:37 +0900 Subject: [PATCH 055/623] feat: add onedrive file picker --- package-lock.json | 22 ++ package.json | 1 + src/lib/components/chat/MessageInput.svelte | 21 ++ .../chat/MessageInput/InputMenu.svelte | 31 +++ src/lib/utils/onedrive-auth.ts | 42 ++++ src/lib/utils/onedrive-file-picker.ts | 211 ++++++++++++++++++ 6 files changed, 328 insertions(+) create mode 100644 src/lib/utils/onedrive-auth.ts create mode 100644 src/lib/utils/onedrive-file-picker.ts diff --git a/package-lock.json b/package-lock.json index c6587077219..066cf2be53f 100644 --- a/package-lock.json +++ b/package-lock.json @@ -8,6 +8,7 @@ "name": "open-webui", "version": "0.5.16", "dependencies": { + "@azure/msal-browser": "^4.4.0", "@codemirror/lang-javascript": "^6.2.2", "@codemirror/lang-python": "^6.1.6", "@codemirror/language-data": "^6.5.1", @@ -134,6 +135,27 @@ "node": ">=6.0.0" } }, + "node_modules/@azure/msal-browser": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/@azure/msal-browser/-/msal-browser-4.4.0.tgz", + "integrity": "sha512-rU6juYXk67CKQmpgi6fDgZoPQ9InZ1760z1BSAH7RbeIc4lHZM/Tu+H0CyRk7cnrfvTkexyYE4pjYhMghpzheA==", + "license": "MIT", + "dependencies": { + "@azure/msal-common": "15.2.0" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/@azure/msal-common": { + "version": "15.2.0", + "resolved": "https://registry.npmjs.org/@azure/msal-common/-/msal-common-15.2.0.tgz", + "integrity": "sha512-HiYfGAKthisUYqHG1nImCf/uzcyS31wng3o+CycWLIM9chnYJ9Lk6jZ30Y6YiYYpTQ9+z/FGUpiKKekd3Arc0A==", + "license": "MIT", + "engines": { + "node": ">=0.8.0" + } + }, "node_modules/@babel/runtime": { "version": "7.24.1", "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.24.1.tgz", diff --git 
a/package.json b/package.json index 86568869ffb..0e8fe2bc6fd 100644 --- a/package.json +++ b/package.json @@ -51,6 +51,7 @@ }, "type": "module", "dependencies": { + "@azure/msal-browser": "^4.4.0", "@codemirror/lang-javascript": "^6.2.2", "@codemirror/lang-python": "^6.1.6", "@codemirror/language-data": "^6.5.1", diff --git a/src/lib/components/chat/MessageInput.svelte b/src/lib/components/chat/MessageInput.svelte index 38cb91cc022..a81139d2f0c 100644 --- a/src/lib/components/chat/MessageInput.svelte +++ b/src/lib/components/chat/MessageInput.svelte @@ -2,6 +2,7 @@ import { toast } from 'svelte-sonner'; import { v4 as uuidv4 } from 'uuid'; import { createPicker, getAuthToken } from '$lib/utils/google-drive-picker'; + import { openOneDrivePicker } from '$lib/utils/onedrive-file-picker'; import { onMount, tick, getContext, createEventDispatcher, onDestroy } from 'svelte'; const dispatch = createEventDispatcher(); @@ -1108,6 +1109,26 @@ ); } }} + uploadOneDriveHandler={async () => { + try { + const fileData = await openOneDrivePicker(); + if (fileData) { + const file = new File([fileData.blob], fileData.name, { + type: fileData.blob.type + }); + await uploadFileHandler(file); + } else { + console.log('No file was selected from OneDrive'); + } + } catch (error) { + console.error('OneDrive Error:', error); + toast.error( + $i18n.t('Error accessing OneDrive: {{error}}', { + error: error.message + }) + ); + } + }} onClose={async () => { await tick(); diff --git a/src/lib/components/chat/MessageInput/InputMenu.svelte b/src/lib/components/chat/MessageInput/InputMenu.svelte index 801093d8f81..91f9cf81b4a 100644 --- a/src/lib/components/chat/MessageInput/InputMenu.svelte +++ b/src/lib/components/chat/MessageInput/InputMenu.svelte @@ -5,6 +5,7 @@ import { config, user, tools as _tools, mobile } from '$lib/stores'; import { createPicker } from '$lib/utils/google-drive-picker'; + import { getTools } from '$lib/apis/tools'; import Dropdown from 
'$lib/components/common/Dropdown.svelte'; @@ -24,6 +25,7 @@ export let inputFilesHandler: Function; export let uploadGoogleDriveHandler: Function; + export let uploadOneDriveHandler: Function; export let selectedToolIds: string[] = []; @@ -225,6 +227,35 @@
{$i18n.t('Google Drive')}
{/if} + + {#if $config?.features?.enable_onedrive_integration || true} + { + uploadOneDriveHandler(); + }} + > + + + + + + +
{$i18n.t('OneDrive')}
+
+ {/if} diff --git a/src/lib/utils/onedrive-auth.ts b/src/lib/utils/onedrive-auth.ts new file mode 100644 index 00000000000..be2de44a013 --- /dev/null +++ b/src/lib/utils/onedrive-auth.ts @@ -0,0 +1,42 @@ +import { PublicClientApplication } from '@azure/msal-browser'; + +const msalParams = { + auth: { + authority: 'https://login.microsoftonline.com/consumers', + clientId: '2ab80a1e-7300-4cb1-beac-c38c730e8b7f' + } +}; + +// MSAL 초기화 +const app = new PublicClientApplication(msalParams); + +export async function initializeMsal() { + try { + await app.initialize(); + console.log('MSAL initialized successfully'); + } catch (error) { + console.error('MSAL initialization error:', error); + } + } + + export async function getToken(): Promise { + const authParams = { scopes: ['OneDrive.ReadWrite'] }; + let accessToken = ''; + + try { + // Ensure initialization happens early + await initializeMsal(); + const resp = await app.acquireTokenSilent(authParams); + accessToken = resp.accessToken; + } catch (err) { + const resp = await app.loginPopup(authParams); + app.setActiveAccount(resp.account); + + if (resp.idToken) { + const resp2 = await app.acquireTokenSilent(authParams); + accessToken = resp2.accessToken; + } + } + + return accessToken; + } diff --git a/src/lib/utils/onedrive-file-picker.ts b/src/lib/utils/onedrive-file-picker.ts new file mode 100644 index 00000000000..d003e38ec14 --- /dev/null +++ b/src/lib/utils/onedrive-file-picker.ts @@ -0,0 +1,211 @@ +// src/lib/utils/onedrive-file-picker.ts +import { getToken } from './onedrive-auth'; + + +const baseUrl = "https://onedrive.live.com/picker"; +const params = { + sdk: '8.0', + entry: { + oneDrive: { + files: {} + } + }, + authentication: {}, + messaging: { + origin: 'http://localhost:3000', // 현재 부모 페이지의 origin + channelId: '27' // 메시징 채널용 임의의 ID + }, + typesAndSources: { + mode: 'files', + pivots: { + oneDrive: true, + recent: true + } + } +}; + +/** + * OneDrive 파일 피커 창을 열고, 사용자가 선택한 파일 메타데이터를 받아오는 함수 + */ +export 
async function openOneDrivePicker(): Promise { + // SSR 환경(SvelteKit)에서 window 객체가 없을 수 있으므로 가드 + if (typeof window === 'undefined') { + throw new Error('Not in browser environment'); + } + + return new Promise(async (resolve, reject) => { + let pickerWindow: Window | null = null; + let channelPort: MessagePort | null = null; + + try { + const authToken = await getToken(); + if (!authToken) { + return reject(new Error('Failed to acquire access token')); + } + + // 팝업 창 오픈 + pickerWindow = window.open('', 'OneDrivePicker', 'width=800,height=600'); + if (!pickerWindow) { + return reject(new Error('Failed to open OneDrive picker window')); + } + + // 쿼리스트링 구성 + const queryString = new URLSearchParams({ + filePicker: JSON.stringify(params) + }); + const url = `${baseUrl}?${queryString.toString()}`; + + // 새로 연 window에 form을 동적으로 추가하여 POST + const form = pickerWindow.document.createElement('form'); + form.setAttribute('action', url); + form.setAttribute('method', 'POST'); + + const input = pickerWindow.document.createElement('input'); + input.setAttribute('type', 'hidden'); + input.setAttribute('name', 'access_token'); + input.setAttribute('value', authToken); + + form.appendChild(input); + pickerWindow.document.body.appendChild(form); + form.submit(); + + // 부모 창에서 message 이벤트 수신 + const handleWindowMessage = (event: MessageEvent) => { + // pickerWindow가 아닌 다른 window에서 온 메시지는 무시 + if (event.source !== pickerWindow) return; + + const message = event.data; + + // 초기화 메시지 => SharedWorker(MessageChannel) 식으로 포트 받기 + if ( + message?.type === 'initialize' && + message?.channelId === params.messaging.channelId + ) { + channelPort = event.ports?.[0]; + if (!channelPort) return; + + channelPort.addEventListener('message', handlePortMessage); + channelPort.start(); + + // picker iframe에 'activate' 전달 + channelPort.postMessage({ + type: 'activate' + }); + } + }; + + // 포트 메시지 핸들러 + const handlePortMessage = async (portEvent: MessageEvent) => { + const portData = portEvent.data; + 
switch (portData.type) { + case 'notification': + console.log('notification:', portData); + break; + + case 'command': { + // picker에 응답 + channelPort?.postMessage({ + type: 'acknowledge', + id: portData.id + }); + + const command = portData.data; + + switch (command.command) { + case 'authenticate': { + // 재인증 + try { + const newToken = await getToken(); + if (newToken) { + channelPort?.postMessage({ + type: 'result', + id: portData.id, + data: { + result: 'token', + token: newToken + } + }); + } else { + throw new Error('Could not retrieve auth token'); + } + } catch (err) { + console.error(err); + channelPort?.postMessage({ + result: 'error', + error: { + code: 'tokenError', + message: 'Failed to get token' + }, + isExpected: true + }); + } + break; + } + + case 'close': { + // 사용자가 취소하거나 닫았을 경우 + cleanup(); + resolve(null); + break; + } + + case 'pick': { + // 사용자가 파일 선택 완료 + console.log('Picked:', command); + /** + * command 안에는 사용자가 선택한 파일들의 메타데이터 정보가 들어있습니다. + * 필요하다면 Microsoft Graph API 등을 통해 Blob(실제 파일 데이터)을 받아와야 할 수 있습니다. 
+ */ + + // picker에 응답 + channelPort?.postMessage({ + type: 'result', + id: portData.id, + data: { + result: 'success' + } + }); + + // 선택한 파일들(메타정보)을 resolve + cleanup(); + resolve(command); + break; + } + + default: { + console.warn('Unsupported command:', command); + channelPort?.postMessage({ + result: 'error', + error: { + code: 'unsupportedCommand', + message: command.command + }, + isExpected: true + }); + break; + } + } + break; + } + } + }; + + function cleanup() { + window.removeEventListener('message', handleWindowMessage); + if (channelPort) { + channelPort.removeEventListener('message', handlePortMessage); + } + if (pickerWindow) { + pickerWindow.close(); + pickerWindow = null; + } + } + + // 메시지 이벤트 등록 + window.addEventListener('message', handleWindowMessage); + } catch (err) { + if (pickerWindow) pickerWindow.close(); + reject(err); + } + }); +} From 8c020488ddf9c60a876686a46ea7f666542159be Mon Sep 17 00:00:00 2001 From: grand Date: Mon, 24 Feb 2025 12:04:06 +0100 Subject: [PATCH 056/623] * fix: restore compatibility for older o1 models (o1-mini, o1-preview) --- backend/open_webui/routers/openai.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/backend/open_webui/routers/openai.py b/backend/open_webui/routers/openai.py index 78aae998064..69232e3feb0 100644 --- a/backend/open_webui/routers/openai.py +++ b/backend/open_webui/routers/openai.py @@ -84,9 +84,15 @@ def openai_o1_o3_handler(payload): payload["max_completion_tokens"] = payload["max_tokens"] del payload["max_tokens"] - # Fix: o1 and o3 do not support the "system" parameter. Modify "system" to "developer" + # Fix: o1 and o3 do not support the "system" role directly. + # For older models like "o1-mini" or "o1-preview", use role "user". + # For newer o1/o3 models, replace "system" with "developer". 
if payload["messages"][0]["role"] == "system": - payload["messages"][0]["role"] = "developer" + model_lower = payload["model"].lower() + if model_lower.startswith("o1-mini") or model_lower.startswith("o1-preview"): + payload["messages"][0]["role"] = "user" + else: + payload["messages"][0]["role"] = "developer" return payload From 0f143c063d1586a703a12760a7bd28cd78ce95a0 Mon Sep 17 00:00:00 2001 From: Nabeel Raza <61087524+nabeelraza-7@users.noreply.github.com> Date: Mon, 24 Feb 2025 18:53:46 +0500 Subject: [PATCH 057/623] feat: searxng setup --- backend/open_webui/constants.py | 17 ++++++++++- docker-compose.yaml | 50 +++++++++++++----------------- searxng/settings.yml | 35 +++++++++++++++++++++ searxng/uwsgi.ini | 54 +++++++++++++++++++++++++++++++++ 4 files changed, 126 insertions(+), 30 deletions(-) create mode 100644 searxng/settings.yml create mode 100644 searxng/uwsgi.ini diff --git a/backend/open_webui/constants.py b/backend/open_webui/constants.py index 48db56ea015..c47661f1c97 100644 --- a/backend/open_webui/constants.py +++ b/backend/open_webui/constants.py @@ -1,9 +1,24 @@ from enum import Enum import os +from pathlib import Path +#################################### +# Load .env file +#################################### +OPEN_WEBUI_DIR = Path(__file__).parent # the path containing this file -GOOGLE_SHEET_CREDENTIALS = os.environ['GOOGLE_SHEET_CREDENTIALS'] +BACKEND_DIR = OPEN_WEBUI_DIR.parent # the path containing this file +BASE_DIR = BACKEND_DIR.parent # the path containing the backend/ + +try: + from dotenv import find_dotenv, load_dotenv + + load_dotenv(find_dotenv(str(BASE_DIR / ".env"))) +except ImportError: + print("dotenv not installed, skipping...") + +GOOGLE_SHEET_CREDENTIALS = os.environ["GOOGLE_SHEET_CREDENTIALS"] class MESSAGES(str, Enum): diff --git a/docker-compose.yaml b/docker-compose.yaml index 5db32b2bf7f..8820d24348e 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -12,7 +12,6 @@ services: - 
RAG_WEB_SEARCH_RESULT_COUNT=${RAG_WEB_SEARCH_RESULT_COUNT} - RAG_WEB_SEARCH_CONCURRENT_REQUESTS=${RAG_WEB_SEARCH_CONCURRENT_REQUESTS} - SEARXNG_QUERY_URL=${SEARXNG_QUERY_URL} - ports: - "3000:8080" volumes: @@ -20,34 +19,27 @@ services: restart: always env_file: - .env - # searxng: - # container_name: searxng - # image: searxng/searxng:latest - # environment: - # - SEARXNG_UWSGI_WORKERS=${SEARXNG_UWSGI_WORKERS} - # - SEARXNG_UWSGI_THREADS=${SEARXNG_UWSGI_THREADS} - # - SEARXNG_HOSTNAME=${SEARXNG_HOSTNAME} - # ports: - # - "8080:8080" - # volumes: - # - searxng-data:/etc/searxng:rw - # - ./searx-settings.yml:/usr/local/searxng/searx/settings.yml:ro - # env_file: - # - .env - # restart: always - # cap_drop: - # - ALL - # cap_add: - # - CHOWN - # - SETGID - # - SETUID - # - DAC_OVERRIDE - # logging: - # driver: "json-file" - # options: - # max-size: "1m" - # max-file: "1" + depends_on: + - searxng + networks: + - shared-network + + searxng: + container_name: searxng + image: searxng/searxng + ports: + - "8081:8080" + volumes: + - ./searxng:/etc/searxng:rw + env_file: + - .env + restart: unless-stopped + networks: + - shared-network volumes: open-webui-data: - # searxng-data: + +networks: + shared-network: + external: true diff --git a/searxng/settings.yml b/searxng/settings.yml new file mode 100644 index 00000000000..70a67d0b63e --- /dev/null +++ b/searxng/settings.yml @@ -0,0 +1,35 @@ +use_default_settings: true +server: + # Is overwritten by ${SEARXNG_PORT} and ${SEARXNG_BIND_ADDRESS} + port: 8080 + bind_address: "0.0.0.0" + # public URL of the instance, to ensure correct inbound links. Is overwritten + # by ${SEARXNG_URL}. + base_url: false # "http://example.com/location" + # rate limit the number of request on the instance, block some bots. + # Is overwritten by ${SEARXNG_LIMITER} + limiter: false + # enable features designed only for public instances. 
+ # Is overwritten by ${SEARXNG_PUBLIC_INSTANCE} + public_instance: false + + # If your instance owns a /etc/searxng/settings.yml file, then set the following + # values there. + + secret_key: "ursecretkey" # Is overwritten by ${SEARXNG_SECRET} + # Proxy image results through SearXNG. Is overwritten by ${SEARXNG_IMAGE_PROXY} + image_proxy: false + # 1.0 and 1.1 are supported + http_protocol_version: "1.0" + # POST queries are more secure as they don't show up in history but may cause + # problems when using Firefox containers + method: "GET" + default_http_headers: + X-Content-Type-Options: nosniff + X-Download-Options: noopen + X-Robots-Tag: noindex, nofollow + Referrer-Policy: no-referrer + +search: + formats: + - json diff --git a/searxng/uwsgi.ini b/searxng/uwsgi.ini new file mode 100644 index 00000000000..9db3d762649 --- /dev/null +++ b/searxng/uwsgi.ini @@ -0,0 +1,54 @@ +[uwsgi] +# Who will run the code +uid = searxng +gid = searxng + +# Number of workers (usually CPU count) +# default value: %k (= number of CPU core, see Dockerfile) +workers = %k + +# Number of threads per worker +# default value: 4 (see Dockerfile) +threads = 4 + +# The right granted on the created socket +chmod-socket = 666 + +# Plugin to use and interpreter config +single-interpreter = true +master = true +plugin = python3 +lazy-apps = true +enable-threads = 4 + +# Module to import +module = searx.webapp + +# Virtualenv and python path +pythonpath = /usr/local/searxng/ +chdir = /usr/local/searxng/searx/ + +# automatically set processes name to something meaningful +auto-procname = true + +# Disable request logging for privacy +disable-logging = true +log-5xx = true + +# Set the max size of a request (request-body excluded) +buffer-size = 8192 + +# No keep alive +# See https://github.com/searx/searx-docker/issues/24 +add-header = Connection: close + +# Follow SIGTERM convention +# See https://github.com/searxng/searxng/issues/3427 +die-on-term + +# uwsgi serves the static files +static-map 
= /static=/usr/local/searxng/searx/static +# expires set to one day +static-expires = /* 86400 +static-gzip-all = True +offload-threads = 4 From 4cc3102758e96de41f3b6204d45e4094d0b3aaa6 Mon Sep 17 00:00:00 2001 From: hurxxxx Date: Mon, 24 Feb 2025 23:14:10 +0900 Subject: [PATCH 058/623] feat: onedrive file picker integration --- backend/open_webui/config.py | 12 + backend/open_webui/main.py | 8 + backend/open_webui/routers/retrieval.py | 9 + package-lock.json | 22 - package.json | 1 - src/lib/apis/retrieval/index.ts | 1 + .../admin/Settings/Documents.svelte | 15 + src/lib/components/chat/MessageInput.svelte | 11 +- .../chat/MessageInput/InputMenu.svelte | 47 +- src/lib/stores/index.ts | 1 + src/lib/utils/onedrive-auth.ts | 42 -- src/lib/utils/onedrive-file-picker.ts | 447 ++++++++++-------- 12 files changed, 329 insertions(+), 287 deletions(-) delete mode 100644 src/lib/utils/onedrive-auth.ts diff --git a/backend/open_webui/config.py b/backend/open_webui/config.py index b2f8dccca9e..91cc2e99279 100644 --- a/backend/open_webui/config.py +++ b/backend/open_webui/config.py @@ -1570,6 +1570,18 @@ class BannerModel(BaseModel): os.environ.get("GOOGLE_DRIVE_API_KEY", ""), ) +ENABLE_ONEDRIVE_INTEGRATION = PersistentConfig( + "ENABLE_ONEDRIVE_INTEGRATION", + "onedrive.enable", + os.getenv("ENABLE_ONEDRIVE_INTEGRATION", "False").lower() == "true", +) + +ONEDRIVE_CLIENT_ID = PersistentConfig( + "ONEDRIVE_CLIENT_ID", + "onedrive.client_id", + os.environ.get("ONEDRIVE_CLIENT_ID", ""), +) + # RAG Content Extraction CONTENT_EXTRACTION_ENGINE = PersistentConfig( "CONTENT_EXTRACTION_ENGINE", diff --git a/backend/open_webui/main.py b/backend/open_webui/main.py index 1371f7d158e..62e53e34c6b 100644 --- a/backend/open_webui/main.py +++ b/backend/open_webui/main.py @@ -95,6 +95,7 @@ OLLAMA_API_CONFIGS, # OpenAI ENABLE_OPENAI_API, + ONEDRIVE_CLIENT_ID, OPENAI_API_BASE_URLS, OPENAI_API_KEYS, OPENAI_API_CONFIGS, @@ -217,11 +218,13 @@ GOOGLE_PSE_ENGINE_ID, GOOGLE_DRIVE_CLIENT_ID, 
GOOGLE_DRIVE_API_KEY, + ONEDRIVE_CLIENT_ID, ENABLE_RAG_HYBRID_SEARCH, ENABLE_RAG_LOCAL_WEB_FETCH, ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION, ENABLE_RAG_WEB_SEARCH, ENABLE_GOOGLE_DRIVE_INTEGRATION, + ENABLE_ONEDRIVE_INTEGRATION, UPLOAD_DIR, # WebUI WEBUI_AUTH, @@ -568,6 +571,7 @@ async def lifespan(app: FastAPI): app.state.config.RAG_WEB_SEARCH_DOMAIN_FILTER_LIST = RAG_WEB_SEARCH_DOMAIN_FILTER_LIST app.state.config.ENABLE_GOOGLE_DRIVE_INTEGRATION = ENABLE_GOOGLE_DRIVE_INTEGRATION +app.state.config.ENABLE_ONEDRIVE_INTEGRATION = ENABLE_ONEDRIVE_INTEGRATION app.state.config.SEARXNG_QUERY_URL = SEARXNG_QUERY_URL app.state.config.GOOGLE_PSE_API_KEY = GOOGLE_PSE_API_KEY app.state.config.GOOGLE_PSE_ENGINE_ID = GOOGLE_PSE_ENGINE_ID @@ -1150,6 +1154,7 @@ async def get_app_config(request: Request): "enable_admin_export": ENABLE_ADMIN_EXPORT, "enable_admin_chat_access": ENABLE_ADMIN_CHAT_ACCESS, "enable_google_drive_integration": app.state.config.ENABLE_GOOGLE_DRIVE_INTEGRATION, + "enable_onedrive_integration": app.state.config.ENABLE_ONEDRIVE_INTEGRATION, } if user is not None else {} @@ -1181,6 +1186,9 @@ async def get_app_config(request: Request): "client_id": GOOGLE_DRIVE_CLIENT_ID.value, "api_key": GOOGLE_DRIVE_API_KEY.value, }, + "onedrive": { + "client_id": ONEDRIVE_CLIENT_ID.value + } } if user is not None else {} diff --git a/backend/open_webui/routers/retrieval.py b/backend/open_webui/routers/retrieval.py index c2cb68c5d66..51f77d6b11c 100644 --- a/backend/open_webui/routers/retrieval.py +++ b/backend/open_webui/routers/retrieval.py @@ -353,6 +353,7 @@ async def get_rag_config(request: Request, user=Depends(get_admin_user)): "pdf_extract_images": request.app.state.config.PDF_EXTRACT_IMAGES, "RAG_FULL_CONTEXT": request.app.state.config.RAG_FULL_CONTEXT, "enable_google_drive_integration": request.app.state.config.ENABLE_GOOGLE_DRIVE_INTEGRATION, + "enable_onedrive_integration": request.app.state.config.ENABLE_ONEDRIVE_INTEGRATION, "content_extraction": { "engine": 
request.app.state.config.CONTENT_EXTRACTION_ENGINE, "tika_server_url": request.app.state.config.TIKA_SERVER_URL, @@ -381,6 +382,7 @@ async def get_rag_config(request: Request, user=Depends(get_admin_user)): "search": { "enabled": request.app.state.config.ENABLE_RAG_WEB_SEARCH, "drive": request.app.state.config.ENABLE_GOOGLE_DRIVE_INTEGRATION, + "onedrive": request.app.state.config.ENABLE_ONEDRIVE_INTEGRATION, "engine": request.app.state.config.RAG_WEB_SEARCH_ENGINE, "searxng_query_url": request.app.state.config.SEARXNG_QUERY_URL, "google_pse_api_key": request.app.state.config.GOOGLE_PSE_API_KEY, @@ -478,6 +480,7 @@ class ConfigUpdateForm(BaseModel): RAG_FULL_CONTEXT: Optional[bool] = None pdf_extract_images: Optional[bool] = None enable_google_drive_integration: Optional[bool] = None + enable_onedrive_integration: Optional[bool] = None file: Optional[FileConfig] = None content_extraction: Optional[ContentExtractionConfig] = None chunk: Optional[ChunkParamUpdateForm] = None @@ -507,6 +510,12 @@ async def update_rag_config( else request.app.state.config.ENABLE_GOOGLE_DRIVE_INTEGRATION ) + request.app.state.config.ENABLE_ONEDRIVE_INTEGRATION = ( + form_data.enable_onedrive_integration + if form_data.enable_onedrive_integration is not None + else request.app.state.config.ENABLE_ONEDRIVE_INTEGRATION + ) + if form_data.file is not None: request.app.state.config.FILE_MAX_SIZE = form_data.file.max_size request.app.state.config.FILE_MAX_COUNT = form_data.file.max_count diff --git a/package-lock.json b/package-lock.json index 066cf2be53f..c6587077219 100644 --- a/package-lock.json +++ b/package-lock.json @@ -8,7 +8,6 @@ "name": "open-webui", "version": "0.5.16", "dependencies": { - "@azure/msal-browser": "^4.4.0", "@codemirror/lang-javascript": "^6.2.2", "@codemirror/lang-python": "^6.1.6", "@codemirror/language-data": "^6.5.1", @@ -135,27 +134,6 @@ "node": ">=6.0.0" } }, - "node_modules/@azure/msal-browser": { - "version": "4.4.0", - "resolved": 
"https://registry.npmjs.org/@azure/msal-browser/-/msal-browser-4.4.0.tgz", - "integrity": "sha512-rU6juYXk67CKQmpgi6fDgZoPQ9InZ1760z1BSAH7RbeIc4lHZM/Tu+H0CyRk7cnrfvTkexyYE4pjYhMghpzheA==", - "license": "MIT", - "dependencies": { - "@azure/msal-common": "15.2.0" - }, - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/@azure/msal-common": { - "version": "15.2.0", - "resolved": "https://registry.npmjs.org/@azure/msal-common/-/msal-common-15.2.0.tgz", - "integrity": "sha512-HiYfGAKthisUYqHG1nImCf/uzcyS31wng3o+CycWLIM9chnYJ9Lk6jZ30Y6YiYYpTQ9+z/FGUpiKKekd3Arc0A==", - "license": "MIT", - "engines": { - "node": ">=0.8.0" - } - }, "node_modules/@babel/runtime": { "version": "7.24.1", "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.24.1.tgz", diff --git a/package.json b/package.json index 0e8fe2bc6fd..86568869ffb 100644 --- a/package.json +++ b/package.json @@ -51,7 +51,6 @@ }, "type": "module", "dependencies": { - "@azure/msal-browser": "^4.4.0", "@codemirror/lang-javascript": "^6.2.2", "@codemirror/lang-python": "^6.1.6", "@codemirror/language-data": "^6.5.1", diff --git a/src/lib/apis/retrieval/index.ts b/src/lib/apis/retrieval/index.ts index ed07ab5d0dd..31317fe0b9c 100644 --- a/src/lib/apis/retrieval/index.ts +++ b/src/lib/apis/retrieval/index.ts @@ -52,6 +52,7 @@ type YoutubeConfigForm = { type RAGConfigForm = { pdf_extract_images?: boolean; enable_google_drive_integration?: boolean; + enable_onedrive_integration?: boolean; chunk?: ChunkConfigForm; content_extraction?: ContentExtractConfigForm; web_loader_ssl_verification?: boolean; diff --git a/src/lib/components/admin/Settings/Documents.svelte b/src/lib/components/admin/Settings/Documents.svelte index b790863097b..248f6e9f55f 100644 --- a/src/lib/components/admin/Settings/Documents.svelte +++ b/src/lib/components/admin/Settings/Documents.svelte @@ -61,6 +61,7 @@ let RAG_FULL_CONTEXT = false; let enableGoogleDriveIntegration = false; + let enableOneDriveIntegration = false; let OpenAIUrl = 
''; let OpenAIKey = ''; @@ -189,6 +190,7 @@ const res = await updateRAGConfig(localStorage.token, { pdf_extract_images: pdfExtractImages, enable_google_drive_integration: enableGoogleDriveIntegration, + enable_onedrive_integration: enableOneDriveIntegration, file: { max_size: fileMaxSize === '' ? null : fileMaxSize, max_count: fileMaxCount === '' ? null : fileMaxCount @@ -271,6 +273,7 @@ fileMaxCount = res?.file.max_count ?? ''; enableGoogleDriveIntegration = res.enable_google_drive_integration; + enableOneDriveIntegration = res.enable_onedrive_integration; } }); @@ -653,6 +656,18 @@ +
{$i18n.t('OneDrive')}
+ +
+
+
{$i18n.t('Enable OneDrive')}
+
+ +
+
+
+ +
diff --git a/src/lib/components/chat/MessageInput.svelte b/src/lib/components/chat/MessageInput.svelte index a81139d2f0c..bf2f5cddbea 100644 --- a/src/lib/components/chat/MessageInput.svelte +++ b/src/lib/components/chat/MessageInput.svelte @@ -2,7 +2,7 @@ import { toast } from 'svelte-sonner'; import { v4 as uuidv4 } from 'uuid'; import { createPicker, getAuthToken } from '$lib/utils/google-drive-picker'; - import { openOneDrivePicker } from '$lib/utils/onedrive-file-picker'; + import { pickAndDownloadFile } from '$lib/utils/onedrive-file-picker'; import { onMount, tick, getContext, createEventDispatcher, onDestroy } from 'svelte'; const dispatch = createEventDispatcher(); @@ -1111,10 +1111,10 @@ }} uploadOneDriveHandler={async () => { try { - const fileData = await openOneDrivePicker(); + const fileData = await pickAndDownloadFile(); if (fileData) { const file = new File([fileData.blob], fileData.name, { - type: fileData.blob.type + type: fileData.blob.type || 'application/octet-stream' }); await uploadFileHandler(file); } else { @@ -1122,11 +1122,6 @@ } } catch (error) { console.error('OneDrive Error:', error); - toast.error( - $i18n.t('Error accessing OneDrive: {{error}}', { - error: error.message - }) - ); } }} onClose={async () => { diff --git a/src/lib/components/chat/MessageInput/InputMenu.svelte b/src/lib/components/chat/MessageInput/InputMenu.svelte index 91f9cf81b4a..7f7660f1902 100644 --- a/src/lib/components/chat/MessageInput/InputMenu.svelte +++ b/src/lib/components/chat/MessageInput/InputMenu.svelte @@ -228,30 +228,41 @@ {/if} - {#if $config?.features?.enable_onedrive_integration || true} + {#if $config?.features?.enable_onedrive_integration} { uploadOneDriveHandler(); }} > - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + +
{$i18n.t('OneDrive')}
diff --git a/src/lib/stores/index.ts b/src/lib/stores/index.ts index f96670cb621..1f6b400e079 100644 --- a/src/lib/stores/index.ts +++ b/src/lib/stores/index.ts @@ -204,6 +204,7 @@ type Config = { enable_login_form: boolean; enable_web_search?: boolean; enable_google_drive_integration: boolean; + enable_onedrive_integration: boolean; enable_image_generation: boolean; enable_admin_export: boolean; enable_admin_chat_access: boolean; diff --git a/src/lib/utils/onedrive-auth.ts b/src/lib/utils/onedrive-auth.ts deleted file mode 100644 index be2de44a013..00000000000 --- a/src/lib/utils/onedrive-auth.ts +++ /dev/null @@ -1,42 +0,0 @@ -import { PublicClientApplication } from '@azure/msal-browser'; - -const msalParams = { - auth: { - authority: 'https://login.microsoftonline.com/consumers', - clientId: '2ab80a1e-7300-4cb1-beac-c38c730e8b7f' - } -}; - -// MSAL 초기화 -const app = new PublicClientApplication(msalParams); - -export async function initializeMsal() { - try { - await app.initialize(); - console.log('MSAL initialized successfully'); - } catch (error) { - console.error('MSAL initialization error:', error); - } - } - - export async function getToken(): Promise { - const authParams = { scopes: ['OneDrive.ReadWrite'] }; - let accessToken = ''; - - try { - // Ensure initialization happens early - await initializeMsal(); - const resp = await app.acquireTokenSilent(authParams); - accessToken = resp.accessToken; - } catch (err) { - const resp = await app.loginPopup(authParams); - app.setActiveAccount(resp.account); - - if (resp.idToken) { - const resp2 = await app.acquireTokenSilent(authParams); - accessToken = resp2.accessToken; - } - } - - return accessToken; - } diff --git a/src/lib/utils/onedrive-file-picker.ts b/src/lib/utils/onedrive-file-picker.ts index d003e38ec14..e3a80c9124c 100644 --- a/src/lib/utils/onedrive-file-picker.ts +++ b/src/lib/utils/onedrive-file-picker.ts @@ -1,211 +1,266 @@ -// src/lib/utils/onedrive-file-picker.ts -import { getToken } from 
'./onedrive-auth'; +let CLIENT_ID = ''; +async function getCredentials() { + if (CLIENT_ID) return; + const response = await fetch('/api/config'); + if (!response.ok) { + throw new Error('Failed to fetch OneDrive credentials'); + } + const config = await response.json(); + CLIENT_ID = config.onedrive?.client_id; + if (!CLIENT_ID) { + throw new Error('OneDrive client ID not configured'); + } +} -const baseUrl = "https://onedrive.live.com/picker"; -const params = { - sdk: '8.0', - entry: { - oneDrive: { - files: {} - } - }, - authentication: {}, - messaging: { - origin: 'http://localhost:3000', // 현재 부모 페이지의 origin - channelId: '27' // 메시징 채널용 임의의 ID - }, - typesAndSources: { - mode: 'files', - pivots: { - oneDrive: true, - recent: true - } - } -}; - -/** - * OneDrive 파일 피커 창을 열고, 사용자가 선택한 파일 메타데이터를 받아오는 함수 - */ -export async function openOneDrivePicker(): Promise { - // SSR 환경(SvelteKit)에서 window 객체가 없을 수 있으므로 가드 - if (typeof window === 'undefined') { - throw new Error('Not in browser environment'); - } - - return new Promise(async (resolve, reject) => { - let pickerWindow: Window | null = null; - let channelPort: MessagePort | null = null; - - try { - const authToken = await getToken(); - if (!authToken) { - return reject(new Error('Failed to acquire access token')); - } - - // 팝업 창 오픈 - pickerWindow = window.open('', 'OneDrivePicker', 'width=800,height=600'); - if (!pickerWindow) { - return reject(new Error('Failed to open OneDrive picker window')); - } - - // 쿼리스트링 구성 - const queryString = new URLSearchParams({ - filePicker: JSON.stringify(params) - }); - const url = `${baseUrl}?${queryString.toString()}`; - - // 새로 연 window에 form을 동적으로 추가하여 POST - const form = pickerWindow.document.createElement('form'); - form.setAttribute('action', url); - form.setAttribute('method', 'POST'); - - const input = pickerWindow.document.createElement('input'); - input.setAttribute('type', 'hidden'); - input.setAttribute('name', 'access_token'); - input.setAttribute('value', 
authToken); - - form.appendChild(input); - pickerWindow.document.body.appendChild(form); - form.submit(); - - // 부모 창에서 message 이벤트 수신 - const handleWindowMessage = (event: MessageEvent) => { - // pickerWindow가 아닌 다른 window에서 온 메시지는 무시 - if (event.source !== pickerWindow) return; - - const message = event.data; - - // 초기화 메시지 => SharedWorker(MessageChannel) 식으로 포트 받기 - if ( - message?.type === 'initialize' && - message?.channelId === params.messaging.channelId - ) { - channelPort = event.ports?.[0]; - if (!channelPort) return; - - channelPort.addEventListener('message', handlePortMessage); - channelPort.start(); +function loadMsalScript(): Promise { + return new Promise((resolve, reject) => { + const win = window; + if (win.msal) { + resolve(); + return; + } + const script = document.createElement('script'); + script.src = 'https://alcdn.msauth.net/browser/2.19.0/js/msal-browser.min.js'; + script.async = true; + script.onload = () => resolve(); + script.onerror = () => reject(new Error('Failed to load MSAL script')); + document.head.appendChild(script); + }); +} - // picker iframe에 'activate' 전달 - channelPort.postMessage({ - type: 'activate' - }); - } - }; +let msalInstance: any; - // 포트 메시지 핸들러 - const handlePortMessage = async (portEvent: MessageEvent) => { - const portData = portEvent.data; - switch (portData.type) { - case 'notification': - console.log('notification:', portData); - break; +// Initialize MSAL authentication +async function initializeMsal() { + if (!CLIENT_ID) { + await getCredentials(); + } + const msalParams = { + auth: { + authority: 'https://login.microsoftonline.com/consumers', + clientId: CLIENT_ID + } + }; + try { + await loadMsalScript(); + const win = window; + msalInstance = new win.msal.PublicClientApplication(msalParams); + if (msalInstance.initialize) { + await msalInstance.initialize(); + } + } catch (error) { + console.error('MSAL initialization error:', error); + } +} - case 'command': { - // picker에 응답 - 
channelPort?.postMessage({ - type: 'acknowledge', - id: portData.id - }); +// Retrieve OneDrive access token +async function getToken(): Promise { + const authParams = { scopes: ['OneDrive.ReadWrite'] }; + let accessToken = ''; + try { + await initializeMsal(); + const resp = await msalInstance.acquireTokenSilent(authParams); + accessToken = resp.accessToken; + } catch (err) { + const resp = await msalInstance.loginPopup(authParams); + msalInstance.setActiveAccount(resp.account); + if (resp.idToken) { + const resp2 = await msalInstance.acquireTokenSilent(authParams); + accessToken = resp2.accessToken; + } + } + return accessToken; +} - const command = portData.data; +const baseUrl = "https://onedrive.live.com/picker"; +const params = { + sdk: '8.0', + entry: { + oneDrive: { + files: {} + } + }, + authentication: {}, + messaging: { + origin: window?.location?.origin, + channelId: crypto.randomUUID() + }, + typesAndSources: { + mode: 'files', + pivots: { + oneDrive: true, + recent: true + } + } +}; - switch (command.command) { - case 'authenticate': { - // 재인증 - try { - const newToken = await getToken(); - if (newToken) { - channelPort?.postMessage({ - type: 'result', - id: portData.id, - data: { - result: 'token', - token: newToken - } - }); - } else { - throw new Error('Could not retrieve auth token'); - } - } catch (err) { - console.error(err); - channelPort?.postMessage({ - result: 'error', - error: { - code: 'tokenError', - message: 'Failed to get token' - }, - isExpected: true - }); - } - break; - } +// Download file from OneDrive +async function downloadOneDriveFile(fileInfo: any): Promise { + const accessToken = await getToken(); + if (!accessToken) { + throw new Error('Unable to retrieve OneDrive access token.'); + } + const fileInfoUrl = `${fileInfo["@sharePoint.endpoint"]}/drives/${fileInfo.parentReference.driveId}/items/${fileInfo.id}`; + const response = await fetch(fileInfoUrl, { + headers: { + 'Authorization': `Bearer ${accessToken}` + } + }); + if 
(!response.ok) { + throw new Error('Failed to fetch file information.'); + } + const fileData = await response.json(); + const downloadUrl = fileData['@content.downloadUrl']; + const downloadResponse = await fetch(downloadUrl); + if (!downloadResponse.ok) { + throw new Error('Failed to download file.'); + } + return await downloadResponse.blob(); +} - case 'close': { - // 사용자가 취소하거나 닫았을 경우 - cleanup(); - resolve(null); - break; - } +// Open OneDrive file picker and return selected file metadata +export async function openOneDrivePicker(): Promise { + if (typeof window === 'undefined') { + throw new Error('Not in browser environment'); + } + return new Promise((resolve, reject) => { + let pickerWindow: Window | null = null; + let channelPort: MessagePort | null = null; - case 'pick': { - // 사용자가 파일 선택 완료 - console.log('Picked:', command); - /** - * command 안에는 사용자가 선택한 파일들의 메타데이터 정보가 들어있습니다. - * 필요하다면 Microsoft Graph API 등을 통해 Blob(실제 파일 데이터)을 받아와야 할 수 있습니다. - */ + const handleWindowMessage = (event: MessageEvent) => { + if (event.source !== pickerWindow) return; + const message = event.data; + if (message?.type === 'initialize' && message?.channelId === params.messaging.channelId) { + channelPort = event.ports?.[0]; + if (!channelPort) return; + channelPort.addEventListener('message', handlePortMessage); + channelPort.start(); + channelPort.postMessage({ type: 'activate' }); + } + }; - // picker에 응답 - channelPort?.postMessage({ - type: 'result', - id: portData.id, - data: { - result: 'success' - } - }); + const handlePortMessage = async (portEvent: MessageEvent) => { + const portData = portEvent.data; + switch (portData.type) { + case 'notification': + break; + case 'command': { + channelPort?.postMessage({ type: 'acknowledge', id: portData.id }); + const command = portData.data; + switch (command.command) { + case 'authenticate': { + try { + const newToken = await getToken(); + if (newToken) { + channelPort?.postMessage({ + type: 'result', + id: portData.id, + 
data: { result: 'token', token: newToken } + }); + } else { + throw new Error('Could not retrieve auth token'); + } + } catch (err) { + console.error(err); + channelPort?.postMessage({ + result: 'error', + error: { code: 'tokenError', message: 'Failed to get token' }, + isExpected: true + }); + } + break; + } + case 'close': { + cleanup(); + resolve(null); + break; + } + case 'pick': { + channelPort?.postMessage({ + type: 'result', + id: portData.id, + data: { result: 'success' } + }); + cleanup(); + resolve(command); + break; + } + default: { + console.warn('Unsupported command:', command); + channelPort?.postMessage({ + result: 'error', + error: { code: 'unsupportedCommand', message: command.command }, + isExpected: true + }); + break; + } + } + break; + } + } + }; - // 선택한 파일들(메타정보)을 resolve - cleanup(); - resolve(command); - break; - } + function cleanup() { + window.removeEventListener('message', handleWindowMessage); + if (channelPort) { + channelPort.removeEventListener('message', handlePortMessage); + } + if (pickerWindow) { + pickerWindow.close(); + pickerWindow = null; + } + } - default: { - console.warn('Unsupported command:', command); - channelPort?.postMessage({ - result: 'error', - error: { - code: 'unsupportedCommand', - message: command.command - }, - isExpected: true - }); - break; - } - } - break; - } - } - }; + const initializePicker = async () => { + try { + const authToken = await getToken(); + if (!authToken) { + return reject(new Error('Failed to acquire access token')); + } + pickerWindow = window.open('', 'OneDrivePicker', 'width=800,height=600'); + if (!pickerWindow) { + return reject(new Error('Failed to open OneDrive picker window')); + } + const queryString = new URLSearchParams({ + filePicker: JSON.stringify(params) + }); + const url = `${baseUrl}?${queryString.toString()}`; + const form = pickerWindow.document.createElement('form'); + form.setAttribute('action', url); + form.setAttribute('method', 'POST'); + const input = 
pickerWindow.document.createElement('input'); + input.setAttribute('type', 'hidden'); + input.setAttribute('name', 'access_token'); + input.setAttribute('value', authToken); + form.appendChild(input); + pickerWindow.document.body.appendChild(form); + form.submit(); + window.addEventListener('message', handleWindowMessage); + } catch (err) { + if (pickerWindow) pickerWindow.close(); + reject(err); + } + }; - function cleanup() { - window.removeEventListener('message', handleWindowMessage); - if (channelPort) { - channelPort.removeEventListener('message', handlePortMessage); - } - if (pickerWindow) { - pickerWindow.close(); - pickerWindow = null; - } - } + initializePicker(); + }); +} - // 메시지 이벤트 등록 - window.addEventListener('message', handleWindowMessage); - } catch (err) { - if (pickerWindow) pickerWindow.close(); - reject(err); - } - }); +// Pick and download file from OneDrive +export async function pickAndDownloadFile(): Promise<{ blob: Blob; name: string } | null> { + try { + const pickerResult = await openOneDrivePicker(); + if (!pickerResult || !pickerResult.items || pickerResult.items.length === 0) { + return null; + } + const selectedFile = pickerResult.items[0]; + const blob = await downloadOneDriveFile(selectedFile); + return { blob, name: selectedFile.name }; + } catch (error) { + console.error('Error occurred during OneDrive file pick/download:', error); + throw error; + } } + +export { downloadOneDriveFile }; From b2422b3c8fe5cc3bf797482cce84664c12687e14 Mon Sep 17 00:00:00 2001 From: hurxxxx Date: Tue, 25 Feb 2025 01:56:33 +0900 Subject: [PATCH 059/623] i18n: onedrive related --- src/lib/i18n/locales/ar-BH/translation.json | 14 +++++++++++-- src/lib/i18n/locales/bg-BG/translation.json | 14 +++++++++++-- src/lib/i18n/locales/bn-BD/translation.json | 14 +++++++++++-- src/lib/i18n/locales/ca-ES/translation.json | 14 +++++++++++-- src/lib/i18n/locales/ceb-PH/translation.json | 14 +++++++++++-- src/lib/i18n/locales/cs-CZ/translation.json | 14 
+++++++++++-- src/lib/i18n/locales/da-DK/translation.json | 14 +++++++++++-- src/lib/i18n/locales/de-DE/translation.json | 14 +++++++++++-- src/lib/i18n/locales/dg-DG/translation.json | 14 +++++++++++-- src/lib/i18n/locales/el-GR/translation.json | 14 +++++++++++-- src/lib/i18n/locales/en-GB/translation.json | 14 +++++++++++-- src/lib/i18n/locales/en-US/translation.json | 14 +++++++++++-- src/lib/i18n/locales/es-ES/translation.json | 14 +++++++++++-- src/lib/i18n/locales/eu-ES/translation.json | 14 +++++++++++-- src/lib/i18n/locales/fa-IR/translation.json | 14 +++++++++++-- src/lib/i18n/locales/fi-FI/translation.json | 14 +++++++++++-- src/lib/i18n/locales/fr-CA/translation.json | 14 +++++++++++-- src/lib/i18n/locales/fr-FR/translation.json | 14 +++++++++++-- src/lib/i18n/locales/he-IL/translation.json | 14 +++++++++++-- src/lib/i18n/locales/hi-IN/translation.json | 14 +++++++++++-- src/lib/i18n/locales/hr-HR/translation.json | 14 +++++++++++-- src/lib/i18n/locales/hu-HU/translation.json | 14 +++++++++++-- src/lib/i18n/locales/id-ID/translation.json | 14 +++++++++++-- src/lib/i18n/locales/ie-GA/translation.json | 14 +++++++++++-- src/lib/i18n/locales/it-IT/translation.json | 14 +++++++++++-- src/lib/i18n/locales/ja-JP/translation.json | 14 +++++++++++-- src/lib/i18n/locales/ka-GE/translation.json | 14 +++++++++++-- src/lib/i18n/locales/ko-KR/translation.json | 14 +++++++++++-- src/lib/i18n/locales/lt-LT/translation.json | 14 +++++++++++-- src/lib/i18n/locales/ms-MY/translation.json | 14 +++++++++++-- src/lib/i18n/locales/nb-NO/translation.json | 14 +++++++++++-- src/lib/i18n/locales/nl-NL/translation.json | 14 +++++++++++-- src/lib/i18n/locales/pa-IN/translation.json | 14 +++++++++++-- src/lib/i18n/locales/pl-PL/translation.json | 22 +++++++++++++++++--- src/lib/i18n/locales/pt-BR/translation.json | 14 +++++++++++-- src/lib/i18n/locales/pt-PT/translation.json | 14 +++++++++++-- src/lib/i18n/locales/ro-RO/translation.json | 14 +++++++++++-- 
src/lib/i18n/locales/ru-RU/translation.json | 14 +++++++++++-- src/lib/i18n/locales/sk-SK/translation.json | 14 +++++++++++-- src/lib/i18n/locales/sr-RS/translation.json | 14 +++++++++++-- src/lib/i18n/locales/sv-SE/translation.json | 14 +++++++++++-- src/lib/i18n/locales/th-TH/translation.json | 14 +++++++++++-- src/lib/i18n/locales/tk-TW/translation.json | 14 +++++++++++-- src/lib/i18n/locales/tr-TR/translation.json | 14 +++++++++++-- src/lib/i18n/locales/uk-UA/translation.json | 14 +++++++++++-- src/lib/i18n/locales/ur-PK/translation.json | 14 +++++++++++-- src/lib/i18n/locales/vi-VN/translation.json | 14 +++++++++++-- src/lib/i18n/locales/zh-CN/translation.json | 14 +++++++++++-- src/lib/i18n/locales/zh-TW/translation.json | 14 +++++++++++-- 49 files changed, 595 insertions(+), 99 deletions(-) diff --git a/src/lib/i18n/locales/ar-BH/translation.json b/src/lib/i18n/locales/ar-BH/translation.json index d9576d41a19..a43af5d359a 100644 --- a/src/lib/i18n/locales/ar-BH/translation.json +++ b/src/lib/i18n/locales/ar-BH/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "يتم استخدام نموذج المهمة عند تنفيذ مهام مثل إنشاء عناوين للدردشات واستعلامات بحث الويب", "a user": "مستخدم", "About": "عن", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "", "Access Control": "", "Accessible to all users": "", @@ -93,7 +94,7 @@ "Artifacts": "", "Ask a question": "", "Assistant": "", - "Attach file": "أرفق ملف", + "Attach file from knowledge": "", "Attention to detail": "انتبه للتفاصيل", "Attribute for Mail": "", "Attribute for Username": "", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", "Document": "المستند", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "", "Documents": "مستندات", "does not make any 
external connections, and your data stays securely on your locally hosted server.": "لا يجري أي اتصالات خارجية، وتظل بياناتك آمنة على الخادم المستضاف محليًا.", @@ -354,6 +357,7 @@ "Enable Message Rating": "", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", "Enable New Sign Ups": "تفعيل عمليات التسجيل الجديدة", + "Enable OneDrive": "", "Enable Web Search": "تمكين بحث الويب", "Enabled": "", "Engine": "", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "أدخل الChunk Overlap", "Enter Chunk Size": "أدخل Chunk الحجم", "Enter description": "", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "أدخل عنوان URL ل Github Raw", @@ -510,6 +516,7 @@ "General Settings": "الاعدادات العامة", "Generate an image": "", "Generate Image": "", + "Generate prompt pair": "", "Generating search query": "إنشاء استعلام بحث", "Get started": "", "Get started with {{WEBUI_NAME}}": "", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "", "Local Models": "", + "Location access not allowed": "", "Lost": "", "LTR": "من جهة اليسار إلى اليمين", "Made by Open WebUI Community": "OpenWebUI تم إنشاؤه بواسطة مجتمع ", @@ -718,6 +726,7 @@ "Ollama API settings updated": "", "Ollama Version": "Ollama الاصدار", "On": "تشغيل", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "", "Only alphanumeric characters and hyphens are allowed in the command string.": "يُسمح فقط بالأحرف الأبجدية الرقمية والواصلات في سلسلة الأمر.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "", "Select Knowledge": "", - "Select model": " أختار موديل", "Select only one model to call": "", "Selected model(s) do not support image inputs": "النموذج (النماذج) المحددة لا تدعم مدخلات 
الصور", "Semantic distance to query": "", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Talk to model": "", "Tap to interrupt": "", "Tasks": "", "Tavily API Key": "", @@ -1041,6 +1050,7 @@ "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "هل تواجه مشكلة في الوصول", + "Trust Proxy Environment": "", "TTS Model": "", "TTS Settings": "TTS اعدادات", "TTS Voice": "", diff --git a/src/lib/i18n/locales/bg-BG/translation.json b/src/lib/i18n/locales/bg-BG/translation.json index eb06681f809..7e0f8bb49cc 100644 --- a/src/lib/i18n/locales/bg-BG/translation.json +++ b/src/lib/i18n/locales/bg-BG/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "Моделът на задачите се използва при изпълнение на задачи като генериране на заглавия за чатове и заявки за търсене в мрежата", "a user": "потребител", "About": "Относно", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "Достъп", "Access Control": "Контрол на достъпа", "Accessible to all users": "Достъпно за всички потребители", @@ -93,7 +94,7 @@ "Artifacts": "Артефакти", "Ask a question": "Задайте въпрос", "Assistant": "Асистент", - "Attach file": "Прикачване на файл", + "Attach file from knowledge": "", "Attention to detail": "Внимание към детайлите", "Attribute for Mail": "Атрибут за поща", "Attribute for Username": "Атрибут за потребителско име", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "Не инсталирайте функции от източници, на които не се доверявате напълно.", "Do not install tools from sources you do not fully trust.": "Не инсталирайте инструменти от източници, на които не се доверявате напълно.", "Document": "Документ", + 
"Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "Документация", "Documents": "Документи", "does not make any external connections, and your data stays securely on your locally hosted server.": "няма външни връзки, и вашите данни остават сигурни на локално назначен сървър.", @@ -354,6 +357,7 @@ "Enable Message Rating": "Активиране на оценяване на съобщения", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Активиране на Mirostat семплиране за контрол на перплексията. (По подразбиране: 0, 0 = Деактивирано, 1 = Mirostat, 2 = Mirostat 2.0)", "Enable New Sign Ups": "Включване на нови регистрации", + "Enable OneDrive": "", "Enable Web Search": "Разрешаване на търсене в уеб", "Enabled": "Активирано", "Engine": "Двигател", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "Въведете припокриване на чънкове", "Enter Chunk Size": "Въведете размер на чънк", "Enter description": "Въведете описание", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "Въведете домейни, разделени със запетаи (напр. 
example.com,site.org)", "Enter Exa API Key": "Въведете API ключ за Exa", "Enter Github Raw URL": "Въведете URL адрес на Github Raw", @@ -510,6 +516,7 @@ "General Settings": "Основни Настройки", "Generate an image": "Генериране на изображение", "Generate Image": "Генериране на изображение", + "Generate prompt pair": "", "Generating search query": "Генериране на заявка за търсене", "Get started": "Започнете", "Get started with {{WEBUI_NAME}}": "Започнете с {{WEBUI_NAME}}", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "Зареждане на Kokoro.js...", "Local": "Локално", "Local Models": "Локални модели", + "Location access not allowed": "", "Lost": "Изгубено", "LTR": "LTR", "Made by Open WebUI Community": "Направено от OpenWebUI общността", @@ -718,6 +726,7 @@ "Ollama API settings updated": "Настройките на Ollama API са актуализирани", "Ollama Version": "Ollama Версия", "On": "Вкл.", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "Разрешени са само буквено-цифрови знаци и тирета", "Only alphanumeric characters and hyphens are allowed in the command string.": "Само алфанумерични знаци и тире са разрешени в командния низ.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "Само колекции могат да бъдат редактирани, създайте нова база от знания, за да редактирате/добавяте документи.", @@ -880,7 +889,6 @@ "Select an Ollama instance": "Изберете инстанция на Ollama", "Select Engine": "Изберете двигател", "Select Knowledge": "Изберете знание", - "Select model": "Изберете модел", "Select only one model to call": "Изберете само един модел за извикване", "Selected model(s) do not support image inputs": "Избраният(те) модел(и) не поддържа въвеждане на изображения", "Semantic distance to query": "Семантично разстояние до заявката", @@ -957,6 +965,7 @@ "Tags Generation": "Генериране на тагове", "Tags Generation Prompt": "Промпт за генериране на тагове", "Tail free sampling is used to reduce the impact of less probable tokens 
from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Безопашковото семплиране се използва за намаляване на влиянието на по-малко вероятните токени от изхода. По-висока стойност (напр. 2.0) ще намали влиянието повече, докато стойност 1.0 деактивира тази настройка. (по подразбиране: 1)", + "Talk to model": "", "Tap to interrupt": "Докоснете за прекъсване", "Tasks": "Задачи", "Tavily API Key": "Tavily API Ключ", @@ -1041,6 +1050,7 @@ "Top P": "Топ P", "Transformers": "Трансформатори", "Trouble accessing Ollama?": "Проблеми с достъпа до Ollama?", + "Trust Proxy Environment": "", "TTS Model": "TTS Модел", "TTS Settings": "TTS Настройки", "TTS Voice": "TTS Глас", diff --git a/src/lib/i18n/locales/bn-BD/translation.json b/src/lib/i18n/locales/bn-BD/translation.json index 8ca1ac0d36f..adae983ce03 100644 --- a/src/lib/i18n/locales/bn-BD/translation.json +++ b/src/lib/i18n/locales/bn-BD/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "চ্যাট এবং ওয়েব অনুসন্ধান প্রশ্নের জন্য শিরোনাম তৈরি করার মতো কাজগুলি সম্পাদন করার সময় একটি টাস্ক মডেল ব্যবহার করা হয়", "a user": "একজন ব্যাবহারকারী", "About": "সম্পর্কে", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "", "Access Control": "", "Accessible to all users": "", @@ -93,7 +94,7 @@ "Artifacts": "", "Ask a question": "", "Assistant": "", - "Attach file": "ফাইল যুক্ত করুন", + "Attach file from knowledge": "", "Attention to detail": "বিস্তারিত বিশেষতা", "Attribute for Mail": "", "Attribute for Username": "", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", "Document": "ডকুমেন্ট", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "", "Documents": "ডকুমেন্টসমূহ", "does not make any 
external connections, and your data stays securely on your locally hosted server.": "কোন এক্সটার্নাল কানেকশন তৈরি করে না, এবং আপনার ডেটা আর লোকালি হোস্টেড সার্ভারেই নিরাপদে থাকে।", @@ -354,6 +357,7 @@ "Enable Message Rating": "", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", "Enable New Sign Ups": "নতুন সাইনআপ চালু করুন", + "Enable OneDrive": "", "Enable Web Search": "ওয়েব অনুসন্ধান সক্ষম করুন", "Enabled": "", "Engine": "", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "চাঙ্ক ওভারল্যাপ লিখুন", "Enter Chunk Size": "চাংক সাইজ লিখুন", "Enter description": "", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "গিটহাব কাঁচা URL লিখুন", @@ -510,6 +516,7 @@ "General Settings": "সাধারণ সেটিংসমূহ", "Generate an image": "", "Generate Image": "", + "Generate prompt pair": "", "Generating search query": "অনুসন্ধান ক্যোয়ারী তৈরি করা হচ্ছে", "Get started": "", "Get started with {{WEBUI_NAME}}": "", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "", "Local Models": "", + "Location access not allowed": "", "Lost": "", "LTR": "LTR", "Made by Open WebUI Community": "OpenWebUI কমিউনিটিকর্তৃক নির্মিত", @@ -718,6 +726,7 @@ "Ollama API settings updated": "", "Ollama Version": "Ollama ভার্সন", "On": "চালু", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "", "Only alphanumeric characters and hyphens are allowed in the command string.": "কমান্ড স্ট্রিং-এ শুধুমাত্র ইংরেজি অক্ষর, সংখ্যা এবং হাইফেন ব্যবহার করা যাবে।", "Only collections can be edited, create a new knowledge base to edit/add documents.": "", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "", "Select Knowledge": "", - "Select model": "মডেল নির্বাচন করুন", "Select only one model to call": "", "Selected model(s) do not support image inputs": 
"নির্বাচিত মডেল(গুলি) চিত্র ইনপুট সমর্থন করে না", "Semantic distance to query": "", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Talk to model": "", "Tap to interrupt": "", "Tasks": "", "Tavily API Key": "", @@ -1041,6 +1050,7 @@ "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "Ollama এক্সেস করতে সমস্যা হচ্ছে?", + "Trust Proxy Environment": "", "TTS Model": "", "TTS Settings": "TTS সেটিংসমূহ", "TTS Voice": "", diff --git a/src/lib/i18n/locales/ca-ES/translation.json b/src/lib/i18n/locales/ca-ES/translation.json index 963fdcd609f..70d5ead0cea 100644 --- a/src/lib/i18n/locales/ca-ES/translation.json +++ b/src/lib/i18n/locales/ca-ES/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "Un model de tasca s'utilitza quan es realitzen tasques com ara generar títols per a xats i consultes de cerca per a la web", "a user": "un usuari", "About": "Sobre", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "Accés", "Access Control": "Control d'accés", "Accessible to all users": "Accessible a tots els usuaris", @@ -93,7 +94,7 @@ "Artifacts": "Artefactes", "Ask a question": "Fer una pregunta", "Assistant": "Assistent", - "Attach file": "Adjuntar arxiu", + "Attach file from knowledge": "", "Attention to detail": "Atenció al detall", "Attribute for Mail": "Atribut per al Correu", "Attribute for Username": "Atribut per al Nom d'usuari", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "No instal·lis funcions de fonts en què no confiïs plenament.", "Do not install tools from sources you do not fully trust.": "No instal·lis eines de fonts en què no confiïs plenament.", "Document": 
"Document", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "Documentació", "Documents": "Documents", "does not make any external connections, and your data stays securely on your locally hosted server.": "no realitza connexions externes, i les teves dades romanen segures al teu servidor allotjat localment.", @@ -354,6 +357,7 @@ "Enable Message Rating": "Permetre la qualificació de missatges", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Activar el mostreig de Mirostat per controlar la perplexitat. (Per defecte: 0, 0 = Inhabilitat, 1 = Mirostat, 2 = Mirostat 2.0)", "Enable New Sign Ups": "Permetre nous registres", + "Enable OneDrive": "", "Enable Web Search": "Activar la cerca web", "Enabled": "Habilitat", "Engine": "Motor", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "Introdueix la mida de solapament de blocs", "Enter Chunk Size": "Introdueix la mida del bloc", "Enter description": "Introdueix la descripció", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "Introdueix els dominis separats per comes (p. ex. 
example.com,site.org)", "Enter Exa API Key": "Introdueix la clau API de d'EXA", "Enter Github Raw URL": "Introdueix l'URL en brut de Github", @@ -510,6 +516,7 @@ "General Settings": "Preferències generals", "Generate an image": "Generar una imatge", "Generate Image": "Generar imatge", + "Generate prompt pair": "", "Generating search query": "Generant consulta", "Get started": "Començar", "Get started with {{WEBUI_NAME}}": "Començar amb {{WEBUI_NAME}}", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "Carregant Kokoro.js", "Local": "Local", "Local Models": "Models locals", + "Location access not allowed": "", "Lost": "Perdut", "LTR": "LTR", "Made by Open WebUI Community": "Creat per la Comunitat OpenWebUI", @@ -718,6 +726,7 @@ "Ollama API settings updated": "La configuració de l'API d'Ollama s'ha actualitzat", "Ollama Version": "Versió d'Ollama", "On": "Activat", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "Només es permeten caràcters alfanumèrics i guions", "Only alphanumeric characters and hyphens are allowed in the command string.": "Només es permeten caràcters alfanumèrics i guions en la comanda.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "Només es poden editar col·leccions, crea una nova base de coneixement per editar/afegir documents.", @@ -880,7 +889,6 @@ "Select an Ollama instance": "Seleccionar una instància d'Ollama", "Select Engine": "Seleccionar el motor", "Select Knowledge": "Seleccionar coneixement", - "Select model": "Seleccionar un model", "Select only one model to call": "Seleccionar només un model per trucar", "Selected model(s) do not support image inputs": "El(s) model(s) seleccionats no admeten l'entrada d'imatges", "Semantic distance to query": "Distància semàntica a la pregunta", @@ -957,6 +965,7 @@ "Tags Generation": "Generació d'etiquetes", "Tags Generation Prompt": "Indicació per a la generació d'etiquetes", "Tail free sampling is used to reduce the impact of less 
probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "El mostreig sense cua s'utilitza per reduir l'impacte de tokens menys probables de la sortida. Un valor més alt (p. ex., 2,0) reduirà més l'impacte, mentre que un valor d'1,0 desactiva aquesta configuració. (per defecte: 1)", + "Talk to model": "", "Tap to interrupt": "Prem per interrompre", "Tasks": "Tasques", "Tavily API Key": "Clau API de Tavily", @@ -1041,6 +1050,7 @@ "Top P": "Top P", "Transformers": "Transformadors", "Trouble accessing Ollama?": "Problemes en accedir a Ollama?", + "Trust Proxy Environment": "", "TTS Model": "Model TTS", "TTS Settings": "Preferències de TTS", "TTS Voice": "Veu TTS", diff --git a/src/lib/i18n/locales/ceb-PH/translation.json b/src/lib/i18n/locales/ceb-PH/translation.json index daf88063731..9a30825436f 100644 --- a/src/lib/i18n/locales/ceb-PH/translation.json +++ b/src/lib/i18n/locales/ceb-PH/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "", "a user": "usa ka user", "About": "Mahitungod sa", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "", "Access Control": "", "Accessible to all users": "", @@ -93,7 +94,7 @@ "Artifacts": "", "Ask a question": "", "Assistant": "", - "Attach file": "Ilakip ang usa ka file", + "Attach file from knowledge": "", "Attention to detail": "Pagtagad sa mga detalye", "Attribute for Mail": "", "Attribute for Username": "", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", "Document": "Dokumento", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "", "Documents": "Mga dokumento", "does not make any external connections, and your data stays securely on your locally hosted server.": 
"wala maghimo ug eksternal nga koneksyon, ug ang imong data nagpabiling luwas sa imong lokal nga host server.", @@ -354,6 +357,7 @@ "Enable Message Rating": "", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", "Enable New Sign Ups": "I-enable ang bag-ong mga rehistro", + "Enable OneDrive": "", "Enable Web Search": "", "Enabled": "", "Engine": "", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "Pagsulod sa block overlap", "Enter Chunk Size": "Isulod ang block size", "Enter description": "", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "", @@ -510,6 +516,7 @@ "General Settings": "kinatibuk-ang mga setting", "Generate an image": "", "Generate Image": "", + "Generate prompt pair": "", "Generating search query": "", "Get started": "", "Get started with {{WEBUI_NAME}}": "", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "", "Local Models": "", + "Location access not allowed": "", "Lost": "", "LTR": "", "Made by Open WebUI Community": "Gihimo sa komunidad sa OpenWebUI", @@ -718,6 +726,7 @@ "Ollama API settings updated": "", "Ollama Version": "Ollama nga bersyon", "On": "Gipaandar", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "", "Only alphanumeric characters and hyphens are allowed in the command string.": "Ang alphanumeric nga mga karakter ug hyphen lang ang gitugotan sa command string.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "", "Select Knowledge": "", - "Select model": "Pagpili og modelo", "Select only one model to call": "", "Selected model(s) do not support image inputs": "", "Semantic distance to query": "", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "", 
"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Talk to model": "", "Tap to interrupt": "", "Tasks": "", "Tavily API Key": "", @@ -1041,6 +1050,7 @@ "Top P": "Ibabaw nga P", "Transformers": "", "Trouble accessing Ollama?": "Adunay mga problema sa pag-access sa Ollama?", + "Trust Proxy Environment": "", "TTS Model": "", "TTS Settings": "Mga Setting sa TTS", "TTS Voice": "", diff --git a/src/lib/i18n/locales/cs-CZ/translation.json b/src/lib/i18n/locales/cs-CZ/translation.json index d7375910611..c832aaf2876 100644 --- a/src/lib/i18n/locales/cs-CZ/translation.json +++ b/src/lib/i18n/locales/cs-CZ/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "Model úloh se používá při provádění úloh, jako je generování názvů pro chaty a vyhledávací dotazy na webu.", "a user": "uživatel", "About": "O programu", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "Přístup", "Access Control": "", "Accessible to all users": "Přístupné pro všecny uživatele", @@ -93,7 +94,7 @@ "Artifacts": "Artefakty", "Ask a question": "Zeptejte se na otázku", "Assistant": "Ano, jak vám mohu pomoci?", - "Attach file": "Připojit soubor", + "Attach file from knowledge": "", "Attention to detail": "Pozornost k detailům", "Attribute for Mail": "", "Attribute for Username": "", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "Neinstalujte funkce ze zdrojů, kterým plně nedůvěřujete.", "Do not install tools from sources you do not fully trust.": "Neinstalujte nástroje ze zdrojů, kterým plně nedůvěřujete.", "Document": "Dokument", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "Dokumentace", "Documents": "Dokumenty", "does not make any external 
connections, and your data stays securely on your locally hosted server.": "nevytváří žádná externí připojení a vaše data zůstávají bezpečně na vašem lokálním serveru.", @@ -354,6 +357,7 @@ "Enable Message Rating": "Povolit hodnocení zpráv", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", "Enable New Sign Ups": "Povolit nové registrace", + "Enable OneDrive": "", "Enable Web Search": "Povolit webové vyhledávání", "Enabled": "Povoleno", "Engine": "Engine", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "Zadejte překryv části", "Enter Chunk Size": "Zadejte velikost bloku", "Enter description": "Zadejte popis", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "Zadejte URL adresu Github Raw", @@ -510,6 +516,7 @@ "General Settings": "Obecná nastavení", "Generate an image": "", "Generate Image": "Vygenerovat obrázek", + "Generate prompt pair": "", "Generating search query": "Generování vyhledávacího dotazu", "Get started": "", "Get started with {{WEBUI_NAME}}": "", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "", "Local Models": "Lokální modely", + "Location access not allowed": "", "Lost": "Ztracený", "LTR": "LTR", "Made by Open WebUI Community": "Vytvořeno komunitou OpenWebUI", @@ -718,6 +726,7 @@ "Ollama API settings updated": "", "Ollama Version": "Verze Ollama", "On": "Na", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "", "Only alphanumeric characters and hyphens are allowed in the command string.": "Příkazový řetězec smí obsahovat pouze alfanumerické znaky a pomlčky.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "Pouze kolekce mohou být upravovány, pro úpravu/přidání dokumentů vytvořte novou znalostní bázi.", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", 
"Select Engine": "Vyberte engine", "Select Knowledge": "Vybrat znalosti", - "Select model": "Vyberte model", "Select only one model to call": "Vyberte pouze jeden model, který chcete použít", "Selected model(s) do not support image inputs": "Vybraný(é) model(y) nepodporují vstupy v podobě obrázků.", "Semantic distance to query": "Semantická vzdálenost k dotazu", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "Prompt pro generování značek", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Talk to model": "", "Tap to interrupt": "Klepněte pro přerušení", "Tasks": "", "Tavily API Key": "Klíč API pro Tavily", @@ -1041,6 +1050,7 @@ "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "Máte potíže s přístupem k Ollama?", + "Trust Proxy Environment": "", "TTS Model": "Model převodu textu na řeč (TTS)", "TTS Settings": "Nastavení TTS (Text-to-Speech)", "TTS Voice": "TTS hlas", diff --git a/src/lib/i18n/locales/da-DK/translation.json b/src/lib/i18n/locales/da-DK/translation.json index 43fbd40267f..b58b62e0b7e 100644 --- a/src/lib/i18n/locales/da-DK/translation.json +++ b/src/lib/i18n/locales/da-DK/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "En 'task model' bliver brugt til at opgaver såsom at generere overskrifter til chats eller internetsøgninger", "a user": "en bruger", "About": "Information", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "", "Access Control": "", "Accessible to all users": "", @@ -93,7 +94,7 @@ "Artifacts": "Artifakter", "Ask a question": "Stil et spørgsmål", "Assistant": "", - "Attach file": "Vedhæft fil", + "Attach file from knowledge": "", "Attention to detail": "Detajleorientering", "Attribute for Mail": "", "Attribute for 
Username": "", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "Lad være med at installere funktioner fra kilder, som du ikke stoler på.", "Do not install tools from sources you do not fully trust.": "Lad være med at installere værktøjer fra kilder, som du ikke stoler på.", "Document": "Dokument", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "Dokumentation", "Documents": "Dokumenter", "does not make any external connections, and your data stays securely on your locally hosted server.": "laver ikke eksterne kald, og din data bliver sikkert på din egen lokalt hostede server.", @@ -354,6 +357,7 @@ "Enable Message Rating": "Aktiver rating af besked", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", "Enable New Sign Ups": "Aktiver nye signups", + "Enable OneDrive": "", "Enable Web Search": "Aktiver websøgning", "Enabled": "Aktiveret", "Engine": "engine", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "Indtast overlapning af tekststykker", "Enter Chunk Size": "Indtast størrelse af tekststykker", "Enter description": "", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "Indtast Github Raw URL", @@ -510,6 +516,7 @@ "General Settings": "Generelle indstillinger", "Generate an image": "", "Generate Image": "Generer billede", + "Generate prompt pair": "", "Generating search query": "Genererer søgeforespørgsel", "Get started": "", "Get started with {{WEBUI_NAME}}": "", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "", "Local Models": "Lokale modeller", + "Location access not allowed": "", "Lost": "", "LTR": "LTR", "Made by Open WebUI Community": "Lavet af OpenWebUI Community", @@ -718,6 +726,7 @@ "Ollama API settings updated": "", "Ollama Version": 
"Ollama-version", "On": "Til", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "", "Only alphanumeric characters and hyphens are allowed in the command string.": "Kun alfanumeriske tegn og bindestreger er tilladt i kommandostrengen.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "Kun samlinger kan redigeres, opret en ny vidensbase for at redigere/tilføje dokumenter.", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "Vælg engine", "Select Knowledge": "Vælg viden", - "Select model": "Vælg model", "Select only one model to call": "Vælg kun én model at kalde", "Selected model(s) do not support image inputs": "Valgte model(ler) understøtter ikke billedinput", "Semantic distance to query": "", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. 
(default: 1)": "", + "Talk to model": "", "Tap to interrupt": "Tryk for at afbryde", "Tasks": "", "Tavily API Key": "Tavily API-nøgle", @@ -1041,6 +1050,7 @@ "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "Problemer med at få adgang til Ollama?", + "Trust Proxy Environment": "", "TTS Model": "TTS-model", "TTS Settings": "TTS-indstillinger", "TTS Voice": "TTS-stemme", diff --git a/src/lib/i18n/locales/de-DE/translation.json b/src/lib/i18n/locales/de-DE/translation.json index cab53b54f1b..38bb180c862 100644 --- a/src/lib/i18n/locales/de-DE/translation.json +++ b/src/lib/i18n/locales/de-DE/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "Aufgabenmodelle können Unterhaltungstitel oder Websuchanfragen generieren.", "a user": "ein Benutzer", "About": "Über", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "Zugang", "Access Control": "Zugangskontrolle", "Accessible to all users": "Für alle Benutzer zugänglich", @@ -93,7 +94,7 @@ "Artifacts": "Artefakte", "Ask a question": "Stellen Sie eine Frage", "Assistant": "Assistent", - "Attach file": "Datei anhängen", + "Attach file from knowledge": "", "Attention to detail": "Aufmerksamkeit für Details", "Attribute for Mail": "Attribut für E-Mail", "Attribute for Username": "Attribut für Benutzername", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "Installieren Sie keine Funktionen aus Quellen, denen Sie nicht vollständig vertrauen.", "Do not install tools from sources you do not fully trust.": "Installieren Sie keine Werkzeuge aus Quellen, denen Sie nicht vollständig vertrauen.", "Document": "Dokument", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "Dokumentation", "Documents": "Dokumente", "does not make any external connections, and your data stays securely on your locally hosted server.": 
"stellt keine externen Verbindungen her, und Ihre Daten bleiben sicher auf Ihrem lokal gehosteten Server.", @@ -354,6 +357,7 @@ "Enable Message Rating": "Nachrichtenbewertung aktivieren", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Mirostat Sampling zur Steuerung der Perplexität aktivieren. (Standard: 0, 0 = Deaktiviert, 1 = Mirostat, 2 = Mirostat 2.0)", "Enable New Sign Ups": "Registrierung erlauben", + "Enable OneDrive": "", "Enable Web Search": "Websuche aktivieren", "Enabled": "Aktiviert", "Engine": "Engine", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "Geben Sie die Blocküberlappung ein", "Enter Chunk Size": "Geben Sie die Blockgröße ein", "Enter description": "Geben Sie eine Beschreibung ein", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "Geben Sie den Exa-API-Schlüssel ein", "Enter Github Raw URL": "Geben Sie die Github Raw-URL ein", @@ -510,6 +516,7 @@ "General Settings": "Allgemeine Einstellungen", "Generate an image": "Bild erzeugen", "Generate Image": "Bild erzeugen", + "Generate prompt pair": "", "Generating search query": "Suchanfrage wird erstellt", "Get started": "Loslegen", "Get started with {{WEBUI_NAME}}": "Loslegen mit {{WEBUI_NAME}}", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "Lokal", "Local Models": "Lokale Modelle", + "Location access not allowed": "", "Lost": "Verloren", "LTR": "LTR", "Made by Open WebUI Community": "Von der OpenWebUI-Community", @@ -718,6 +726,7 @@ "Ollama API settings updated": "Ollama-API-Einstellungen aktualisiert", "Ollama Version": "Ollama-Version", "On": "Ein", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "Nur alphanumerische Zeichen und Bindestriche sind erlaubt", "Only alphanumeric characters and hyphens are allowed in the command string.": "In der Befehlszeichenfolge sind 
nur alphanumerische Zeichen und Bindestriche erlaubt.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "Nur Sammlungen können bearbeitet werden. Erstellen Sie eine neue Wissensbasis, um Dokumente zu bearbeiten/hinzuzufügen.", @@ -880,7 +889,6 @@ "Select an Ollama instance": "Wählen Sie eine Ollama-Instanz", "Select Engine": "Engine auswählen", "Select Knowledge": "Wissensdatenbank auswählen", - "Select model": "Modell auswählen", "Select only one model to call": "Wählen Sie nur ein Modell zum Anrufen aus", "Selected model(s) do not support image inputs": "Ihre ausgewählten Modelle unterstützen keine Bildeingaben", "Semantic distance to query": "Semantische Distanz zur Abfrage", @@ -957,6 +965,7 @@ "Tags Generation": "Tag-Generierung", "Tags Generation Prompt": "Prompt für Tag-Generierung", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Tail-Free Sampling wird verwendet, um den Einfluss weniger wahrscheinlicher Tokens auf die Ausgabe zu reduzieren. Ein höherer Wert (z.B. 2.0) reduziert den Einfluss stärker, während ein Wert von 1.0 diese Einstellung deaktiviert. 
(Standard: 1)", + "Talk to model": "", "Tap to interrupt": "Zum Unterbrechen tippen", "Tasks": "", "Tavily API Key": "Tavily-API-Schlüssel", @@ -1041,6 +1050,7 @@ "Top P": "Top P", "Transformers": "Transformers", "Trouble accessing Ollama?": "Probleme beim Zugriff auf Ollama?", + "Trust Proxy Environment": "", "TTS Model": "TTS-Modell", "TTS Settings": "TTS-Einstellungen", "TTS Voice": "TTS-Stimme", diff --git a/src/lib/i18n/locales/dg-DG/translation.json b/src/lib/i18n/locales/dg-DG/translation.json index 132b5569895..5ac2802bd79 100644 --- a/src/lib/i18n/locales/dg-DG/translation.json +++ b/src/lib/i18n/locales/dg-DG/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "", "a user": "such user", "About": "Much About", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "", "Access Control": "", "Accessible to all users": "", @@ -93,7 +94,7 @@ "Artifacts": "", "Ask a question": "", "Assistant": "", - "Attach file": "Attach file", + "Attach file from knowledge": "", "Attention to detail": "", "Attribute for Mail": "", "Attribute for Username": "", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", "Document": "Document", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "", "Documents": "Documents", "does not make any external connections, and your data stays securely on your locally hosted server.": "does not connect external, data stays safe locally.", @@ -354,6 +357,7 @@ "Enable Message Rating": "", "Enable Mirostat sampling for controlling perplexity. 
(Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", "Enable New Sign Ups": "Enable New Bark Ups", + "Enable OneDrive": "", "Enable Web Search": "", "Enabled": "", "Engine": "", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "Enter Overlap of Chunks", "Enter Chunk Size": "Enter Size of Chunk", "Enter description": "", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "", @@ -510,6 +516,7 @@ "General Settings": "General Doge Settings", "Generate an image": "", "Generate Image": "", + "Generate prompt pair": "", "Generating search query": "", "Get started": "", "Get started with {{WEBUI_NAME}}": "", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "", "Local Models": "", + "Location access not allowed": "", "Lost": "", "LTR": "", "Made by Open WebUI Community": "Made by Open WebUI Community", @@ -718,6 +726,7 @@ "Ollama API settings updated": "", "Ollama Version": "Ollama Version", "On": "On", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "", "Only alphanumeric characters and hyphens are allowed in the command string.": "Only wow characters and hyphens are allowed in the bork string.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "", "Select Knowledge": "", - "Select model": "Select model much choice", "Select only one model to call": "", "Selected model(s) do not support image inputs": "", "Semantic distance to query": "", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. 
(default: 1)": "", + "Talk to model": "", "Tap to interrupt": "", "Tasks": "", "Tavily API Key": "", @@ -1041,6 +1050,7 @@ "Top P": "Top P very top", "Transformers": "", "Trouble accessing Ollama?": "Trouble accessing Ollama? Much trouble?", + "Trust Proxy Environment": "", "TTS Model": "", "TTS Settings": "TTS Settings much settings", "TTS Voice": "", diff --git a/src/lib/i18n/locales/el-GR/translation.json b/src/lib/i18n/locales/el-GR/translation.json index cd8c5e9a732..26cd69b005e 100644 --- a/src/lib/i18n/locales/el-GR/translation.json +++ b/src/lib/i18n/locales/el-GR/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "Ένα μοντέλο εργασίας χρησιμοποιείται κατά την εκτέλεση εργασιών όπως η δημιουργία τίτλων για συνομιλίες και αναζητήσεις στο διαδίκτυο", "a user": "ένας χρήστης", "About": "Σχετικά", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "Πρόσβαση", "Access Control": "Έλεγχος Πρόσβασης", "Accessible to all users": "Προσβάσιμο σε όλους τους χρήστες", @@ -93,7 +94,7 @@ "Artifacts": "Αρχεία", "Ask a question": "Ρωτήστε μια ερώτηση", "Assistant": "Βοηθός", - "Attach file": "Συνημμένο αρχείο", + "Attach file from knowledge": "", "Attention to detail": "Προσοχή στη λεπτομέρεια", "Attribute for Mail": "", "Attribute for Username": "Ιδιότητα για Όνομα Χρήστη", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "Μην εγκαθιστάτε λειτουργίες από πηγές που δεν εμπιστεύεστε πλήρως.", "Do not install tools from sources you do not fully trust.": "Μην εγκαθιστάτε εργαλεία από πηγές που δεν εμπιστεύεστε πλήρως.", "Document": "Έγγραφο", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "Τεκμηρίωση", "Documents": "Έγγραφα", "does not make any external connections, and your data stays securely on your locally hosted server.": "δεν κάνει καμία εξωτερική σύνδεση, και τα 
δεδομένα σας παραμένουν ασφαλή στον τοπικά φιλοξενούμενο διακομιστή σας.", @@ -354,6 +357,7 @@ "Enable Message Rating": "Ενεργοποίηση Αξιολόγησης Μηνυμάτων", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Ενεργοποίηση δειγματοληψίας Mirostat για έλεγχο της περιπλοκότητας. (Προεπιλογή: 0, 0 = Απενεργοποιημένο, 1 = Mirostat, 2 = Mirostat 2.0)", "Enable New Sign Ups": "Ενεργοποίηση Νέων Εγγραφών", + "Enable OneDrive": "", "Enable Web Search": "Ενεργοποίηση Αναζήτησης στο Διαδίκτυο", "Enabled": "Ενεργοποιημένο", "Engine": "Μηχανή", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "Εισάγετε την Επικάλυψη Τμημάτων", "Enter Chunk Size": "Εισάγετε το Μέγεθος Τμημάτων", "Enter description": "Εισάγετε την περιγραφή", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "Εισάγετε το Github Raw URL", @@ -510,6 +516,7 @@ "General Settings": "Γενικές Ρυθμίσεις", "Generate an image": "", "Generate Image": "Δημιουργία Εικόνας", + "Generate prompt pair": "", "Generating search query": "Γενιά αναζήτησης ερώτησης", "Get started": "Ξεκινήστε", "Get started with {{WEBUI_NAME}}": "Ξεκινήστε με {{WEBUI_NAME}}", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "Τοπικό", "Local Models": "Τοπικά Μοντέλα", + "Location access not allowed": "", "Lost": "Χαμένος", "LTR": "LTR", "Made by Open WebUI Community": "Δημιουργήθηκε από την Κοινότητα OpenWebUI", @@ -718,6 +726,7 @@ "Ollama API settings updated": "Οι ρυθμίσεις API Ollama ενημερώθηκαν", "Ollama Version": "Έκδοση Ollama", "On": "On", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "Επιτρέπονται μόνο αλφαριθμητικοί χαρακτήρες και παύλες", "Only alphanumeric characters and hyphens are allowed in the command string.": "Επιτρέπονται μόνο αλφαριθμητικοί χαρακτήρες και παύλες στο string της 
εντολής.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "Μόνο συλλογές μπορούν να επεξεργαστούν, δημιουργήστε μια νέα βάση γνώσης για επεξεργασία/προσθήκη εγγράφων.", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "Επιλέξτε Μηχανή", "Select Knowledge": "Επιλέξτε Γνώση", - "Select model": "Επιλέξτε μοντέλο", "Select only one model to call": "Επιλέξτε μόνο ένα μοντέλο για κλήση", "Selected model(s) do not support image inputs": "Τα επιλεγμένα μοντέλα δεν υποστηρίζουν είσοδο εικόνων", "Semantic distance to query": "Σημαντική απόσταση προς την ερώτηση", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "Προτροπή Γενιάς Ετικετών", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Η δειγματοληψία Tail free χρησιμοποιείται για να μειώσει την επίδραση των λιγότερο πιθανών tokens από την έξοδο. Μια υψηλότερη τιμή (π.χ., 2.0) θα μειώσει την επίδραση περισσότερο, ενώ μια τιμή 1.0 απενεργοποιεί αυτή τη ρύθμιση. 
(προεπιλογή: 1)", + "Talk to model": "", "Tap to interrupt": "Πατήστε για παύση", "Tasks": "", "Tavily API Key": "Κλειδί API Tavily", @@ -1041,6 +1050,7 @@ "Top P": "Top P", "Transformers": "Transformers", "Trouble accessing Ollama?": "Προβλήματα πρόσβασης στο Ollama?", + "Trust Proxy Environment": "", "TTS Model": "Μοντέλο TTS", "TTS Settings": "Ρυθμίσεις TTS", "TTS Voice": "Φωνή TTS", diff --git a/src/lib/i18n/locales/en-GB/translation.json b/src/lib/i18n/locales/en-GB/translation.json index 9e52a557b2f..b77efe5b742 100644 --- a/src/lib/i18n/locales/en-GB/translation.json +++ b/src/lib/i18n/locales/en-GB/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "", "a user": "", "About": "", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "", "Access Control": "", "Accessible to all users": "", @@ -93,7 +94,7 @@ "Artifacts": "", "Ask a question": "", "Assistant": "", - "Attach file": "", + "Attach file from knowledge": "", "Attention to detail": "", "Attribute for Mail": "", "Attribute for Username": "", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", "Document": "", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "", "Documents": "", "does not make any external connections, and your data stays securely on your locally hosted server.": "", @@ -354,6 +357,7 @@ "Enable Message Rating": "", "Enable Mirostat sampling for controlling perplexity. 
(Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", "Enable New Sign Ups": "", + "Enable OneDrive": "", "Enable Web Search": "", "Enabled": "", "Engine": "", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "", "Enter Chunk Size": "", "Enter description": "", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "", @@ -510,6 +516,7 @@ "General Settings": "", "Generate an image": "", "Generate Image": "", + "Generate prompt pair": "", "Generating search query": "", "Get started": "", "Get started with {{WEBUI_NAME}}": "", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "", "Local Models": "", + "Location access not allowed": "", "Lost": "", "LTR": "", "Made by Open WebUI Community": "", @@ -718,6 +726,7 @@ "Ollama API settings updated": "", "Ollama Version": "", "On": "", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "", "Only alphanumeric characters and hyphens are allowed in the command string.": "", "Only collections can be edited, create a new knowledge base to edit/add documents.": "", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "", "Select Knowledge": "", - "Select model": "", "Select only one model to call": "", "Selected model(s) do not support image inputs": "", "Semantic distance to query": "", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. 
(default: 1)": "", + "Talk to model": "", "Tap to interrupt": "", "Tasks": "", "Tavily API Key": "", @@ -1041,6 +1050,7 @@ "Top P": "", "Transformers": "", "Trouble accessing Ollama?": "", + "Trust Proxy Environment": "", "TTS Model": "", "TTS Settings": "", "TTS Voice": "", diff --git a/src/lib/i18n/locales/en-US/translation.json b/src/lib/i18n/locales/en-US/translation.json index 9e52a557b2f..b77efe5b742 100644 --- a/src/lib/i18n/locales/en-US/translation.json +++ b/src/lib/i18n/locales/en-US/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "", "a user": "", "About": "", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "", "Access Control": "", "Accessible to all users": "", @@ -93,7 +94,7 @@ "Artifacts": "", "Ask a question": "", "Assistant": "", - "Attach file": "", + "Attach file from knowledge": "", "Attention to detail": "", "Attribute for Mail": "", "Attribute for Username": "", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", "Document": "", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "", "Documents": "", "does not make any external connections, and your data stays securely on your locally hosted server.": "", @@ -354,6 +357,7 @@ "Enable Message Rating": "", "Enable Mirostat sampling for controlling perplexity. 
(Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", "Enable New Sign Ups": "", + "Enable OneDrive": "", "Enable Web Search": "", "Enabled": "", "Engine": "", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "", "Enter Chunk Size": "", "Enter description": "", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "", @@ -510,6 +516,7 @@ "General Settings": "", "Generate an image": "", "Generate Image": "", + "Generate prompt pair": "", "Generating search query": "", "Get started": "", "Get started with {{WEBUI_NAME}}": "", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "", "Local Models": "", + "Location access not allowed": "", "Lost": "", "LTR": "", "Made by Open WebUI Community": "", @@ -718,6 +726,7 @@ "Ollama API settings updated": "", "Ollama Version": "", "On": "", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "", "Only alphanumeric characters and hyphens are allowed in the command string.": "", "Only collections can be edited, create a new knowledge base to edit/add documents.": "", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "", "Select Knowledge": "", - "Select model": "", "Select only one model to call": "", "Selected model(s) do not support image inputs": "", "Semantic distance to query": "", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. 
(default: 1)": "", + "Talk to model": "", "Tap to interrupt": "", "Tasks": "", "Tavily API Key": "", @@ -1041,6 +1050,7 @@ "Top P": "", "Transformers": "", "Trouble accessing Ollama?": "", + "Trust Proxy Environment": "", "TTS Model": "", "TTS Settings": "", "TTS Voice": "", diff --git a/src/lib/i18n/locales/es-ES/translation.json b/src/lib/i18n/locales/es-ES/translation.json index f819406f297..5edbb41f216 100644 --- a/src/lib/i18n/locales/es-ES/translation.json +++ b/src/lib/i18n/locales/es-ES/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "Un modelo de tareas se utiliza cuando se realizan tareas como la generación de títulos para chats y consultas de búsqueda web", "a user": "un usuario", "About": "Sobre nosotros", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "Acceso", "Access Control": "Control de Acceso", "Accessible to all users": "Accesible para todos los usuarios", @@ -93,7 +94,7 @@ "Artifacts": "Artefactos", "Ask a question": "Haz una pregunta", "Assistant": "Asistente", - "Attach file": "Adjuntar archivo", + "Attach file from knowledge": "", "Attention to detail": "Detalle preciso", "Attribute for Mail": "Atributo para correo", "Attribute for Username": "Atributo para el nombre de usuario", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "No instale funciones desde fuentes que no confíe totalmente.", "Do not install tools from sources you do not fully trust.": "No instale herramientas desde fuentes que no confíe totalmente.", "Document": "Documento", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "Documentación", "Documents": "Documentos", "does not make any external connections, and your data stays securely on your locally hosted server.": "no realiza ninguna conexión externa y sus datos permanecen seguros en su servidor alojado 
localmente.", @@ -354,6 +357,7 @@ "Enable Message Rating": "Habilitar la calificación de los mensajes", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Habilitar muestreo Mirostat para controlar la perplejidad. (Predeterminado: 0, 0 = Deshabilitado, 1 = Mirostat, 2 = Mirostat 2.0)", "Enable New Sign Ups": "Habilitar Nuevos Registros", + "Enable OneDrive": "", "Enable Web Search": "Habilitar la búsqueda web", "Enabled": "Activado", "Engine": "Motor", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "Ingresar superposición de fragmentos", "Enter Chunk Size": "Ingrese el tamaño del fragmento", "Enter description": "Ingrese la descripción", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "Ingrese la clave API de Exa", "Enter Github Raw URL": "Ingresa la URL sin procesar de Github", @@ -510,6 +516,7 @@ "General Settings": "Opciones Generales", "Generate an image": "Generar una imagen", "Generate Image": "Generar imagen", + "Generate prompt pair": "", "Generating search query": "Generación de consultas de búsqueda", "Get started": "Empezar", "Get started with {{WEBUI_NAME}}": "Empezar con {{WEBUI_NAME}}", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "Local", "Local Models": "Modelos locales", + "Location access not allowed": "", "Lost": "Perdido", "LTR": "LTR", "Made by Open WebUI Community": "Hecho por la comunidad de OpenWebUI", @@ -718,6 +726,7 @@ "Ollama API settings updated": "Configuración de Ollama API actualizada", "Ollama Version": "Versión de Ollama", "On": "Activado", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "Sólo se permiten caracteres alfanuméricos y guiones", "Only alphanumeric characters and hyphens are allowed in the command string.": "Sólo se permiten caracteres alfanuméricos y guiones en la cadena de comando.", "Only 
collections can be edited, create a new knowledge base to edit/add documents.": "Solo se pueden editar las colecciones, crear una nueva base de conocimientos para editar / añadir documentos", @@ -880,7 +889,6 @@ "Select an Ollama instance": "Seleccionar una instancia de Ollama", "Select Engine": "Selecciona Motor", "Select Knowledge": "Selecciona Conocimiento", - "Select model": "Selecciona un modelo", "Select only one model to call": "Selecciona sólo un modelo para llamar", "Selected model(s) do not support image inputs": "Los modelos seleccionados no admiten entradas de imagen", "Semantic distance to query": "Distancia semántica a la consulta", @@ -957,6 +965,7 @@ "Tags Generation": "Generación de etiquetas", "Tags Generation Prompt": "Prompt de generación de etiquetas", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "El muestreo libre de cola se utiliza para reducir el impacto de los tokens menos probables en la salida. Un valor más alto (p.ej., 2.0) reducirá el impacto más, mientras que un valor de 1.0 deshabilitará esta configuración. 
(predeterminado: 1)", + "Talk to model": "", "Tap to interrupt": "Toca para interrumpir", "Tasks": "", "Tavily API Key": "Clave API de Tavily", @@ -1041,6 +1050,7 @@ "Top P": "Top P", "Transformers": "Transformadores", "Trouble accessing Ollama?": "¿Problemas para acceder a Ollama?", + "Trust Proxy Environment": "", "TTS Model": "Modelo TTS", "TTS Settings": "Configuración de TTS", "TTS Voice": "Voz del TTS", diff --git a/src/lib/i18n/locales/eu-ES/translation.json b/src/lib/i18n/locales/eu-ES/translation.json index 6e481109af5..7841d5cbb35 100644 --- a/src/lib/i18n/locales/eu-ES/translation.json +++ b/src/lib/i18n/locales/eu-ES/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "Ataza eredua erabiltzen da txatentzako izenburuak eta web bilaketa kontsultak sortzeko bezalako atazak egitean", "a user": "erabiltzaile bat", "About": "Honi buruz", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "Sarbidea", "Access Control": "Sarbide Kontrola", "Accessible to all users": "Erabiltzaile guztientzat eskuragarri", @@ -93,7 +94,7 @@ "Artifacts": "Artefaktuak", "Ask a question": "Egin galdera bat", "Assistant": "Laguntzailea", - "Attach file": "Erantsi fitxategia", + "Attach file from knowledge": "", "Attention to detail": "Xehetasunei arreta", "Attribute for Mail": "", "Attribute for Username": "Erabiltzaile-izenerako atributua", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "Ez instalatu guztiz fidagarriak ez diren iturrietatik datozen funtzioak.", "Do not install tools from sources you do not fully trust.": "Ez instalatu guztiz fidagarriak ez diren iturrietatik datozen tresnak.", "Document": "Dokumentua", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "Dokumentazioa", "Documents": "Dokumentuak", "does not make any external connections, and your data stays 
securely on your locally hosted server.": "ez du kanpo konexiorik egiten, eta zure datuak modu seguruan mantentzen dira zure zerbitzari lokalean.", @@ -354,6 +357,7 @@ "Enable Message Rating": "Gaitu Mezuen Balorazioa", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Gaitu Mirostat laginketa nahasmena kontrolatzeko. (Lehenetsia: 0, 0 = Desgaituta, 1 = Mirostat, 2 = Mirostat 2.0)", "Enable New Sign Ups": "Gaitu Izena Emate Berriak", + "Enable OneDrive": "", "Enable Web Search": "Gaitu Web Bilaketa", "Enabled": "Gaituta", "Engine": "Motorea", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "Sartu Zatien Gainjartzea (chunk overlap)", "Enter Chunk Size": "Sartu Zati Tamaina", "Enter description": "Sartu deskribapena", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "Sartu Github Raw URLa", @@ -510,6 +516,7 @@ "General Settings": "Ezarpen Orokorrak", "Generate an image": "", "Generate Image": "Sortu Irudia", + "Generate prompt pair": "", "Generating search query": "Bilaketa kontsulta sortzen", "Get started": "Hasi", "Get started with {{WEBUI_NAME}}": "Hasi {{WEBUI_NAME}}-rekin", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "Lokala", "Local Models": "Modelo lokalak", + "Location access not allowed": "", "Lost": "Galduta", "LTR": "LTR", "Made by Open WebUI Community": "OpenWebUI Komunitateak egina", @@ -718,6 +726,7 @@ "Ollama API settings updated": "Ollama API ezarpenak eguneratu dira", "Ollama Version": "Ollama bertsioa", "On": "Piztuta", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "Karaktere alfanumerikoak eta marratxoak soilik onartzen dira", "Only alphanumeric characters and hyphens are allowed in the command string.": "Karaktere alfanumerikoak eta marratxoak soilik onartzen dira komando katean.", "Only 
collections can be edited, create a new knowledge base to edit/add documents.": "Bildumak soilik edita daitezke, sortu ezagutza-base berri bat dokumentuak editatzeko/gehitzeko.", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "Hautatu motorra", "Select Knowledge": "Hautatu ezagutza", - "Select model": "Hautatu modeloa", "Select only one model to call": "Hautatu modelo bakarra deitzeko", "Selected model(s) do not support image inputs": "Hautatutako modelo(e)k ez dute irudi sarrerarik onartzen", "Semantic distance to query": "Kontsultarako distantzia semantikoa", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "Etiketa sortzeko prompta", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Isats-libre laginketa erabiltzen da irteran probabilitate txikiagoko tokenen eragina murrizteko. Balio altuago batek (adib., 2.0) eragina gehiago murriztuko du, 1.0 balioak ezarpen hau desgaitzen duen bitartean. 
(lehenetsia: 1)", + "Talk to model": "", "Tap to interrupt": "Ukitu eteteko", "Tasks": "", "Tavily API Key": "Tavily API gakoa", @@ -1041,6 +1050,7 @@ "Top P": "Goiko P", "Transformers": "Transformatzaileak", "Trouble accessing Ollama?": "Arazoak Ollama atzitzeko?", + "Trust Proxy Environment": "", "TTS Model": "TTS modeloa", "TTS Settings": "TTS ezarpenak", "TTS Voice": "TTS ahotsa", diff --git a/src/lib/i18n/locales/fa-IR/translation.json b/src/lib/i18n/locales/fa-IR/translation.json index 3de2a4943f0..afeea877587 100644 --- a/src/lib/i18n/locales/fa-IR/translation.json +++ b/src/lib/i18n/locales/fa-IR/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "یک مدل وظیفه هنگام انجام وظایف مانند تولید عناوین برای چت ها و نمایش های جستجوی وب استفاده می شود.", "a user": "یک کاربر", "About": "درباره", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "", "Access Control": "", "Accessible to all users": "", @@ -93,7 +94,7 @@ "Artifacts": "", "Ask a question": "سوالی بپرسید", "Assistant": "دستیار", - "Attach file": "پیوست پرونده", + "Attach file from knowledge": "", "Attention to detail": "دقیق", "Attribute for Mail": "", "Attribute for Username": "", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", "Document": "سند", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "", "Documents": "اسناد", "does not make any external connections, and your data stays securely on your locally hosted server.": "هیچ اتصال خارجی ایجاد نمی کند و داده های شما به طور ایمن در سرور میزبان محلی شما باقی می ماند.", @@ -354,6 +357,7 @@ "Enable Message Rating": "", "Enable Mirostat sampling for controlling perplexity. 
(Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", "Enable New Sign Ups": "فعال کردن ثبت نام\u200cهای جدید", + "Enable OneDrive": "", "Enable Web Search": "فعالسازی جستجوی وب", "Enabled": "", "Engine": "", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "مقدار Chunk Overlap را وارد کنید", "Enter Chunk Size": "مقدار Chunk Size را وارد کنید", "Enter description": "", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "ادرس Github Raw را وارد کنید", @@ -510,6 +516,7 @@ "General Settings": "تنظیمات عمومی", "Generate an image": "", "Generate Image": "", + "Generate prompt pair": "", "Generating search query": "در حال تولید پرسوجوی جستجو", "Get started": "", "Get started with {{WEBUI_NAME}}": "", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "", "Local Models": "", + "Location access not allowed": "", "Lost": "", "LTR": "LTR", "Made by Open WebUI Community": "ساخته شده توسط OpenWebUI Community", @@ -718,6 +726,7 @@ "Ollama API settings updated": "", "Ollama Version": "نسخه اولاما", "On": "روشن", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "", "Only alphanumeric characters and hyphens are allowed in the command string.": "فقط کاراکترهای الفبایی و خط فاصله در رشته فرمان مجاز هستند.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "انتخاب موتور", "Select Knowledge": "انتخاب دانش", - "Select model": "انتخاب یک مدل", "Select only one model to call": "تنها یک مدل را برای صدا زدن انتخاب کنید", "Selected model(s) do not support image inputs": "مدل) های (انتخاب شده ورودیهای تصویر را پشتیبانی نمیکند", "Semantic distance to query": "", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less 
probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Talk to model": "", "Tap to interrupt": "", "Tasks": "", "Tavily API Key": "", @@ -1041,6 +1050,7 @@ "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "در دسترسی به اولاما مشکل دارید؟", + "Trust Proxy Environment": "", "TTS Model": "", "TTS Settings": "تنظیمات TTS", "TTS Voice": "", diff --git a/src/lib/i18n/locales/fi-FI/translation.json b/src/lib/i18n/locales/fi-FI/translation.json index 6d13919ae21..dd4c7ab93df 100644 --- a/src/lib/i18n/locales/fi-FI/translation.json +++ b/src/lib/i18n/locales/fi-FI/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "Tehtävämallia käytetään tehtävien suorittamiseen, kuten otsikoiden luomiseen keskusteluille ja verkkohakukyselyille", "a user": "käyttäjä", "About": "Tietoja", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "Pääsy", "Access Control": "Käyttöoikeuksien hallinta", "Accessible to all users": "Käytettävissä kaikille käyttäjille", @@ -93,7 +94,7 @@ "Artifacts": "Artefaktit", "Ask a question": "Kysyä kysymys", "Assistant": "Avustaja", - "Attach file": "Liitä tiedosto", + "Attach file from knowledge": "", "Attention to detail": "Huomio yksityiskohtiin", "Attribute for Mail": "", "Attribute for Username": "Käyttäjänimi-määritämä", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "Älä asenna toimintoja lähteistä, joihin et luota täysin.", "Do not install tools from sources you do not fully trust.": "Älä asenna työkaluja lähteistä, joihin et luota täysin.", "Document": "Asiakirja", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "Dokumentaatio", "Documents": "Asiakirjat", "does not make any external connections, and your data stays securely on your 
locally hosted server.": "ei tee ulkoisia yhteyksiä, ja tietosi pysyvät turvallisesti paikallisesti isännöidyllä palvelimellasi.", @@ -354,6 +357,7 @@ "Enable Message Rating": "Ota viestiarviointi käyttöön", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Ota Mirostat-näytteenotto käyttöön hallinnan monimerkityksellisyydelle. (Oletus: 0, 0 = Ei käytössä, 1 = Mirostat, 2 = Mirostat 2.0)", "Enable New Sign Ups": "Salli uudet rekisteröitymiset", + "Enable OneDrive": "", "Enable Web Search": "Ota verkkohaku käyttöön", "Enabled": "Käytössä", "Engine": "Moottori", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "Syötä osien päällekkäisyys", "Enter Chunk Size": "Syötä osien koko", "Enter description": "Kirjoita kuvaus", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "Verkko-osoitteet erotetaan pilkulla (esim. esimerkki.com,sivu.org", "Enter Exa API Key": "Kirjoita Exa API -avain", "Enter Github Raw URL": "Kirjoita Github Raw -verkko-osoite", @@ -510,6 +516,7 @@ "General Settings": "Yleiset asetukset", "Generate an image": "Luo kuva", "Generate Image": "Luo kuva", + "Generate prompt pair": "", "Generating search query": "Luodaan hakukyselyä", "Get started": "Aloita", "Get started with {{WEBUI_NAME}}": "Aloita käyttämään {{WEBUI_NAME}}:iä", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "Ladataan Kokoro.js...", "Local": "Paikallinen", "Local Models": "Paikalliset mallit", + "Location access not allowed": "", "Lost": "Mennyt", "LTR": "LTR", "Made by Open WebUI Community": "Tehnyt OpenWebUI-yhteisö", @@ -718,6 +726,7 @@ "Ollama API settings updated": "Ollama API -asetukset päivitetty", "Ollama Version": "Ollama-versio", "On": "Päällä", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "Vain kirjaimet, numerot ja väliviivat ovat sallittuja", "Only alphanumeric characters and hyphens are 
allowed in the command string.": "Vain kirjaimet, numerot ja väliviivat ovat sallittuja komentosarjassa.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "Vain kokoelmia voi muokata, luo uusi tietokanta muokataksesi/lisätäksesi asiakirjoja.", @@ -880,7 +889,6 @@ "Select an Ollama instance": "Valitse Ollama instanssi", "Select Engine": "Valitse moottori", "Select Knowledge": "Valitse tietämys", - "Select model": "Valitse malli", "Select only one model to call": "Valitse vain yksi malli kutsuttavaksi", "Selected model(s) do not support image inputs": "Valitut mallit eivät tue kuvasöytteitä", "Semantic distance to query": "Semanttinen etäisyys kyselyyn", @@ -957,6 +965,7 @@ "Tags Generation": "Tagien luonti", "Tags Generation Prompt": "Tagien luontikehote", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Tail-free-otanta käytetään vähentämään vähemmän todennäköisten tokenien vaikutusta tulokseen. Korkeampi arvo (esim. 2,0) vähentää vaikutusta enemmän, kun taas arvo 1,0 poistaa tämän asetuksen käytöstä. 
(oletus: 1)", + "Talk to model": "", "Tap to interrupt": "Napauta keskeyttääksesi", "Tasks": "Tehtävät", "Tavily API Key": "Tavily API -avain", @@ -1041,6 +1050,7 @@ "Top P": "Top P", "Transformers": "Muunnokset", "Trouble accessing Ollama?": "Ongelmia Ollama-yhteydessä?", + "Trust Proxy Environment": "", "TTS Model": "Puhesynteesimalli", "TTS Settings": "Puhesynteesiasetukset", "TTS Voice": "Puhesynteesiääni", diff --git a/src/lib/i18n/locales/fr-CA/translation.json b/src/lib/i18n/locales/fr-CA/translation.json index 32df3741031..6e125295103 100644 --- a/src/lib/i18n/locales/fr-CA/translation.json +++ b/src/lib/i18n/locales/fr-CA/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "Un modèle de tâche est utilisé lors de l’exécution de tâches telles que la génération de titres pour les conversations et les requêtes de recherche sur le web.", "a user": "un utilisateur", "About": "À propos", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "", "Access Control": "", "Accessible to all users": "", @@ -93,7 +94,7 @@ "Artifacts": "", "Ask a question": "", "Assistant": "", - "Attach file": "Joindre un document", + "Attach file from knowledge": "", "Attention to detail": "Attention aux détails", "Attribute for Mail": "", "Attribute for Username": "", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", "Document": "Document", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "Documentation", "Documents": "Documents", "does not make any external connections, and your data stays securely on your locally hosted server.": "ne fait aucune connexion externe et garde vos données en sécurité sur votre serveur local.", @@ -354,6 +357,7 @@ "Enable Message Rating": "", "Enable Mirostat sampling for controlling 
perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", "Enable New Sign Ups": "Activer les nouvelles inscriptions", + "Enable OneDrive": "", "Enable Web Search": "Activer la recherche sur le Web", "Enabled": "", "Engine": "Moteur", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "Entrez le chevauchement de chunk", "Enter Chunk Size": "Entrez la taille de bloc", "Enter description": "", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "Entrez l'URL brute de GitHub", @@ -510,6 +516,7 @@ "General Settings": "Paramètres Généraux", "Generate an image": "", "Generate Image": "Générer une image", + "Generate prompt pair": "", "Generating search query": "Génération d'une requête de recherche", "Get started": "", "Get started with {{WEBUI_NAME}}": "", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "", "Local Models": "Modèles locaux", + "Location access not allowed": "", "Lost": "", "LTR": "LTR", "Made by Open WebUI Community": "Réalisé par la communauté OpenWebUI", @@ -718,6 +726,7 @@ "Ollama API settings updated": "", "Ollama Version": "Version Ollama améliorée", "On": "Activé", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "", "Only alphanumeric characters and hyphens are allowed in the command string.": "Seuls les caractères alphanumériques et les tirets sont autorisés dans la chaîne de commande.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "", "Select Knowledge": "", - "Select model": "Sélectionnez un modèle", "Select only one model to call": "Sélectionnez seulement un modèle pour appeler", "Selected model(s) do not support image inputs": "Les modèle(s) sélectionné(s) ne prennent pas en charge les entrées d'images", "Semantic distance to query": 
"", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Talk to model": "", "Tap to interrupt": "Appuyez pour interrompre", "Tasks": "", "Tavily API Key": "Clé API Tavily", @@ -1041,6 +1050,7 @@ "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "Rencontrez-vous des difficultés pour accéder à Ollama ?", + "Trust Proxy Environment": "", "TTS Model": "Modèle de synthèse vocale", "TTS Settings": "Paramètres de synthèse vocale", "TTS Voice": "Voix TTS", diff --git a/src/lib/i18n/locales/fr-FR/translation.json b/src/lib/i18n/locales/fr-FR/translation.json index 550db46b5eb..f517c25fd86 100644 --- a/src/lib/i18n/locales/fr-FR/translation.json +++ b/src/lib/i18n/locales/fr-FR/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "Un modèle de tâche est utilisé lors de l’exécution de tâches telles que la génération de titres pour les conversations et les requêtes de recherche sur le web.", "a user": "un utilisateur", "About": "À propos", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "Accès", "Access Control": "Contrôle d'accès", "Accessible to all users": "Accessible à tous les utilisateurs", @@ -93,7 +94,7 @@ "Artifacts": "Artéfacts", "Ask a question": "Posez votre question", "Assistant": "Assistant", - "Attach file": "Joindre un document", + "Attach file from knowledge": "", "Attention to detail": "Attention aux détails", "Attribute for Mail": "Attribut pour l'e-mail", "Attribute for Username": "Attribut pour le nom d'utilisateur", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "N'installez pas de fonctions provenant de sources auxquelles vous ne faites pas entièrement 
confiance.", "Do not install tools from sources you do not fully trust.": "N'installez pas d'outils provenant de sources auxquelles vous ne faites pas entièrement confiance.", "Document": "Document", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "Documentation", "Documents": "Documents", "does not make any external connections, and your data stays securely on your locally hosted server.": "n'établit aucune connexion externe et garde vos données en sécurité sur votre serveur local.", @@ -354,6 +357,7 @@ "Enable Message Rating": "Activer l'évaluation des messages", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Activer l'échantillonnage Mirostat pour contrôler la perplexité. (Par défaut : 0, 0 = Désactivé, 1 = Mirostat, 2 = Mirostat 2.0)", "Enable New Sign Ups": "Activer les nouvelles inscriptions", + "Enable OneDrive": "", "Enable Web Search": "Activer la recherche Web", "Enabled": "Activé", "Engine": "Moteur", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "Entrez le chevauchement des chunks", "Enter Chunk Size": "Entrez la taille des chunks", "Enter description": "Entrez la description", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "Entrez l'URL brute de GitHub", @@ -510,6 +516,7 @@ "General Settings": "Paramètres généraux", "Generate an image": "", "Generate Image": "Générer une image", + "Generate prompt pair": "", "Generating search query": "Génération d'une requête de recherche", "Get started": "Commencer", "Get started with {{WEBUI_NAME}}": "Commencez avec {{WEBUI_NAME}}", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "Local", "Local Models": "Modèles locaux", + "Location access not allowed": "", "Lost": "Perdu", "LTR": "LTR", "Made by Open WebUI Community": "Réalisé 
par la communauté OpenWebUI", @@ -718,6 +726,7 @@ "Ollama API settings updated": "Paramètres de l'API Ollama mis à jour", "Ollama Version": "Version Ollama", "On": "Activé", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "Seuls les caractères alphanumériques et les tirets sont autorisés", "Only alphanumeric characters and hyphens are allowed in the command string.": "Seuls les caractères alphanumériques et les tirets sont autorisés dans la chaîne de commande.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "Seules les collections peuvent être modifiées, créez une nouvelle base de connaissance pour modifier/ajouter des documents.", @@ -880,7 +889,6 @@ "Select an Ollama instance": "Sélectionnez une instance Ollama", "Select Engine": "Sélectionnez le moteur", "Select Knowledge": "Sélectionnez une connaissance", - "Select model": "Sélectionner un modèle", "Select only one model to call": "Sélectionnez seulement un modèle pour appeler", "Selected model(s) do not support image inputs": "Les modèle(s) sélectionné(s) ne prennent pas en charge les entrées d'images", "Semantic distance to query": "Distance sémantique à la requête", @@ -957,6 +965,7 @@ "Tags Generation": "Génération de tags", "Tags Generation Prompt": "Prompt de génération de tags", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "L'échantillonnage sans queue est utilisé pour réduire l'impact des tokens moins probables dans la sortie. Une valeur plus élevée (par exemple 2.0) réduira davantage l'impact, tandis qu'une valeur de 1.0 désactive ce paramètre. 
(par défaut : 1)", + "Talk to model": "", "Tap to interrupt": "Appuyez pour interrompre", "Tasks": "", "Tavily API Key": "Clé API Tavily", @@ -1041,6 +1050,7 @@ "Top P": "Top P", "Transformers": "Transformers", "Trouble accessing Ollama?": "Problèmes d'accès à Ollama ?", + "Trust Proxy Environment": "", "TTS Model": "Modèle de Text-to-Speech", "TTS Settings": "Paramètres de Text-to-Speech", "TTS Voice": "Voix de Text-to-Speech", diff --git a/src/lib/i18n/locales/he-IL/translation.json b/src/lib/i18n/locales/he-IL/translation.json index 51ea3483cc3..63322c7b8c0 100644 --- a/src/lib/i18n/locales/he-IL/translation.json +++ b/src/lib/i18n/locales/he-IL/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "מודל משימה משמש בעת ביצוע משימות כגון יצירת כותרות עבור צ'אטים ושאילתות חיפוש באינטרנט", "a user": "משתמש", "About": "אודות", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "", "Access Control": "", "Accessible to all users": "", @@ -93,7 +94,7 @@ "Artifacts": "", "Ask a question": "", "Assistant": "", - "Attach file": "צרף קובץ", + "Attach file from knowledge": "", "Attention to detail": "תשומת לב לפרטים", "Attribute for Mail": "", "Attribute for Username": "", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", "Document": "מסמך", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "", "Documents": "מסמכים", "does not make any external connections, and your data stays securely on your locally hosted server.": "לא מבצע חיבורים חיצוניים, והנתונים שלך נשמרים באופן מאובטח בשרת המקומי שלך.", @@ -354,6 +357,7 @@ "Enable Message Rating": "", "Enable Mirostat sampling for controlling perplexity. 
(Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", "Enable New Sign Ups": "אפשר הרשמות חדשות", + "Enable OneDrive": "", "Enable Web Search": "הפיכת חיפוש באינטרנט לזמין", "Enabled": "", "Engine": "", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "הזן חפיפת נתונים", "Enter Chunk Size": "הזן גודל נתונים", "Enter description": "", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "הזן כתובת URL של Github Raw", @@ -510,6 +516,7 @@ "General Settings": "הגדרות כלליות", "Generate an image": "", "Generate Image": "", + "Generate prompt pair": "", "Generating search query": "יצירת שאילתת חיפוש", "Get started": "", "Get started with {{WEBUI_NAME}}": "", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "", "Local Models": "", + "Location access not allowed": "", "Lost": "", "LTR": "LTR", "Made by Open WebUI Community": "נוצר על ידי קהילת OpenWebUI", @@ -718,6 +726,7 @@ "Ollama API settings updated": "", "Ollama Version": "גרסת Ollama", "On": "פועל", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "", "Only alphanumeric characters and hyphens are allowed in the command string.": "רק תווים אלפאנומריים ומקפים מותרים במחרוזת הפקודה.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "", "Select Knowledge": "", - "Select model": "בחר מודל", "Select only one model to call": "", "Selected model(s) do not support image inputs": "דגמים נבחרים אינם תומכים בקלט תמונה", "Semantic distance to query": "", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. 
(default: 1)": "", + "Talk to model": "", "Tap to interrupt": "", "Tasks": "", "Tavily API Key": "", @@ -1041,6 +1050,7 @@ "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "קשה לגשת לOllama?", + "Trust Proxy Environment": "", "TTS Model": "", "TTS Settings": "הגדרות TTS", "TTS Voice": "", diff --git a/src/lib/i18n/locales/hi-IN/translation.json b/src/lib/i18n/locales/hi-IN/translation.json index 66a81e16424..2639e3c75db 100644 --- a/src/lib/i18n/locales/hi-IN/translation.json +++ b/src/lib/i18n/locales/hi-IN/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "चैट और वेब खोज क्वेरी के लिए शीर्षक उत्पन्न करने जैसे कार्य करते समय कार्य मॉडल का उपयोग किया जाता है", "a user": "एक उपयोगकर्ता", "About": "हमारे बारे में", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "", "Access Control": "", "Accessible to all users": "", @@ -93,7 +94,7 @@ "Artifacts": "", "Ask a question": "", "Assistant": "", - "Attach file": "फ़ाइल atta", + "Attach file from knowledge": "", "Attention to detail": "विस्तार पर ध्यान", "Attribute for Mail": "", "Attribute for Username": "", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", "Document": "दस्तावेज़", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "", "Documents": "दस्तावेज़", "does not make any external connections, and your data stays securely on your locally hosted server.": "कोई बाहरी कनेक्शन नहीं बनाता है, और आपका डेटा आपके स्थानीय रूप से होस्ट किए गए सर्वर पर सुरक्षित रूप से रहता है।", @@ -354,6 +357,7 @@ "Enable Message Rating": "", "Enable Mirostat sampling for controlling perplexity. 
(Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", "Enable New Sign Ups": "नए साइन अप सक्रिय करें", + "Enable OneDrive": "", "Enable Web Search": "वेब खोज सक्षम करें", "Enabled": "", "Engine": "", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "चंक ओवरलैप दर्ज करें", "Enter Chunk Size": "खंड आकार दर्ज करें", "Enter description": "", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "Github Raw URL दर्ज करें", @@ -510,6 +516,7 @@ "General Settings": "सामान्य सेटिंग्स", "Generate an image": "", "Generate Image": "", + "Generate prompt pair": "", "Generating search query": "खोज क्वेरी जनरेट करना", "Get started": "", "Get started with {{WEBUI_NAME}}": "", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "", "Local Models": "", + "Location access not allowed": "", "Lost": "", "LTR": "LTR", "Made by Open WebUI Community": "OpenWebUI समुदाय द्वारा निर्मित", @@ -718,6 +726,7 @@ "Ollama API settings updated": "", "Ollama Version": "Ollama Version", "On": "चालू", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "", "Only alphanumeric characters and hyphens are allowed in the command string.": "कमांड स्ट्रिंग में केवल अल्फ़ान्यूमेरिक वर्ण और हाइफ़न की अनुमति है।", "Only collections can be edited, create a new knowledge base to edit/add documents.": "", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "", "Select Knowledge": "", - "Select model": "मॉडल चुनें", "Select only one model to call": "", "Selected model(s) do not support image inputs": "चयनित मॉडल छवि इनपुट का समर्थन नहीं करते हैं", "Semantic distance to query": "", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Talk to model": "", "Tap to interrupt": "", "Tasks": "", "Tavily API Key": "", @@ -1041,6 +1050,7 @@ "Top P": "शीर्ष P", "Transformers": "", "Trouble accessing Ollama?": "Ollama तक पहुँचने में परेशानी हो रही है?", + "Trust Proxy Environment": "", "TTS Model": "", "TTS Settings": "TTS सेटिंग्स", "TTS Voice": "", diff --git a/src/lib/i18n/locales/hr-HR/translation.json b/src/lib/i18n/locales/hr-HR/translation.json index e82be7ccfcf..d9c8e19395e 100644 --- a/src/lib/i18n/locales/hr-HR/translation.json +++ b/src/lib/i18n/locales/hr-HR/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "Model zadatka koristi se pri izvođenju zadataka kao što su generiranje naslova za razgovore i upite za pretraživanje weba", "a user": "korisnik", "About": "O aplikaciji", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "", "Access Control": "", "Accessible to all users": "", @@ -93,7 +94,7 @@ "Artifacts": "", "Ask a question": "", "Assistant": "", - "Attach file": "Priloži datoteku", + "Attach file from knowledge": "", "Attention to detail": "Pažnja na detalje", "Attribute for Mail": "", "Attribute for Username": "", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", "Document": "Dokument", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "Dokumentacija", "Documents": "Dokumenti", "does not make any external connections, and your data stays securely on your locally hosted server.": "ne uspostavlja vanjske veze, a vaši podaci ostaju sigurno na vašem lokalno hostiranom poslužitelju.", @@ -354,6 +357,7 @@ "Enable Message Rating": "", "Enable Mirostat sampling for controlling perplexity. 
(Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", "Enable New Sign Ups": "Omogući nove prijave", + "Enable OneDrive": "", "Enable Web Search": "Omogući pretraživanje weba", "Enabled": "", "Engine": "", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "Unesite preklapanje dijelova", "Enter Chunk Size": "Unesite veličinu dijela", "Enter description": "", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "Unesite Github sirovi URL", @@ -510,6 +516,7 @@ "General Settings": "Opće postavke", "Generate an image": "", "Generate Image": "Gneriraj sliku", + "Generate prompt pair": "", "Generating search query": "Generiranje upita za pretraživanje", "Get started": "", "Get started with {{WEBUI_NAME}}": "", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "", "Local Models": "Lokalni modeli", + "Location access not allowed": "", "Lost": "", "LTR": "LTR", "Made by Open WebUI Community": "Izradio OpenWebUI Community", @@ -718,6 +726,7 @@ "Ollama API settings updated": "", "Ollama Version": "Ollama verzija", "On": "Uključeno", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "", "Only alphanumeric characters and hyphens are allowed in the command string.": "Samo alfanumerički znakovi i crtice su dopušteni u naredbenom nizu.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "", "Select Knowledge": "", - "Select model": "Odaberite model", "Select only one model to call": "Odaberite samo jedan model za poziv", "Selected model(s) do not support image inputs": "Odabrani modeli ne podržavaju unose slika", "Semantic distance to query": "", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens 
from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Talk to model": "", "Tap to interrupt": "", "Tasks": "", "Tavily API Key": "", @@ -1041,6 +1050,7 @@ "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "Problemi s pristupom Ollama?", + "Trust Proxy Environment": "", "TTS Model": "TTS model", "TTS Settings": "TTS postavke", "TTS Voice": "TTS glas", diff --git a/src/lib/i18n/locales/hu-HU/translation.json b/src/lib/i18n/locales/hu-HU/translation.json index 6e7089e4260..82e14f2a030 100644 --- a/src/lib/i18n/locales/hu-HU/translation.json +++ b/src/lib/i18n/locales/hu-HU/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "A feladat modell olyan feladatokhoz használatos, mint a beszélgetések címeinek generálása és webes keresési lekérdezések", "a user": "egy felhasználó", "About": "Névjegy", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "", "Access Control": "", "Accessible to all users": "", @@ -93,7 +94,7 @@ "Artifacts": "Műtermékek", "Ask a question": "Kérdezz valamit", "Assistant": "Asszisztens", - "Attach file": "Fájl csatolása", + "Attach file from knowledge": "", "Attention to detail": "Részletekre való odafigyelés", "Attribute for Mail": "", "Attribute for Username": "", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "Ne telepíts funkciókat olyan forrásokból, amelyekben nem bízol teljesen.", "Do not install tools from sources you do not fully trust.": "Ne telepíts eszközöket olyan forrásokból, amelyekben nem bízol teljesen.", "Document": "Dokumentum", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "Dokumentáció", "Documents": "Dokumentumok", "does not make any external connections, and your data stays securely on your locally hosted server.": "nem 
létesít külső kapcsolatokat, és az adataid biztonságban maradnak a helyileg hosztolt szervereden.", @@ -354,6 +357,7 @@ "Enable Message Rating": "Üzenet értékelés engedélyezése", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", "Enable New Sign Ups": "Új regisztrációk engedélyezése", + "Enable OneDrive": "", "Enable Web Search": "Webes keresés engedélyezése", "Enabled": "Engedélyezve", "Engine": "Motor", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "Add meg a darab átfedést", "Enter Chunk Size": "Add meg a darab méretet", "Enter description": "Add meg a leírást", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "Add meg a Github Raw URL-t", @@ -510,6 +516,7 @@ "General Settings": "Általános beállítások", "Generate an image": "", "Generate Image": "Kép generálása", + "Generate prompt pair": "", "Generating search query": "Keresési lekérdezés generálása", "Get started": "", "Get started with {{WEBUI_NAME}}": "", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "", "Local Models": "Helyi modellek", + "Location access not allowed": "", "Lost": "Elveszett", "LTR": "LTR", "Made by Open WebUI Community": "Az OpenWebUI közösség által készítve", @@ -718,6 +726,7 @@ "Ollama API settings updated": "", "Ollama Version": "Ollama verzió", "On": "Be", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "", "Only alphanumeric characters and hyphens are allowed in the command string.": "Csak alfanumerikus karakterek és kötőjelek engedélyezettek a parancssorban.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "Csak gyűjtemények szerkeszthetők, hozzon létre új tudásbázist dokumentumok szerkesztéséhez/hozzáadásához.", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "Motor 
kiválasztása", "Select Knowledge": "Tudásbázis kiválasztása", - "Select model": "Modell kiválasztása", "Select only one model to call": "Csak egy modellt válasszon ki hívásra", "Selected model(s) do not support image inputs": "A kiválasztott modell(ek) nem támogatják a képbemenetet", "Semantic distance to query": "Szemantikai távolság a lekérdezéshez", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "Címke generálási prompt", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Talk to model": "", "Tap to interrupt": "Koppintson a megszakításhoz", "Tasks": "", "Tavily API Key": "Tavily API kulcs", @@ -1041,6 +1050,7 @@ "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "Problémája van az Ollama elérésével?", + "Trust Proxy Environment": "", "TTS Model": "TTS modell", "TTS Settings": "TTS beállítások", "TTS Voice": "TTS hang", diff --git a/src/lib/i18n/locales/id-ID/translation.json b/src/lib/i18n/locales/id-ID/translation.json index 8480724083c..49ba39739ac 100644 --- a/src/lib/i18n/locales/id-ID/translation.json +++ b/src/lib/i18n/locales/id-ID/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "Model tugas digunakan saat melakukan tugas seperti membuat judul untuk obrolan dan kueri penelusuran web", "a user": "seorang pengguna", "About": "Tentang", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "", "Access Control": "", "Accessible to all users": "", @@ -93,7 +94,7 @@ "Artifacts": "", "Ask a question": "", "Assistant": "", - "Attach file": "Lampirkan file", + "Attach file from knowledge": "", "Attention to detail": "Perhatian terhadap detail", "Attribute for Mail": "", "Attribute for Username": "", @@ -305,6 +306,8 @@ "Do not install functions from 
sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", "Document": "Dokumen", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "Dokumentasi", "Documents": "Dokumen", "does not make any external connections, and your data stays securely on your locally hosted server.": "tidak membuat koneksi eksternal apa pun, dan data Anda tetap aman di server yang dihosting secara lokal.", @@ -354,6 +357,7 @@ "Enable Message Rating": "", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", "Enable New Sign Ups": "Aktifkan Pendaftaran Baru", + "Enable OneDrive": "", "Enable Web Search": "Aktifkan Pencarian Web", "Enabled": "", "Engine": "", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "Masukkan Tumpang Tindih Chunk", "Enter Chunk Size": "Masukkan Ukuran Potongan", "Enter description": "", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "Masukkan URL Mentah Github", @@ -510,6 +516,7 @@ "General Settings": "Pengaturan Umum", "Generate an image": "", "Generate Image": "Menghasilkan Gambar", + "Generate prompt pair": "", "Generating search query": "Membuat kueri penelusuran", "Get started": "", "Get started with {{WEBUI_NAME}}": "", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "", "Local Models": "Model Lokal", + "Location access not allowed": "", "Lost": "", "LTR": "LTR", "Made by Open WebUI Community": "Dibuat oleh Komunitas OpenWebUI", @@ -718,6 +726,7 @@ "Ollama API settings updated": "", "Ollama Version": "Versi Ollama", "On": "Aktif", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "", "Only alphanumeric characters and hyphens are allowed in the command string.": "Hanya karakter alfanumerik dan tanda hubung yang diizinkan 
dalam string perintah.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "", "Select Knowledge": "", - "Select model": "Pilih model", "Select only one model to call": "Pilih hanya satu model untuk dipanggil", "Selected model(s) do not support image inputs": "Model yang dipilih tidak mendukung input gambar", "Semantic distance to query": "", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Talk to model": "", "Tap to interrupt": "Ketuk untuk menyela", "Tasks": "", "Tavily API Key": "Kunci API Tavily", @@ -1041,6 +1050,7 @@ "Top P": "P Atas", "Transformers": "", "Trouble accessing Ollama?": "Kesulitan mengakses Ollama?", + "Trust Proxy Environment": "", "TTS Model": "Model TTS", "TTS Settings": "Pengaturan TTS", "TTS Voice": "Suara TTS", diff --git a/src/lib/i18n/locales/ie-GA/translation.json b/src/lib/i18n/locales/ie-GA/translation.json index 131e680f21f..132944cb9ee 100644 --- a/src/lib/i18n/locales/ie-GA/translation.json +++ b/src/lib/i18n/locales/ie-GA/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "Úsáidtear múnla tasc agus tascanna á ndéanamh agat mar theidil a ghiniúint do chomhráite agus ceisteanna cuardaigh gréasáin", "a user": "úsáideoir", "About": "Maidir", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "Rochtain", "Access Control": "Rialaithe Rochtana", "Accessible to all users": "Inrochtana do gach úsáideoir", @@ -93,7 +94,7 @@ "Artifacts": "Déantáin", "Ask a question": "Cuir ceist", "Assistant": "Cúntóir", - "Attach file": "Ceangail comhad", + "Attach file from knowledge": "", "Attention to 
detail": "Aird ar mhionsonraí", "Attribute for Mail": "Tréith don Phost", "Attribute for Username": "Tréith don Ainm Úsáideora", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "Ná suiteáil feidhmeanna ó fhoinsí nach bhfuil muinín iomlán agat.", "Do not install tools from sources you do not fully trust.": "Ná suiteáil uirlisí ó fhoinsí nach bhfuil muinín iomlán agat.", "Document": "Doiciméad", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "Doiciméadú", "Documents": "Doiciméid", "does not make any external connections, and your data stays securely on your locally hosted server.": "ní dhéanann sé aon naisc sheachtracha, agus fanann do chuid sonraí go slán ar do fhreastalaí a óstáiltear go háitiúil.", @@ -354,6 +357,7 @@ "Enable Message Rating": "Cumasaigh Rátáil Teachtai", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Cumasaigh sampláil Mirostat chun seachrán a rialú. 
(Réamhshocrú: 0, 0 = Díchumasaithe, 1 = Mirostat, 2 = Mirostat 2.0)", "Enable New Sign Ups": "Cumasaigh Clárúcháin Nua", + "Enable OneDrive": "", "Enable Web Search": "Cumasaigh Cuardach Gréasáin", "Enabled": "Cumasaithe", "Engine": "Inneall", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "Cuir isteach Chunk Forluí", "Enter Chunk Size": "Cuir isteach Méid an Chunc", "Enter description": "Iontráil cur síos", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "Cuir isteach Eochair Exa API", "Enter Github Raw URL": "Cuir isteach URL Github Raw", @@ -510,6 +516,7 @@ "General Settings": "Socruithe Ginearálta", "Generate an image": "Gin íomhá", "Generate Image": "Ginigh Íomhá", + "Generate prompt pair": "", "Generating search query": "Giniúint ceist cuardaigh", "Get started": "Cuir tús leis", "Get started with {{WEBUI_NAME}}": "Cuir tús le {{WEBUI_NAME}}", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "Áitiúil", "Local Models": "Múnlaí Áitiúla", + "Location access not allowed": "", "Lost": "Cailleadh", "LTR": "LTR", "Made by Open WebUI Community": "Déanta ag OpenWebUI Community", @@ -718,6 +726,7 @@ "Ollama API settings updated": "Nuashonraíodh socruithe Olama API", "Ollama Version": "Leagan Ollama", "On": "Ar", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "Ní cheadaítear ach carachtair alfa-uimhriúla agus fleiscíní", "Only alphanumeric characters and hyphens are allowed in the command string.": "Ní cheadaítear ach carachtair alfauméireacha agus braithíní sa sreangán ordaithe.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "Ní féidir ach bailiúcháin a chur in eagar, bonn eolais nua a chruthú chun doiciméid a chur in eagar/a chur leis.", @@ -880,7 +889,6 @@ "Select an Ollama instance": "Roghnaigh sampla Olama", "Select Engine": "Roghnaigh Inneall", "Select Knowledge": 
"Roghnaigh Eolais", - "Select model": "Roghnaigh múnla", "Select only one model to call": "Roghnaigh múnla amháin le glaoch", "Selected model(s) do not support image inputs": "Ní tacaíonn an munla/nna roghnaithe le h-ionchuir íomhá", "Semantic distance to query": "Fad shéimeantach le fiosrú", @@ -957,6 +965,7 @@ "Tags Generation": "Giniúint Clibeanna", "Tags Generation Prompt": "Clibeanna Giniúint Leid", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Úsáidtear sampláil saor ó eireabaill chun tionchar na n-chomharthaí ón aschur nach bhfuil chomh dóchúil céanna a laghdú. Laghdóidh luach níos airde (m.sh., 2.0) an tionchar níos mó, agus díchumasaíonn luach 1.0 an socrú seo. (réamhshocraithe: 1)", + "Talk to model": "", "Tap to interrupt": "Tapáil chun cur isteach", "Tasks": "", "Tavily API Key": "Eochair API Tavily", @@ -1041,6 +1050,7 @@ "Top P": "Barr P", "Transformers": "Claochladáin", "Trouble accessing Ollama?": "Deacracht teacht ar Ollama?", + "Trust Proxy Environment": "", "TTS Model": "TTS Múnla", "TTS Settings": "Socruithe TTS", "TTS Voice": "Guth TTS", diff --git a/src/lib/i18n/locales/it-IT/translation.json b/src/lib/i18n/locales/it-IT/translation.json index a9309014791..38d5c9a496a 100644 --- a/src/lib/i18n/locales/it-IT/translation.json +++ b/src/lib/i18n/locales/it-IT/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "Un modello di attività viene utilizzato durante l'esecuzione di attività come la generazione di titoli per chat e query di ricerca Web", "a user": "un utente", "About": "Informazioni", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "", "Access Control": "", "Accessible to all users": "", @@ -93,7 +94,7 @@ "Artifacts": "", "Ask a question": "", "Assistant": "", - 
"Attach file": "Allega file", + "Attach file from knowledge": "", "Attention to detail": "Attenzione ai dettagli", "Attribute for Mail": "", "Attribute for Username": "", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", "Document": "Documento", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "", "Documents": "Documenti", "does not make any external connections, and your data stays securely on your locally hosted server.": "non effettua connessioni esterne e i tuoi dati rimangono al sicuro sul tuo server ospitato localmente.", @@ -354,6 +357,7 @@ "Enable Message Rating": "", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", "Enable New Sign Ups": "Abilita nuove iscrizioni", + "Enable OneDrive": "", "Enable Web Search": "Abilita ricerca Web", "Enabled": "", "Engine": "", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "Inserisci la sovrapposizione chunk", "Enter Chunk Size": "Inserisci la dimensione chunk", "Enter description": "", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "Immettere l'URL grezzo di Github", @@ -510,6 +516,7 @@ "General Settings": "Impostazioni generali", "Generate an image": "", "Generate Image": "", + "Generate prompt pair": "", "Generating search query": "Generazione di query di ricerca", "Get started": "", "Get started with {{WEBUI_NAME}}": "", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "", "Local Models": "", + "Location access not allowed": "", "Lost": "", "LTR": "LTR", "Made by Open WebUI Community": "Realizzato dalla comunità OpenWebUI", @@ -718,6 +726,7 @@ "Ollama API settings updated": "", "Ollama Version": "Versione Ollama", "On": "Attivato", 
+ "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "", "Only alphanumeric characters and hyphens are allowed in the command string.": "Nella stringa di comando sono consentiti solo caratteri alfanumerici e trattini.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "", "Select Knowledge": "", - "Select model": "Seleziona modello", "Select only one model to call": "", "Selected model(s) do not support image inputs": "I modelli selezionati non supportano l'input di immagini", "Semantic distance to query": "", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Talk to model": "", "Tap to interrupt": "", "Tasks": "", "Tavily API Key": "", @@ -1041,6 +1050,7 @@ "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "Problemi di accesso a Ollama?", + "Trust Proxy Environment": "", "TTS Model": "", "TTS Settings": "Impostazioni TTS", "TTS Voice": "", diff --git a/src/lib/i18n/locales/ja-JP/translation.json b/src/lib/i18n/locales/ja-JP/translation.json index 117e0f8bd7f..b50c9d88a34 100644 --- a/src/lib/i18n/locales/ja-JP/translation.json +++ b/src/lib/i18n/locales/ja-JP/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "タスクモデルは、チャットやウェブ検索クエリのタイトルの生成などのタスクを実行するときに使用されます", "a user": "ユーザー", "About": "概要", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "", "Access Control": "", "Accessible to all users": "", @@ -93,7 +94,7 @@ "Artifacts": "", "Ask a question": "質問して下さい。", "Assistant": "", - "Attach file": "ファイルを添付する", + "Attach file from knowledge": "", "Attention to detail": 
"詳細に注意する", "Attribute for Mail": "", "Attribute for Username": "", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "信頼できないソースからFunctionをインストールしないでください。", "Do not install tools from sources you do not fully trust.": "信頼出来ないソースからツールをインストールしないでください。", "Document": "ドキュメント", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "ドキュメント", "Documents": "ドキュメント", "does not make any external connections, and your data stays securely on your locally hosted server.": "外部接続を行わず、データはローカルでホストされているサーバー上に安全に保持されます。", @@ -354,6 +357,7 @@ "Enable Message Rating": "メッセージ評価を有効にする", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", "Enable New Sign Ups": "新規登録を有効にする", + "Enable OneDrive": "", "Enable Web Search": "ウェブ検索を有効にする", "Enabled": "有効", "Engine": "エンジン", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "チャンクオーバーラップを入力してください", "Enter Chunk Size": "チャンクサイズを入力してください", "Enter description": "", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "Github Raw URLを入力", @@ -510,6 +516,7 @@ "General Settings": "一般設定", "Generate an image": "", "Generate Image": "", + "Generate prompt pair": "", "Generating search query": "検索クエリの生成", "Get started": "", "Get started with {{WEBUI_NAME}}": "", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "", "Local Models": "ローカルモデル", + "Location access not allowed": "", "Lost": "", "LTR": "LTR", "Made by Open WebUI Community": "OpenWebUI コミュニティによって作成", @@ -718,6 +726,7 @@ "Ollama API settings updated": "", "Ollama Version": "Ollama バージョン", "On": "オン", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "", "Only alphanumeric characters and hyphens are allowed in the command string.": "コマンド文字列には英数字とハイフンのみが許可されています。", "Only 
collections can be edited, create a new knowledge base to edit/add documents.": "", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "エンジンの選択", "Select Knowledge": "ナレッジベースの選択", - "Select model": "モデルを選択", "Select only one model to call": "", "Selected model(s) do not support image inputs": "一部のモデルは画像入力をサポートしていません", "Semantic distance to query": "", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Talk to model": "", "Tap to interrupt": "", "Tasks": "", "Tavily API Key": "", @@ -1041,6 +1050,7 @@ "Top P": "トップ P", "Transformers": "", "Trouble accessing Ollama?": "Ollama へのアクセスに問題がありますか?", + "Trust Proxy Environment": "", "TTS Model": "TTSモデル", "TTS Settings": "TTS 設定", "TTS Voice": "TTSボイス", diff --git a/src/lib/i18n/locales/ka-GE/translation.json b/src/lib/i18n/locales/ka-GE/translation.json index 63527dc05f7..e715cf7cd1d 100644 --- a/src/lib/i18n/locales/ka-GE/translation.json +++ b/src/lib/i18n/locales/ka-GE/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "დავალების მოდელი გამოიყენება ისეთი ამოცანების შესრულებისას, როგორიცაა ჩეთების სათაურების გენერირება და ვებ – ძიების მოთხოვნები", "a user": "მომხმარებელი", "About": "შესახებ", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "", "Access Control": "", "Accessible to all users": "", @@ -93,7 +94,7 @@ "Artifacts": "", "Ask a question": "", "Assistant": "", - "Attach file": "ფაილის ჩაწერა", + "Attach file from knowledge": "", "Attention to detail": "დეტალური მიმართვა", "Attribute for Mail": "", "Attribute for Username": "", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources 
you do not fully trust.": "", "Document": "დოკუმენტი", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "", "Documents": "დოკუმენტები", "does not make any external connections, and your data stays securely on your locally hosted server.": "არ ამყარებს გარე კავშირებს და თქვენი მონაცემები უსაფრთხოდ რჩება თქვენს ადგილობრივ სერვერზე.", @@ -354,6 +357,7 @@ "Enable Message Rating": "", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", "Enable New Sign Ups": "ახალი რეგისტრაციების ჩართვა", + "Enable OneDrive": "", "Enable Web Search": "ვებ ძიების ჩართვა", "Enabled": "", "Engine": "", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "შეიყვანეთ ნაწილის გადახურვა", "Enter Chunk Size": "შეიყვანე ბლოკის ზომა", "Enter description": "", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "შეიყვანეთ Github Raw URL", @@ -510,6 +516,7 @@ "General Settings": "ზოგადი პარამეტრები", "Generate an image": "", "Generate Image": "", + "Generate prompt pair": "", "Generating search query": "საძიებო მოთხოვნის გენერირება", "Get started": "", "Get started with {{WEBUI_NAME}}": "", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "", "Local Models": "", + "Location access not allowed": "", "Lost": "", "LTR": "LTR", "Made by Open WebUI Community": "დამზადებულია OpenWebUI საზოგადოების მიერ", @@ -718,6 +726,7 @@ "Ollama API settings updated": "", "Ollama Version": "Ollama ვერსია", "On": "ჩართვა", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "", "Only alphanumeric characters and hyphens are allowed in the command string.": "ბრძანების სტრიქონში დაშვებულია მხოლოდ ალფანუმერული სიმბოლოები და დეფისები.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "", @@ 
-880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "", "Select Knowledge": "", - "Select model": "მოდელის არჩევა", "Select only one model to call": "", "Selected model(s) do not support image inputs": "შერჩეული მოდელი (ებ) ი არ უჭერს მხარს გამოსახულების შეყვანას", "Semantic distance to query": "", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Talk to model": "", "Tap to interrupt": "", "Tasks": "", "Tavily API Key": "", @@ -1041,6 +1050,7 @@ "Top P": "ტოპ P", "Transformers": "", "Trouble accessing Ollama?": "Ollama-ს ვერ უკავშირდები?", + "Trust Proxy Environment": "", "TTS Model": "", "TTS Settings": "TTS პარამეტრები", "TTS Voice": "", diff --git a/src/lib/i18n/locales/ko-KR/translation.json b/src/lib/i18n/locales/ko-KR/translation.json index bdda272b72a..3f90bd4e0c0 100644 --- a/src/lib/i18n/locales/ko-KR/translation.json +++ b/src/lib/i18n/locales/ko-KR/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "작업 모델은 채팅 및 웹 검색 쿼리에 대한 제목 생성 등의 작업 수행 시 사용됩니다.", "a user": "사용자", "About": "정보", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "접근", "Access Control": "접근 제어", "Accessible to all users": "모든 사용자가 접근 가능", @@ -93,7 +94,7 @@ "Artifacts": "아티팩트", "Ask a question": "질문하기", "Assistant": "어시스턴트", - "Attach file": "파일 첨부", + "Attach file from knowledge": "", "Attention to detail": "세부 사항에 대한 주의", "Attribute for Mail": "", "Attribute for Username": "", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "불분명한 출처를 가진 함수를 설치하지마세요", "Do not install tools from sources you do not fully trust.": "불분명한 출처를 가진 도구를 설치하지마세요", "Document": "문서", + "Document Intelligence": "", + 
"Document Intelligence endpoint and key required.": "", "Documentation": "문서 조사", "Documents": "문서", "does not make any external connections, and your data stays securely on your locally hosted server.": "외부와 어떠한 연결도 하지 않으며, 데이터는 로컬에서 호스팅되는 서버에 안전하게 유지됩니다.", @@ -354,6 +357,7 @@ "Enable Message Rating": "메시지 평가 활성화", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", "Enable New Sign Ups": "새 회원가입 활성화", + "Enable OneDrive": "", "Enable Web Search": "웹 검색 활성화", "Enabled": "활성화됨", "Engine": "엔진", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "청크 오버랩 입력", "Enter Chunk Size": "청크 크기 입력", "Enter description": "설명 입력", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "Github Raw URL 입력", @@ -510,6 +516,7 @@ "General Settings": "일반 설정", "Generate an image": "", "Generate Image": "이미지 생성", + "Generate prompt pair": "", "Generating search query": "검색 쿼리 생성", "Get started": "", "Get started with {{WEBUI_NAME}}": "", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "", "Local Models": "로컬 모델", + "Location access not allowed": "", "Lost": "패배", "LTR": "LTR", "Made by Open WebUI Community": "OpenWebUI 커뮤니티에 의해 개발됨", @@ -718,6 +726,7 @@ "Ollama API settings updated": "", "Ollama Version": "Ollama 버전", "On": "켜기", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "영문자, 숫자 및 하이픈(-)만 허용됨", "Only alphanumeric characters and hyphens are allowed in the command string.": "명령어 문자열에는 영문자, 숫자 및 하이픈(-)만 허용됩니다.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "가지고 있는 컬렉션만 수정 가능합니다, 새 지식 기반을 생성하여 문서를 수정 혹은 추가하십시오", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "엔진 선택", "Select Knowledge": "지식 기반 선택", - "Select model": "모델 선택", "Select only one model to call": "음성 기능을 위해서는 모델을 하나만 
선택해야 합니다.", "Selected model(s) do not support image inputs": "선택한 모델은 이미지 입력을 지원하지 않습니다.", "Semantic distance to query": "쿼리까지 의미적 거리", @@ -957,6 +965,7 @@ "Tags Generation": "태그 생성", "Tags Generation Prompt": "태그 생성 프롬프트", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Talk to model": "", "Tap to interrupt": "탭하여 중단", "Tasks": "", "Tavily API Key": "Tavily API 키", @@ -1041,6 +1050,7 @@ "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "올라마(Ollama)에 접근하는 데 문제가 있나요?", + "Trust Proxy Environment": "", "TTS Model": "TTS 모델", "TTS Settings": "TTS 설정", "TTS Voice": "TTS 음성", diff --git a/src/lib/i18n/locales/lt-LT/translation.json b/src/lib/i18n/locales/lt-LT/translation.json index 282d5a735cf..ba89914fbef 100644 --- a/src/lib/i18n/locales/lt-LT/translation.json +++ b/src/lib/i18n/locales/lt-LT/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "Užduočių modelis naudojamas pokalbių pavadinimų ir paieškos užklausų generavimui.", "a user": "naudotojas", "About": "Apie", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "", "Access Control": "", "Accessible to all users": "", @@ -93,7 +94,7 @@ "Artifacts": "", "Ask a question": "", "Assistant": "", - "Attach file": "Pridėti failą", + "Attach file from knowledge": "", "Attention to detail": "Dėmesys detalėms", "Attribute for Mail": "", "Attribute for Username": "", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "Neinstaliuokite funkcijų iš nepatikimų šaltinių", "Do not install tools from sources you do not fully trust.": "Neinstaliuokite įrankių iš nepatikimų šaltinių", "Document": "Dokumentas", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", 
"Documentation": "Dokumentacija", "Documents": "Dokumentai", "does not make any external connections, and your data stays securely on your locally hosted server.": "neturi jokių išorinių ryšių ir duomenys lieka serveryje.", @@ -354,6 +357,7 @@ "Enable Message Rating": "", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", "Enable New Sign Ups": "Aktyvuoti naujas registracijas", + "Enable OneDrive": "", "Enable Web Search": "Leisti paiešką internete", "Enabled": "Leisti", "Engine": "Variklis", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "Įveskite blokų persidengimą", "Enter Chunk Size": "Įveskite blokų dydį", "Enter description": "", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "Įveskite GitHub Raw nuorodą", @@ -510,6 +516,7 @@ "General Settings": "Bendri nustatymai", "Generate an image": "", "Generate Image": "Generuoti paveikslėlį", + "Generate prompt pair": "", "Generating search query": "Generuoti paieškos užklausą", "Get started": "", "Get started with {{WEBUI_NAME}}": "", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "", "Local Models": "Lokalūs modeliai", + "Location access not allowed": "", "Lost": "", "LTR": "LTR", "Made by Open WebUI Community": "Sukurta OpenWebUI bendruomenės", @@ -718,6 +726,7 @@ "Ollama API settings updated": "", "Ollama Version": "Ollama versija", "On": "Aktyvuota", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "", "Only alphanumeric characters and hyphens are allowed in the command string.": "Leistinos tik raidės, skaičiai ir brūkšneliai.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "", "Select Knowledge": "", - "Select model": "Pasirinkti modelį", "Select 
only one model to call": "Pasirinkite vieną modelį", "Selected model(s) do not support image inputs": "Pasirinkti modeliai nepalaiko vaizdinių užklausų", "Semantic distance to query": "", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Talk to model": "", "Tap to interrupt": "Paspauskite norėdami pertraukti", "Tasks": "", "Tavily API Key": "Tavily API raktas", @@ -1041,6 +1050,7 @@ "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "Problemos prieinant prie Ollama?", + "Trust Proxy Environment": "", "TTS Model": "TTS modelis", "TTS Settings": "TTS parametrai", "TTS Voice": "TTS balsas", diff --git a/src/lib/i18n/locales/ms-MY/translation.json b/src/lib/i18n/locales/ms-MY/translation.json index 8506b3d8b3b..a0ab6b86be6 100644 --- a/src/lib/i18n/locales/ms-MY/translation.json +++ b/src/lib/i18n/locales/ms-MY/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "Model tugas digunakan semasa melaksanakan tugas seperti menjana tajuk untuk perbualan dan pertanyaan carian web.", "a user": "seorang pengguna", "About": "Mengenai", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "", "Access Control": "", "Accessible to all users": "", @@ -93,7 +94,7 @@ "Artifacts": "", "Ask a question": "", "Assistant": "", - "Attach file": "Kepilkan Fail", + "Attach file from knowledge": "", "Attention to detail": "Perincian", "Attribute for Mail": "", "Attribute for Username": "", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "Jangan pasang fungsi daripada sumber yang anda tidak percayai sepenuhnya.", "Do not install tools from sources you do not fully trust.": "Jangan pasang alat daripada 
sumber yang anda tidak percaya sepenuhnya.", "Document": "Dokumen", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "Dokumentasi", "Documents": "Dokumen", "does not make any external connections, and your data stays securely on your locally hosted server.": "tidak membuat sebarang sambungan luaran, dan data anda kekal selamat pada pelayan yang dihoskan ditempat anda", @@ -354,6 +357,7 @@ "Enable Message Rating": "", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", "Enable New Sign Ups": "Benarkan Pendaftaran Baharu", + "Enable OneDrive": "", "Enable Web Search": "Benarkan Carian Web", "Enabled": "Dibenarkan", "Engine": "Enjin", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "Masukkan Tindihan 'Chunk'", "Enter Chunk Size": "Masukkan Saiz 'Chunk'", "Enter description": "", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "Masukkan URL 'Github Raw'", @@ -510,6 +516,7 @@ "General Settings": "Tetapan Umum", "Generate an image": "", "Generate Image": "Jana Imej", + "Generate prompt pair": "", "Generating search query": "Jana pertanyaan carian", "Get started": "", "Get started with {{WEBUI_NAME}}": "", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "", "Local Models": "Model Tempatan", + "Location access not allowed": "", "Lost": "", "LTR": "LTR", "Made by Open WebUI Community": "Dicipta oleh Komuniti OpenWebUI", @@ -718,6 +726,7 @@ "Ollama API settings updated": "", "Ollama Version": "Versi Ollama", "On": "Hidup", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "", "Only alphanumeric characters and hyphens are allowed in the command string.": "Hanya aksara alfanumerik dan sempang dibenarkan dalam rentetan arahan.", "Only collections can be edited, create a new 
knowledge base to edit/add documents.": "", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "", "Select Knowledge": "", - "Select model": "Pilih model", "Select only one model to call": "Pilih hanya satu model untuk dipanggil", "Selected model(s) do not support image inputs": "Model dipilih tidak menyokong input imej", "Semantic distance to query": "", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Talk to model": "", "Tap to interrupt": "Sentuh untuk mengganggu", "Tasks": "", "Tavily API Key": "Kunci API Tavily", @@ -1041,6 +1050,7 @@ "Top P": "'Top P'", "Transformers": "", "Trouble accessing Ollama?": "Masalah mengakses Ollama?", + "Trust Proxy Environment": "", "TTS Model": "Model TTS", "TTS Settings": "Tetapan TTS", "TTS Voice": "Suara TTS", diff --git a/src/lib/i18n/locales/nb-NO/translation.json b/src/lib/i18n/locales/nb-NO/translation.json index 10823235be3..f7208e6d96f 100644 --- a/src/lib/i18n/locales/nb-NO/translation.json +++ b/src/lib/i18n/locales/nb-NO/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "En oppgavemodell brukes når du utfører oppgaver som å generere titler for samtaler eller utfører søkeforespørsler på nettet", "a user": "en bruker", "About": "Om", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "Tilgang", "Access Control": "Tilgangskontroll", "Accessible to all users": "Tilgjengelig for alle brukere", @@ -93,7 +94,7 @@ "Artifacts": "Artifakter", "Ask a question": "Still et spørsmål", "Assistant": "Assistent", - "Attach file": "Legg ved fil", + "Attach file from knowledge": "", "Attention to detail": "Fokus på detaljer", "Attribute for Mail": "", "Attribute for 
Username": "Attributt for brukernavn", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "Ikke installer funksjoner fra kilder du ikke stoler på.", "Do not install tools from sources you do not fully trust.": "Ikke installer verktøy fra kilder du ikke stoler på.", "Document": "Dokument", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "Dokumentasjon", "Documents": "Dokumenter", "does not make any external connections, and your data stays securely on your locally hosted server.": "ikke ingen tilkobling til eksterne tjenester. Dataene dine forblir sikkert på den lokale serveren.", @@ -354,6 +357,7 @@ "Enable Message Rating": "Aktivert vurdering av meldinger", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Aktiver Mirostat-sampling for kontroll av perpleksitet. (Standard: 0, 0 = deaktivert, 1 = Mirostat, 2 = Mirostat 2.0)", "Enable New Sign Ups": "Aktiver nye registreringer", + "Enable OneDrive": "", "Enable Web Search": "Aktiver websøk", "Enabled": "Aktivert", "Engine": "Motor", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "Angi Chunk-overlapp", "Enter Chunk Size": "Angi Chunk-størrelse", "Enter description": "Angi beskrivelse", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "Angi Github Raw-URL", @@ -510,6 +516,7 @@ "General Settings": "Generelle innstillinger", "Generate an image": "", "Generate Image": "Generer bilde", + "Generate prompt pair": "", "Generating search query": "Genererer søkespørring", "Get started": "Kom i gang", "Get started with {{WEBUI_NAME}}": "Kom i gang med {{WEBUI_NAME}}", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "Lokal", "Local Models": "Lokale modeller", + "Location access not allowed": "", "Lost": "Tapt", 
"LTR": "LTR", "Made by Open WebUI Community": "Laget av OpenWebUI-fellesskapet", @@ -718,6 +726,7 @@ "Ollama API settings updated": "API-innstillinger for Ollama er oppdatert", "Ollama Version": "Ollama-versjon", "On": "Aktivert", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "Bare alfanumeriske tegn og bindestreker er tillatt", "Only alphanumeric characters and hyphens are allowed in the command string.": "Bare alfanumeriske tegn og bindestreker er tillatt i kommandostrengen.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "Bare samlinger kan redigeres, eller lag en ny kunnskapsbase for å kunne redigere / legge til dokumenter.", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "Velg motor", "Select Knowledge": "Velg kunnskap", - "Select model": "Velg modell", "Select only one model to call": "Velg bare én modell som skal kalles", "Selected model(s) do not support image inputs": "Valgte modell(er) støtter ikke bildeinndata", "Semantic distance to query": "Semantisk distanse til spørring", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "Ledetekst for genering av etikett", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Tail free sampling brukes til å redusere innvirkningen av mindre sannsynlige tokens fra utdataene. En høyere verdi (f.eks. 2,0) vil redusere effekten mer, mens en verdi på 1,0 deaktiverer denne innstillingen. 
(standard: 1)", + "Talk to model": "", "Tap to interrupt": "Trykk for å avbryte", "Tasks": "", "Tavily API Key": "API-nøkkel for Tavily", @@ -1041,6 +1050,7 @@ "Top P": "Top P", "Transformers": "Transformatorer", "Trouble accessing Ollama?": "Problemer med å koble til Ollama?", + "Trust Proxy Environment": "", "TTS Model": "TTS-modell", "TTS Settings": "TTS-innstillinger", "TTS Voice": "TTS-stemme", diff --git a/src/lib/i18n/locales/nl-NL/translation.json b/src/lib/i18n/locales/nl-NL/translation.json index 36f79bf145d..3423bee8d41 100644 --- a/src/lib/i18n/locales/nl-NL/translation.json +++ b/src/lib/i18n/locales/nl-NL/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "Een taakmodel wordt gebruikt bij het uitvoeren van taken zoals het genereren van titels voor chats en zoekopdrachten op het internet", "a user": "een gebruiker", "About": "Over", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "Toegang", "Access Control": "Toegangsbeheer", "Accessible to all users": "Toegankelijk voor alle gebruikers", @@ -93,7 +94,7 @@ "Artifacts": "Artefacten", "Ask a question": "Stel een vraag", "Assistant": "Assistent", - "Attach file": "Voeg een bestand toe", + "Attach file from knowledge": "", "Attention to detail": "Attention to detail", "Attribute for Mail": "", "Attribute for Username": "Attribuut voor gebruikersnaam", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "Installeer geen functies vanuit bronnen die je niet volledig vertrouwt", "Do not install tools from sources you do not fully trust.": "Installeer geen tools vanuit bronnen die je niet volledig vertrouwt.", "Document": "Document", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "Documentatie", "Documents": "Documenten", "does not make any external connections, and your data stays securely on your locally 
hosted server.": "maakt geen externe verbindingen, en je gegevens blijven veilig op je lokaal gehoste server.", @@ -354,6 +357,7 @@ "Enable Message Rating": "Schakel berichtbeoordeling in", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Mirostat-sampling inschakelen voor het regelen van de perplexiteit. (Standaard: 0, 0 = uitgeschakeld, 1 = Mirostat, 2 = Mirostat 2.0)", "Enable New Sign Ups": "Schakel nieuwe registraties in", + "Enable OneDrive": "", "Enable Web Search": "Zoeken op het web inschakelen", "Enabled": "Ingeschakeld", "Engine": "Engine", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "Voeg Chunk Overlap toe", "Enter Chunk Size": "Voeg Chunk Size toe", "Enter description": "Voer beschrijving in", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "Voer de Github Raw-URL in", @@ -510,6 +516,7 @@ "General Settings": "Algemene instellingen", "Generate an image": "", "Generate Image": "Genereer afbeelding", + "Generate prompt pair": "", "Generating search query": "Zoekopdracht genereren", "Get started": "Begin", "Get started with {{WEBUI_NAME}}": "Begin met {{WEBUI_NAME}}", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "Lokaal", "Local Models": "Lokale modellen", + "Location access not allowed": "", "Lost": "Verloren", "LTR": "LNR", "Made by Open WebUI Community": "Gemaakt door OpenWebUI Community", @@ -718,6 +726,7 @@ "Ollama API settings updated": "Ollama API-instellingen bijgewerkt", "Ollama Version": "Ollama Versie", "On": "Aan", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "Alleen alfanumerieke tekens en koppeltekens zijn toegestaan", "Only alphanumeric characters and hyphens are allowed in the command string.": "Alleen alfanumerieke karakters en streepjes zijn toegestaan in de commando string.", 
"Only collections can be edited, create a new knowledge base to edit/add documents.": "Alleen verzamelinge kunnen gewijzigd worden, maak een nieuwe kennisbank aan om bestanden aan te passen/toe te voegen", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "Selecteer Engine", "Select Knowledge": "Selecteer kennis", - "Select model": "Selecteer een model", "Select only one model to call": "Selecteer maar één model om aan te roepen", "Selected model(s) do not support image inputs": "Geselecteerde modellen ondersteunen geen beeldinvoer", "Semantic distance to query": "Semantische afstand tot query", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "Prompt voor taggeneratie", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Tail free sampling wordt gebruikt om de impact van minder waarschijnlijke tokens uit de uitvoer te verminderen. Een hogere waarde (bv. 2,0) zal de impact meer verminderen, terwijl een waarde van 1,0 deze instelling uitschakelt. 
(standaard: 1)", + "Talk to model": "", "Tap to interrupt": "Tik om te onderbreken", "Tasks": "", "Tavily API Key": "Tavily API-sleutel", @@ -1041,6 +1050,7 @@ "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "Problemen met toegang tot Ollama?", + "Trust Proxy Environment": "", "TTS Model": "TTS Model", "TTS Settings": "TTS instellingen", "TTS Voice": "TTS Stem", diff --git a/src/lib/i18n/locales/pa-IN/translation.json b/src/lib/i18n/locales/pa-IN/translation.json index ee638bb229d..c22d6042e02 100644 --- a/src/lib/i18n/locales/pa-IN/translation.json +++ b/src/lib/i18n/locales/pa-IN/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "ਚੈਟਾਂ ਅਤੇ ਵੈੱਬ ਖੋਜ ਪੁੱਛਗਿੱਛਾਂ ਵਾਸਤੇ ਸਿਰਲੇਖ ਤਿਆਰ ਕਰਨ ਵਰਗੇ ਕਾਰਜ ਾਂ ਨੂੰ ਕਰਦੇ ਸਮੇਂ ਇੱਕ ਕਾਰਜ ਮਾਡਲ ਦੀ ਵਰਤੋਂ ਕੀਤੀ ਜਾਂਦੀ ਹੈ", "a user": "ਇੱਕ ਉਪਭੋਗਤਾ", "About": "ਬਾਰੇ", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "", "Access Control": "", "Accessible to all users": "", @@ -93,7 +94,7 @@ "Artifacts": "", "Ask a question": "", "Assistant": "", - "Attach file": "ਫਾਈਲ ਜੋੜੋ", + "Attach file from knowledge": "", "Attention to detail": "ਵੇਰਵੇ 'ਤੇ ਧਿਆਨ", "Attribute for Mail": "", "Attribute for Username": "", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", "Document": "ਡਾਕੂਮੈਂਟ", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "", "Documents": "ਡਾਕੂਮੈਂਟ", "does not make any external connections, and your data stays securely on your locally hosted server.": "ਕੋਈ ਬਾਹਰੀ ਕਨੈਕਸ਼ਨ ਨਹੀਂ ਬਣਾਉਂਦਾ, ਅਤੇ ਤੁਹਾਡਾ ਡਾਟਾ ਤੁਹਾਡੇ ਸਥਾਨਕ ਸਰਵਰ 'ਤੇ ਸੁਰੱਖਿਅਤ ਰਹਿੰਦਾ ਹੈ।", @@ -354,6 +357,7 @@ "Enable Message Rating": "", "Enable Mirostat sampling for controlling perplexity. 
(Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", "Enable New Sign Ups": "ਨਵੇਂ ਸਾਈਨ ਅਪ ਯੋਗ ਕਰੋ", + "Enable OneDrive": "", "Enable Web Search": "ਵੈੱਬ ਖੋਜ ਨੂੰ ਸਮਰੱਥ ਕਰੋ", "Enabled": "", "Engine": "", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "ਚੰਕ ਓਵਰਲੈਪ ਦਰਜ ਕਰੋ", "Enter Chunk Size": "ਚੰਕ ਆਕਾਰ ਦਰਜ ਕਰੋ", "Enter description": "", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "Github ਕੱਚਾ URL ਦਾਖਲ ਕਰੋ", @@ -510,6 +516,7 @@ "General Settings": "ਆਮ ਸੈਟਿੰਗਾਂ", "Generate an image": "", "Generate Image": "", + "Generate prompt pair": "", "Generating search query": "ਖੋਜ ਪੁੱਛਗਿੱਛ ਤਿਆਰ ਕਰਨਾ", "Get started": "", "Get started with {{WEBUI_NAME}}": "", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "", "Local Models": "", + "Location access not allowed": "", "Lost": "", "LTR": "LTR", "Made by Open WebUI Community": "ਓਪਨਵੈਬਯੂਆਈ ਕਮਿਊਨਿਟੀ ਦੁਆਰਾ ਬਣਾਇਆ ਗਿਆ", @@ -718,6 +726,7 @@ "Ollama API settings updated": "", "Ollama Version": "ਓਲਾਮਾ ਵਰਜਨ", "On": "ਚਾਲੂ", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "", "Only alphanumeric characters and hyphens are allowed in the command string.": "ਕਮਾਂਡ ਸਤਰ ਵਿੱਚ ਸਿਰਫ਼ ਅਲਫ਼ਾਨਯੂਮੈਰਿਕ ਅੱਖਰ ਅਤੇ ਹਾਈਫਨ ਦੀ ਆਗਿਆ ਹੈ।", "Only collections can be edited, create a new knowledge base to edit/add documents.": "", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "", "Select Knowledge": "", - "Select model": "ਮਾਡਲ ਚੁਣੋ", "Select only one model to call": "", "Selected model(s) do not support image inputs": "ਚੁਣੇ ਗਏ ਮਾਡਲ(ਆਂ) ਚਿੱਤਰ ਇਨਪੁੱਟਾਂ ਦਾ ਸਮਰਥਨ ਨਹੀਂ ਕਰਦੇ", "Semantic distance to query": "", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Talk to model": "", "Tap to interrupt": "", "Tasks": "", "Tavily API Key": "", @@ -1041,6 +1050,7 @@ "Top P": "ਸਿਖਰ P", "Transformers": "", "Trouble accessing Ollama?": "ਓਲਾਮਾ ਤੱਕ ਪਹੁੰਚਣ ਵਿੱਚ ਮੁਸ਼ਕਲ?", + "Trust Proxy Environment": "", "TTS Model": "", "TTS Settings": "TTS ਸੈਟਿੰਗਾਂ", "TTS Voice": "", diff --git a/src/lib/i18n/locales/pl-PL/translation.json b/src/lib/i18n/locales/pl-PL/translation.json index 4bae0c769c1..51b5562ccc9 100644 --- a/src/lib/i18n/locales/pl-PL/translation.json +++ b/src/lib/i18n/locales/pl-PL/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "Model zadań jest wykorzystywany podczas realizacji zadań, takich jak generowanie tytułów rozmów i zapytań wyszukiwania internetowego.", "a user": "użytkownik", "About": "O nas", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "Dostęp", "Access Control": "Kontrola dostępu", "Accessible to all users": "Dostępny dla wszystkich użytkowników", @@ -93,7 +94,7 @@ "Artifacts": "Artefakty", "Ask a question": "Zadaj pytanie", "Assistant": "Asystent", - "Attach file": "Dołącz plik", + "Attach file from knowledge": "", "Attention to detail": "Dbałość o szczegóły", "Attribute for Mail": "Atrybut dla poczty", "Attribute for Username": "Atrybut dla nazwy użytkownika", @@ -182,6 +183,7 @@ "Code execution": "Wykonanie kodu", "Code Execution": "Wykonanie kodu", "Code Execution Engine": "Silnik wykonawczy kodu", + "Code Execution Timeout": "", "Code formatted successfully": "Kod został sformatowany pomyślnie.", "Code Interpreter": "Interpreter kodu", "Code Interpreter Engine": "Silnik interpretatora kodu", @@ -304,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "Nie instaluj funkcji ze źródeł, którym nie ufasz w pełni.", "Do not install tools from sources 
you do not fully trust.": "Nie instaluj narzędzi ze źródeł, którym nie ufasz w pełni.", "Document": "Dokument", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "Dokumentacja", "Documents": "Dokumenty", "does not make any external connections, and your data stays securely on your locally hosted server.": "nie nawiązuje żadnych zewnętrznych połączeń, a Twoje dane pozostają bezpiecznie na Twoim lokalnie hostowanym serwerze.", @@ -321,6 +325,7 @@ "Draw": "Rysuj", "Drop any files here to add to the conversation": "Przeciągnij i upuść pliki tutaj, aby dodać je do rozmowy.", "e.g. '30s','10m'. Valid time units are 's', 'm', 'h'.": "np. '30s', '10m'. Poprawne jednostki czasu to: 's' (sekunda), 'm' (minuta), 'h' (godzina).", + "e.g. 60": "", "e.g. A filter to remove profanity from text": "np. Filtr do usuwania wulgaryzmów z tekstu", "e.g. My Filter": "np. Mój filtr", "e.g. My Tools": "np. Moje narzędzia", @@ -352,6 +357,7 @@ "Enable Message Rating": "Włącz ocenianie wiadomości", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Włącz próbkowanie Mirostat w celu kontrolowania perplexity. (Domyślnie: 0, 0 = Wyłączone, 1 = Mirostat, 2 = Mirostat 2.0)", "Enable New Sign Ups": "Włącz nowe rejestracje", + "Enable OneDrive": "", "Enable Web Search": "Włączanie wyszukiwania internetowego", "Enabled": "Włączone", "Engine": "Silnik", @@ -370,6 +376,8 @@ "Enter Chunk Overlap": "Wprowadź nakładanie się bloków", "Enter Chunk Size": "Wprowadź wielkość bloku", "Enter description": "Wprowadź opis", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "Wprowadź domeny oddzielone przecinkami (np. 
example.com, site.org)", "Enter Exa API Key": "Wprowadź klucz API Exa", "Enter Github Raw URL": "Wprowadź surowy adres URL usługi GitHub", @@ -408,6 +416,7 @@ "Enter Tavily API Key": "Wprowadź klucz API Tavily", "Enter the public URL of your WebUI. This URL will be used to generate links in the notifications.": "Wprowadź publiczny adres URL Twojego WebUI. Ten adres URL zostanie użyty do generowania linków w powiadomieniach.", "Enter Tika Server URL": "Wprowadź adres URL serwera Tika", + "Enter timeout in seconds": "", "Enter Top K": "Wprowadź {Top K}", "Enter URL (e.g. http://127.0.0.1:7860/)": "Podaj adres URL (np. http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "Wprowadź adres URL (np. http://localhost:11434)", @@ -500,10 +509,14 @@ "Functions allow arbitrary code execution": "Funkcje umożliwiają wykonanie dowolnego kodu", "Functions allow arbitrary code execution.": "Funkcje umożliwiają wykonanie dowolnego kodu.", "Functions imported successfully": "Funkcje zostały pomyślnie zaimportowane", + "Gemini": "", + "Gemini API Config": "", + "Gemini API Key is required.": "", "General": "Ogólne", "General Settings": "Ustawienia ogólne", "Generate an image": "Wygeneruj obraz", "Generate Image": "Wygeneruj obraz", + "Generate prompt pair": "", "Generating search query": "Tworzenie zapytania wyszukiwania", "Get started": "Rozpocznij", "Get started with {{WEBUI_NAME}}": "Rozpocznij pracę z {{WEBUI_NAME}}", @@ -609,6 +622,7 @@ "Loading Kokoro.js...": "Wczytywanie Kokoro.js...", "Local": "Lokalny", "Local Models": "Modele lokalne", + "Location access not allowed": "", "Lost": "Przegrał", "LTR": "LTR", "Made by Open WebUI Community": "Opracowane przez społeczność Open WebUI", @@ -712,6 +726,7 @@ "Ollama API settings updated": "Ustawienia API Ollama zostały zaktualizowane", "Ollama Version": "Wersja Ollama", "On": "Włączony", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "Dozwolone są tylko znaki alfanumeryczne i myślniki", 
"Only alphanumeric characters and hyphens are allowed in the command string.": "W komendzie dozwolone są wyłącznie znaki alfanumeryczne i myślniki.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "Tylko kolekcje można edytować, utwórz nową bazę wiedzy, aby edytować/dodawać dokumenty.", @@ -874,7 +889,6 @@ "Select an Ollama instance": "Wybierz instancję Ollama", "Select Engine": "Wybierz silnik", "Select Knowledge": "Wybierz wiedzę", - "Select model": "Wybierz model", "Select only one model to call": "Wybierz tylko jeden model do wywołania", "Selected model(s) do not support image inputs": "Wybrane modele nie obsługują danych wejściowych w formie obrazu", "Semantic distance to query": "Odległość semantyczna od zapytania", @@ -951,6 +965,7 @@ "Tags Generation": "Generowanie tagów", "Tags Generation Prompt": "Podpowiedź do generowania tagów", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Próbkowanie bez ogona jest używane do zmniejszenia wpływu mniej prawdopodobnych tokenów na wyjście. Wyższa wartość (np. 2,0) zmniejszy ten wpływ bardziej, podczas gdy wartość 1,0 wyłącza to ustawienie. (domyślnie: 1)", + "Talk to model": "", "Tap to interrupt": "Kliknij, aby przerwać", "Tasks": "Zadania", "Tavily API Key": "Klucz API Tavily", @@ -1035,6 +1050,7 @@ "Top P": "Najlepsze P", "Transformers": "Transformery", "Trouble accessing Ollama?": "Czy masz problemy z dostępem do Ollama?", + "Trust Proxy Environment": "", "TTS Model": "Model TTS", "TTS Settings": "Ustawienia syntezatora mowy", "TTS Voice": "Głos TTS", @@ -1141,4 +1157,4 @@ "Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. 
However, the chosen funding platform might have its own fees.": "Cała Twoja wpłata trafi bezpośrednio do dewelopera wtyczki; Open WebUI nie pobiera żadnej prowizji. Należy jednak pamiętać, że wybrana platforma finansowania może mieć własne opłaty.", "Youtube": "Youtube", "Youtube Loader Settings": "Ustawienia pobierania z YouTube" -} \ No newline at end of file +} diff --git a/src/lib/i18n/locales/pt-BR/translation.json b/src/lib/i18n/locales/pt-BR/translation.json index 421bb8a8a58..cb3b9c15106 100644 --- a/src/lib/i18n/locales/pt-BR/translation.json +++ b/src/lib/i18n/locales/pt-BR/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "Um modelo de tarefa é usado ao realizar tarefas como gerar títulos para chats e consultas de pesquisa na web", "a user": "um usuário", "About": "Sobre", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "Acesso", "Access Control": "Controle de Acesso", "Accessible to all users": "Accessível para todos os usuários", @@ -93,7 +94,7 @@ "Artifacts": "Artefatos", "Ask a question": "Faça uma pergunta", "Assistant": "Assistente", - "Attach file": "Anexar arquivo", + "Attach file from knowledge": "", "Attention to detail": "Atenção aos detalhes", "Attribute for Mail": "", "Attribute for Username": "Atribuir para nome de usuário", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "Não instale funções de fontes que você não confia totalmente.", "Do not install tools from sources you do not fully trust.": "Não instale ferramentas de fontes que você não confia totalmente.", "Document": "Documento", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "Documentação", "Documents": "Documentos", "does not make any external connections, and your data stays securely on your locally hosted server.": "não faz nenhuma conexão externa, e seus dados 
permanecem seguros no seu servidor local.", @@ -354,6 +357,7 @@ "Enable Message Rating": "Ativar Avaliação de Mensagens", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Habilite a amostragem Mirostat para controlar a perplexidade. (Padrão: 0, 0 = Desativado, 1 = Mirostat, 2 = Mirostat 2.0)", "Enable New Sign Ups": "Ativar Novos Cadastros", + "Enable OneDrive": "", "Enable Web Search": "Ativar Pesquisa na Web", "Enabled": "Ativado", "Engine": "Motor", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "Digite a Sobreposição de Chunk", "Enter Chunk Size": "Digite o Tamanho do Chunk", "Enter description": "Digite a descrição", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "Digite a URL bruta do Github", @@ -510,6 +516,7 @@ "General Settings": "Configurações Gerais", "Generate an image": "", "Generate Image": "Gerar Imagem", + "Generate prompt pair": "", "Generating search query": "Gerando consulta de pesquisa", "Get started": "Iniciar", "Get started with {{WEBUI_NAME}}": "Iniciar com {{WEBUI_NAME}}", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "", "Local Models": "Modelos Locais", + "Location access not allowed": "", "Lost": "Perdeu", "LTR": "Esquerda para Direita", "Made by Open WebUI Community": "Feito pela Comunidade OpenWebUI", @@ -718,6 +726,7 @@ "Ollama API settings updated": "Configurações da API Ollama atualizadas", "Ollama Version": "Versão Ollama", "On": "Ligado", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "Somente caracteres alfanuméricos e hífens são permitidos", "Only alphanumeric characters and hyphens are allowed in the command string.": "Apenas caracteres alfanuméricos e hífens são permitidos na string de comando.", "Only collections can be edited, create a new knowledge base to edit/add 
documents.": "Somente coleções podem ser editadas. Crie uma nova base de conhecimento para editar/adicionar documentos.", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "Selecionar Motor", "Select Knowledge": "Selecionar Conhecimento", - "Select model": "Selecionar modelo", "Select only one model to call": "Selecione apenas um modelo para chamar", "Selected model(s) do not support image inputs": "Modelo(s) selecionado(s) não suportam entradas de imagem", "Semantic distance to query": "Distância semântica para consulta", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "Prompt para geração de Tags", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "A amostragem *tail free* é usada para reduzir o impacto de tokens menos prováveis na saída. Um valor mais alto (por exemplo, 2,0) reduzirá mais o impacto, enquanto um valor de 1,0 desativa essa configuração. 
(Padrão: 1)", + "Talk to model": "", "Tap to interrupt": "Toque para interromper", "Tasks": "", "Tavily API Key": "Chave da API Tavily", @@ -1041,6 +1050,7 @@ "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "Problemas para acessar o Ollama?", + "Trust Proxy Environment": "", "TTS Model": "Modelo TTS", "TTS Settings": "Configurações TTS", "TTS Voice": "Voz TTS", diff --git a/src/lib/i18n/locales/pt-PT/translation.json b/src/lib/i18n/locales/pt-PT/translation.json index 10334c5c694..d6630be9fba 100644 --- a/src/lib/i18n/locales/pt-PT/translation.json +++ b/src/lib/i18n/locales/pt-PT/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "Um modelo de tarefa é usado ao executar tarefas como gerar títulos para bate-papos e consultas de pesquisa na Web", "a user": "um utilizador", "About": "Acerca de", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "", "Access Control": "", "Accessible to all users": "", @@ -93,7 +94,7 @@ "Artifacts": "", "Ask a question": "", "Assistant": "", - "Attach file": "Anexar ficheiro", + "Attach file from knowledge": "", "Attention to detail": "Detalhado", "Attribute for Mail": "", "Attribute for Username": "", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", "Document": "Documento", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "Documentação", "Documents": "Documentos", "does not make any external connections, and your data stays securely on your locally hosted server.": "não faz conexões externas e os seus dados permanecem seguros no seu servidor alojado localmente.", @@ -354,6 +357,7 @@ "Enable Message Rating": "", "Enable Mirostat sampling for controlling perplexity. 
(Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", "Enable New Sign Ups": "Ativar Novas Inscrições", + "Enable OneDrive": "", "Enable Web Search": "Ativar pesquisa na Web", "Enabled": "", "Engine": "", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "Escreva a Sobreposição de Fragmento", "Enter Chunk Size": "Escreva o Tamanho do Fragmento", "Enter description": "", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "Escreva o URL cru do Github", @@ -510,6 +516,7 @@ "General Settings": "Configurações Gerais", "Generate an image": "", "Generate Image": "Gerar imagem", + "Generate prompt pair": "", "Generating search query": "A gerar a consulta da pesquisa", "Get started": "", "Get started with {{WEBUI_NAME}}": "", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "", "Local Models": "Modelos Locais", + "Location access not allowed": "", "Lost": "", "LTR": "LTR", "Made by Open WebUI Community": "Feito pela Comunidade OpenWebUI", @@ -718,6 +726,7 @@ "Ollama API settings updated": "", "Ollama Version": "Versão do Ollama", "On": "Ligado", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "", "Only alphanumeric characters and hyphens are allowed in the command string.": "Apenas caracteres alfanuméricos e hífens são permitidos na string de comando.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "", "Select Knowledge": "", - "Select model": "Selecione o modelo", "Select only one model to call": "Selecione apenas um modelo para a chamada", "Selected model(s) do not support image inputs": "O(s) modelo(s) selecionado(s) não suporta(m) entradas de imagem", "Semantic distance to query": "", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "", "Tail free 
sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Talk to model": "", "Tap to interrupt": "", "Tasks": "", "Tavily API Key": "", @@ -1041,6 +1050,7 @@ "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "Problemas a aceder ao Ollama?", + "Trust Proxy Environment": "", "TTS Model": "Modelo TTS", "TTS Settings": "Configurações TTS", "TTS Voice": "Voz TTS", diff --git a/src/lib/i18n/locales/ro-RO/translation.json b/src/lib/i18n/locales/ro-RO/translation.json index 68cd04df411..8f6d4f7fcd6 100644 --- a/src/lib/i18n/locales/ro-RO/translation.json +++ b/src/lib/i18n/locales/ro-RO/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "Un model de sarcină este utilizat pentru realizarea unor sarcini precum generarea de titluri pentru conversații și interogări de căutare pe web", "a user": "un utilizator", "About": "Despre", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "", "Access Control": "", "Accessible to all users": "", @@ -93,7 +94,7 @@ "Artifacts": "Artefacte", "Ask a question": "Pune o întrebare", "Assistant": "Asistent", - "Attach file": "Atașează fișier", + "Attach file from knowledge": "", "Attention to detail": "Atenție la detalii", "Attribute for Mail": "", "Attribute for Username": "", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "Nu instalați funcții din surse în care nu aveți încredere completă.", "Do not install tools from sources you do not fully trust.": "Nu instalați instrumente din surse în care nu aveți încredere completă.", "Document": "Document", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "Documentație", "Documents": "Documente", "does not make any external connections, 
and your data stays securely on your locally hosted server.": "nu face nicio conexiune externă, iar datele tale rămân în siguranță pe serverul găzduit local.", @@ -354,6 +357,7 @@ "Enable Message Rating": "Activează Evaluarea Mesajelor", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", "Enable New Sign Ups": "Activează Înscrierile Noi", + "Enable OneDrive": "", "Enable Web Search": "Activează Căutarea pe Web", "Enabled": "Activat", "Engine": "Motor", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "Introduceți Suprapunerea Blocului", "Enter Chunk Size": "Introduceți Dimensiunea Blocului", "Enter description": "Introduceți descrierea", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "Introduceți URL-ul Raw de pe Github", @@ -510,6 +516,7 @@ "General Settings": "Setări Generale", "Generate an image": "", "Generate Image": "Generează Imagine", + "Generate prompt pair": "", "Generating search query": "Se generează interogarea de căutare", "Get started": "", "Get started with {{WEBUI_NAME}}": "", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "", "Local Models": "Modele Locale", + "Location access not allowed": "", "Lost": "Pierdut", "LTR": "LTR", "Made by Open WebUI Community": "Realizat de Comunitatea OpenWebUI", @@ -718,6 +726,7 @@ "Ollama API settings updated": "", "Ollama Version": "Versiune Ollama", "On": "Activat", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "", "Only alphanumeric characters and hyphens are allowed in the command string.": "Doar caracterele alfanumerice și cratimele sunt permise în șirul de comandă.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "Doar colecțiile pot fi editate, creați o nouă bază de cunoștințe pentru a edita/adăuga documente.", @@ 
-880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "Selectează motorul", "Select Knowledge": "Selectarea cunoștințelor (Knowledge Selection) este un proces esențial în multiple domenii, incluzând inteligența artificială și învățarea automată. Aceasta presupune alegerea corectă a informațiilor sau datelor relevante dintr-un set mai mare pentru a le utiliza în analize, modele sau sisteme specifice. De exemplu, în învățarea automată, selectarea caracteristicilor este un aspect al selectării cunoștințelor și implică alegerea celor mai relevante date de intrare care contribuie la îmbunătățirea preciziei modelului.", - "Select model": "Selectează model", "Select only one model to call": "Selectează doar un singur model pentru apel", "Selected model(s) do not support image inputs": "Modelul(e) selectat(e) nu suportă intrări de imagine", "Semantic distance to query": "Distanța semantică față de interogare", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "Generarea de Etichete Prompt", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. 
(default: 1)": "", + "Talk to model": "", "Tap to interrupt": "Apasă pentru a întrerupe", "Tasks": "", "Tavily API Key": "Cheie API Tavily", @@ -1041,6 +1050,7 @@ "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "Probleme la accesarea Ollama?", + "Trust Proxy Environment": "", "TTS Model": "Model TTS", "TTS Settings": "Setări TTS", "TTS Voice": "Voce TTS", diff --git a/src/lib/i18n/locales/ru-RU/translation.json b/src/lib/i18n/locales/ru-RU/translation.json index a06dbf68b1c..83db06479e3 100644 --- a/src/lib/i18n/locales/ru-RU/translation.json +++ b/src/lib/i18n/locales/ru-RU/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "Модель задач используется при выполнении таких задач, как генерация заголовков для чатов и поисковых запросов в Интернете", "a user": "пользователь", "About": "О программе", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "Доступ", "Access Control": "Контроль доступа", "Accessible to all users": "Доступно всем пользователям", @@ -93,7 +94,7 @@ "Artifacts": "Артефакты", "Ask a question": "Задать вопрос", "Assistant": "Ассистент", - "Attach file": "Прикрепить файл", + "Attach file from knowledge": "", "Attention to detail": "Внимание к деталям", "Attribute for Mail": "", "Attribute for Username": "Атрибут для имени пользователя", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "Не устанавливайте функции из источников, которым вы не полностью доверяете.", "Do not install tools from sources you do not fully trust.": "Не устанавливайте инструменты из источников, которым вы не полностью доверяете.", "Document": "Документ", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "Документация", "Documents": "Документы", "does not make any external connections, and your data stays securely on your locally hosted server.": "не 
устанавливает никаких внешних соединений, и ваши данные надежно хранятся на вашем локальном сервере.", @@ -354,6 +357,7 @@ "Enable Message Rating": "Разрешить оценку ответов", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", "Enable New Sign Ups": "Разрешить новые регистрации", + "Enable OneDrive": "", "Enable Web Search": "Включить поиск в Интернете", "Enabled": "Включено", "Engine": "Движок", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "Введите перекрытие фрагмента", "Enter Chunk Size": "Введите размер фрагмента", "Enter description": "Введите описание", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "Введите необработанный URL-адрес Github", @@ -510,6 +516,7 @@ "General Settings": "Общие настройки", "Generate an image": "", "Generate Image": "Сгенерировать изображение", + "Generate prompt pair": "", "Generating search query": "Генерация поискового запроса", "Get started": "", "Get started with {{WEBUI_NAME}}": "", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "", "Local Models": "Локальные модели", + "Location access not allowed": "", "Lost": "", "LTR": "LTR", "Made by Open WebUI Community": "Сделано сообществом OpenWebUI", @@ -718,6 +726,7 @@ "Ollama API settings updated": "", "Ollama Version": "Версия Ollama", "On": "Включено", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "Разрешены только буквенно-цифровые символы и дефисы.", "Only alphanumeric characters and hyphens are allowed in the command string.": "В строке команды разрешено использовать только буквенно-цифровые символы и дефисы.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "Выберите движок", "Select Knowledge": "", - "Select 
model": "Выберите модель", "Select only one model to call": "Выберите только одну модель для вызова", "Selected model(s) do not support image inputs": "Выбранные модели не поддерживают ввод изображений", "Semantic distance to query": "", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Talk to model": "", "Tap to interrupt": "Нажмите, чтобы прервать", "Tasks": "", "Tavily API Key": "Ключ API Tavily", @@ -1041,6 +1050,7 @@ "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "Проблемы с доступом к Ollama?", + "Trust Proxy Environment": "", "TTS Model": "Модель TTS", "TTS Settings": "Настройки TTS", "TTS Voice": "Голос TTS", diff --git a/src/lib/i18n/locales/sk-SK/translation.json b/src/lib/i18n/locales/sk-SK/translation.json index 5d8ebcd9184..3be5f72a9b2 100644 --- a/src/lib/i18n/locales/sk-SK/translation.json +++ b/src/lib/i18n/locales/sk-SK/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "Model úloh sa používa pri vykonávaní úloh, ako je generovanie názvov pre chaty a vyhľadávacie dotazy na webe.", "a user": "užívateľ", "About": "O programe", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "Prístup", "Access Control": "", "Accessible to all users": "Prístupné pre všetkých užívateľov", @@ -93,7 +94,7 @@ "Artifacts": "Artefakty", "Ask a question": "Opýtajte sa otázku", "Assistant": "Asistent", - "Attach file": "Pripojiť súbor", + "Attach file from knowledge": "", "Attention to detail": "Pozornosť k detailom", "Attribute for Mail": "", "Attribute for Username": "", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "Neinštalujte funkcie zo zdrojov, ktorým plne 
nedôverujete.", "Do not install tools from sources you do not fully trust.": "Neinštalujte nástroje zo zdrojov, ktorým plne nedôverujete.", "Document": "Dokument", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "Dokumentácia", "Documents": "Dokumenty", "does not make any external connections, and your data stays securely on your locally hosted server.": "nevytvára žiadne externé pripojenia a vaše dáta zostávajú bezpečne na vašom lokálnom serveri.", @@ -354,6 +357,7 @@ "Enable Message Rating": "Povoliť hodnotenie správ", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", "Enable New Sign Ups": "Povoliť nové registrácie", + "Enable OneDrive": "", "Enable Web Search": "Povoliť webové vyhľadávanie", "Enabled": "Povolené", "Engine": "Engine", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "Zadajte prekryv časti", "Enter Chunk Size": "Zadajte veľkosť časti", "Enter description": "Zadajte popis", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "Zadajte URL adresu Github Raw", @@ -510,6 +516,7 @@ "General Settings": "Všeobecné nastavenia", "Generate an image": "", "Generate Image": "Vygenerovať obrázok", + "Generate prompt pair": "", "Generating search query": "Generovanie vyhľadávacieho dotazu", "Get started": "", "Get started with {{WEBUI_NAME}}": "", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "", "Local Models": "Lokálne modely", + "Location access not allowed": "", "Lost": "Stratený", "LTR": "LTR", "Made by Open WebUI Community": "Vytvorené komunitou OpenWebUI", @@ -718,6 +726,7 @@ "Ollama API settings updated": "", "Ollama Version": "Verzia Ollama", "On": "Zapnuté", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "", "Only alphanumeric characters and 
hyphens are allowed in the command string.": "Príkazový reťazec môže obsahovať iba alfanumerické znaky a pomlčky.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "Iba kolekcie môžu byť upravované, na úpravu/pridanie dokumentov vytvorte novú znalostnú databázu.", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "Vyberte engine", "Select Knowledge": "Vybrať znalosti", - "Select model": "Vyberte model", "Select only one model to call": "Vyberte iba jeden model, ktorý chcete použiť", "Selected model(s) do not support image inputs": "Vybraný(é) model(y) nepodporujú vstupy v podobe obrázkov.", "Semantic distance to query": "Sémantická vzdialenosť k dotazu", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "Prompt na generovanie značiek", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. 
(default: 1)": "", + "Talk to model": "", "Tap to interrupt": "Klepnite na prerušenie", "Tasks": "", "Tavily API Key": "Kľúč API pre Tavily", @@ -1041,6 +1050,7 @@ "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "Máte problémy s prístupom k Ollama?", + "Trust Proxy Environment": "", "TTS Model": "Model prevodu textu na reč (TTS)", "TTS Settings": "Nastavenia TTS (Text-to-Speech)", "TTS Voice": "TTS hlas", diff --git a/src/lib/i18n/locales/sr-RS/translation.json b/src/lib/i18n/locales/sr-RS/translation.json index 0b1a4a0f147..2297fa68ae2 100644 --- a/src/lib/i18n/locales/sr-RS/translation.json +++ b/src/lib/i18n/locales/sr-RS/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "Модел задатка се користи приликом извршавања задатака као што су генерисање наслова за ћаскања и упите за Веб претрагу", "a user": "корисник", "About": "О нама", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "Приступ", "Access Control": "Контрола приступа", "Accessible to all users": "Доступно свим корисницима", @@ -93,7 +94,7 @@ "Artifacts": "Артефакти", "Ask a question": "Постави питање", "Assistant": "Помоћник", - "Attach file": "Приложи датотеку", + "Attach file from knowledge": "", "Attention to detail": "Пажња на детаље", "Attribute for Mail": "Особина е-поруке", "Attribute for Username": "Особина корисника", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", "Document": "Документ", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "Документација", "Documents": "Документи", "does not make any external connections, and your data stays securely on your locally hosted server.": "не отвара никакве спољне везе и ваши подаци остају сигурно на вашем локално хостованом серверу.", @@ -354,6 +357,7 @@ 
"Enable Message Rating": "", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", "Enable New Sign Ups": "Омогући нове пријаве", + "Enable OneDrive": "", "Enable Web Search": "Омогући Wеб претрагу", "Enabled": "Омогућено", "Engine": "Мотор", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "Унесите преклапање делова", "Enter Chunk Size": "Унесите величину дела", "Enter description": "", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "Унесите Гитхуб Раw УРЛ адресу", @@ -510,6 +516,7 @@ "General Settings": "Општа подешавања", "Generate an image": "", "Generate Image": "", + "Generate prompt pair": "", "Generating search query": "Генерисање упита претраге", "Get started": "", "Get started with {{WEBUI_NAME}}": "", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "Локално", "Local Models": "Локални модели", + "Location access not allowed": "", "Lost": "Пораза", "LTR": "ЛНД", "Made by Open WebUI Community": "Израдила OpenWebUI заједница", @@ -718,6 +726,7 @@ "Ollama API settings updated": "", "Ollama Version": "Издање Ollama-е", "On": "Укључено", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "", "Only alphanumeric characters and hyphens are allowed in the command string.": "Само алфанумерички знакови и цртице су дозвољени у низу наредби.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "Изабери мотор", "Select Knowledge": "Изабери знање", - "Select model": "Изабери модел", "Select only one model to call": "", "Selected model(s) do not support image inputs": "Изабрани модели не подржавају уносе слика", "Semantic distance to query": "", @@ -957,6 +965,7 @@ "Tags Generation": "Стварање ознака", "Tags Generation 
Prompt": "Упит стварања ознака", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Talk to model": "", "Tap to interrupt": "", "Tasks": "", "Tavily API Key": "", @@ -1041,6 +1050,7 @@ "Top P": "Топ П", "Transformers": "", "Trouble accessing Ollama?": "Проблеми са приступом Ollama-и?", + "Trust Proxy Environment": "", "TTS Model": "TTS модел", "TTS Settings": "TTS подешавања", "TTS Voice": "TTS глас", diff --git a/src/lib/i18n/locales/sv-SE/translation.json b/src/lib/i18n/locales/sv-SE/translation.json index 3ce482581b9..c899b1f765d 100644 --- a/src/lib/i18n/locales/sv-SE/translation.json +++ b/src/lib/i18n/locales/sv-SE/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "En uppgiftsmodell används när du utför uppgifter som att generera titlar för chattar och webbsökningsfrågor", "a user": "en användare", "About": "Om", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "", "Access Control": "", "Accessible to all users": "", @@ -93,7 +94,7 @@ "Artifacts": "", "Ask a question": "", "Assistant": "", - "Attach file": "Bifoga fil", + "Attach file from knowledge": "", "Attention to detail": "Detaljerad uppmärksamhet", "Attribute for Mail": "", "Attribute for Username": "", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", "Document": "Dokument", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "Dokumentation", "Documents": "Dokument", "does not make any external connections, and your data stays securely on your locally hosted server.": "gör inga externa anslutningar, och dina data förblir säkra på din lokalt värdade server.", @@ -354,6 
+357,7 @@ "Enable Message Rating": "", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", "Enable New Sign Ups": "Aktivera nya registreringar", + "Enable OneDrive": "", "Enable Web Search": "Aktivera webbsökning", "Enabled": "", "Engine": "", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "Ange chunköverlappning", "Enter Chunk Size": "Ange chunkstorlek", "Enter description": "", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "Ange Github Raw URL", @@ -510,6 +516,7 @@ "General Settings": "Allmänna inställningar", "Generate an image": "", "Generate Image": "Generera bild", + "Generate prompt pair": "", "Generating search query": "Genererar sökfråga", "Get started": "", "Get started with {{WEBUI_NAME}}": "", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "", "Local Models": "Lokala modeller", + "Location access not allowed": "", "Lost": "", "LTR": "LTR", "Made by Open WebUI Community": "Skapad av OpenWebUI Community", @@ -718,6 +726,7 @@ "Ollama API settings updated": "", "Ollama Version": "Ollama-version", "On": "På", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "", "Only alphanumeric characters and hyphens are allowed in the command string.": "Endast alfanumeriska tecken och bindestreck är tillåtna i kommandosträngen.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "", "Select Knowledge": "", - "Select model": "Välj en modell", "Select only one model to call": "Välj endast en modell att ringa", "Selected model(s) do not support image inputs": "Valda modeller stöder inte bildinmatningar", "Semantic distance to query": "", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "", 
"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Talk to model": "", "Tap to interrupt": "", "Tasks": "", "Tavily API Key": "", @@ -1041,6 +1050,7 @@ "Top P": "Topp P", "Transformers": "", "Trouble accessing Ollama?": "Problem med att komma åt Ollama?", + "Trust Proxy Environment": "", "TTS Model": "Text-till-tal-modell", "TTS Settings": "Text-till-tal-inställningar", "TTS Voice": "Text-till-tal-röst", diff --git a/src/lib/i18n/locales/th-TH/translation.json b/src/lib/i18n/locales/th-TH/translation.json index 41b3723bf66..8484bf906fa 100644 --- a/src/lib/i18n/locales/th-TH/translation.json +++ b/src/lib/i18n/locales/th-TH/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "ใช้โมเดลงานเมื่อทำงานเช่นการสร้างหัวข้อสำหรับการสนทนาและการค้นหาเว็บ", "a user": "ผู้ใช้", "About": "เกี่ยวกับ", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "", "Access Control": "", "Accessible to all users": "", @@ -93,7 +94,7 @@ "Artifacts": "", "Ask a question": "", "Assistant": "", - "Attach file": "แนบไฟล์", + "Attach file from knowledge": "", "Attention to detail": "ใส่ใจในรายละเอียด", "Attribute for Mail": "", "Attribute for Username": "", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "อย่าติดตั้งฟังก์ชันจากแหล่งที่คุณไม่ไว้วางใจอย่างเต็มที่", "Do not install tools from sources you do not fully trust.": "อย่าติดตั้งเครื่องมือจากแหล่งที่คุณไม่ไว้วางใจอย่างเต็มที่", "Document": "เอกสาร", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "เอกสารประกอบ", "Documents": "เอกสาร", "does not make any external connections, and your data stays securely on your locally hosted server.": "ไม่เชื่อมต่อภายนอกใดๆ 
และข้อมูลของคุณจะอยู่บนเซิร์ฟเวอร์ที่โฮสต์ในท้องถิ่นของคุณอย่างปลอดภัย", @@ -354,6 +357,7 @@ "Enable Message Rating": "", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", "Enable New Sign Ups": "เปิดใช้งานการสมัครใหม่", + "Enable OneDrive": "", "Enable Web Search": "เปิดใช้งานการค้นหาเว็บ", "Enabled": "เปิดใช้งาน", "Engine": "เครื่องยนต์", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "ใส่การทับซ้อนส่วนข้อมูล", "Enter Chunk Size": "ใส่ขนาดส่วนข้อมูล", "Enter description": "", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "ใส่ URL ดิบของ Github", @@ -510,6 +516,7 @@ "General Settings": "การตั้งค่าทั่วไป", "Generate an image": "", "Generate Image": "สร้างภาพ", + "Generate prompt pair": "", "Generating search query": "สร้างคำค้นหา", "Get started": "", "Get started with {{WEBUI_NAME}}": "", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "", "Local Models": "โมเดลท้องถิ่น", + "Location access not allowed": "", "Lost": "", "LTR": "LTR", "Made by Open WebUI Community": "สร้างโดยชุมชน OpenWebUI", @@ -718,6 +726,7 @@ "Ollama API settings updated": "", "Ollama Version": "เวอร์ชั่น Ollama", "On": "เปิด", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "", "Only alphanumeric characters and hyphens are allowed in the command string.": "อนุญาตให้ใช้เฉพาะอักขระตัวอักษรและตัวเลข รวมถึงเครื่องหมายขีดกลางในสตริงคำสั่งเท่านั้น", "Only collections can be edited, create a new knowledge base to edit/add documents.": "", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "", "Select Knowledge": "", - "Select model": "เลือกโมเดล", "Select only one model to call": "เลือกเพียงโมเดลเดียวที่จะใช้", "Selected model(s) do not support image inputs": "โมเดลที่เลือกไม่รองรับภาพ", "Semantic distance to query": "", @@ 
-957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Talk to model": "", "Tap to interrupt": "แตะเพื่อขัดจังหวะ", "Tasks": "", "Tavily API Key": "คีย์ API ของ Tavily", @@ -1041,6 +1050,7 @@ "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "มีปัญหาในการเข้าถึง Ollama?", + "Trust Proxy Environment": "", "TTS Model": "โมเดลแปลงข้อความเป็นเสียง", "TTS Settings": "การตั้งค่าแปลงข้อความเป็นเสียง", "TTS Voice": "เสียงแปลงข้อความเป็นเสียง", diff --git a/src/lib/i18n/locales/tk-TW/translation.json b/src/lib/i18n/locales/tk-TW/translation.json index 9e52a557b2f..b77efe5b742 100644 --- a/src/lib/i18n/locales/tk-TW/translation.json +++ b/src/lib/i18n/locales/tk-TW/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "", "a user": "", "About": "", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "", "Access Control": "", "Accessible to all users": "", @@ -93,7 +94,7 @@ "Artifacts": "", "Ask a question": "", "Assistant": "", - "Attach file": "", + "Attach file from knowledge": "", "Attention to detail": "", "Attribute for Mail": "", "Attribute for Username": "", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", "Document": "", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "", "Documents": "", "does not make any external connections, and your data stays securely on your locally hosted server.": "", @@ -354,6 +357,7 @@ "Enable Message Rating": "", "Enable Mirostat sampling for controlling perplexity. 
(Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", "Enable New Sign Ups": "", + "Enable OneDrive": "", "Enable Web Search": "", "Enabled": "", "Engine": "", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "", "Enter Chunk Size": "", "Enter description": "", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "", @@ -510,6 +516,7 @@ "General Settings": "", "Generate an image": "", "Generate Image": "", + "Generate prompt pair": "", "Generating search query": "", "Get started": "", "Get started with {{WEBUI_NAME}}": "", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "", "Local Models": "", + "Location access not allowed": "", "Lost": "", "LTR": "", "Made by Open WebUI Community": "", @@ -718,6 +726,7 @@ "Ollama API settings updated": "", "Ollama Version": "", "On": "", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "", "Only alphanumeric characters and hyphens are allowed in the command string.": "", "Only collections can be edited, create a new knowledge base to edit/add documents.": "", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "", "Select Knowledge": "", - "Select model": "", "Select only one model to call": "", "Selected model(s) do not support image inputs": "", "Semantic distance to query": "", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. 
(default: 1)": "", + "Talk to model": "", "Tap to interrupt": "", "Tasks": "", "Tavily API Key": "", @@ -1041,6 +1050,7 @@ "Top P": "", "Transformers": "", "Trouble accessing Ollama?": "", + "Trust Proxy Environment": "", "TTS Model": "", "TTS Settings": "", "TTS Voice": "", diff --git a/src/lib/i18n/locales/tr-TR/translation.json b/src/lib/i18n/locales/tr-TR/translation.json index b6518a1e2cb..2c7d24ff815 100644 --- a/src/lib/i18n/locales/tr-TR/translation.json +++ b/src/lib/i18n/locales/tr-TR/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "Bir görev modeli, sohbetler ve web arama sorguları için başlık oluşturma gibi görevleri yerine getirirken kullanılır", "a user": "bir kullanıcı", "About": "Hakkında", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "Erişim", "Access Control": "Erişim Kontrolü", "Accessible to all users": "Tüm kullanıcılara erişilebilir", @@ -93,7 +94,7 @@ "Artifacts": "Eserler", "Ask a question": "Bir soru sorun", "Assistant": "Asistan", - "Attach file": "Dosya ekle", + "Attach file from knowledge": "", "Attention to detail": "Ayrıntılara dikkat", "Attribute for Mail": "", "Attribute for Username": "Kullanıcı Adı için Özellik", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "Tamamen güvenmediğiniz kaynaklardan fonksiyonlar yüklemeyin.", "Do not install tools from sources you do not fully trust.": "Tamamen güvenmediğiniz kaynaklardan araçlar yüklemeyin.", "Document": "Belge", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "Dökümantasyon", "Documents": "Belgeler", "does not make any external connections, and your data stays securely on your locally hosted server.": "herhangi bir harici bağlantı yapmaz ve verileriniz güvenli bir şekilde yerel olarak barındırılan sunucunuzda kalır.", @@ -354,6 +357,7 @@ "Enable Message Rating": "Mesaj 
Değerlendirmeyi Etkinleştir", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Perplexity'yi kontrol etmek için Mirostat örnekleme özelliğini etkinleştirin. (Varsayılan: 0, 0 = Devre Dışı, 1 = Mirostat, 2 = Mirostat 2.0)", "Enable New Sign Ups": "Yeni Kayıtları Etkinleştir", + "Enable OneDrive": "", "Enable Web Search": "Web Aramasını Etkinleştir", "Enabled": "Etkin", "Engine": "Motor", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "Chunk Örtüşmesini Girin", "Enter Chunk Size": "Chunk Boyutunu Girin", "Enter description": "Açıklama girin", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "Github Raw URL'sini girin", @@ -510,6 +516,7 @@ "General Settings": "Genel Ayarlar", "Generate an image": "", "Generate Image": "Görsel Üret", + "Generate prompt pair": "", "Generating search query": "Arama sorgusu oluşturma", "Get started": "Başlayın", "Get started with {{WEBUI_NAME}}": "{{WEBUI_NAME}} ile başlayın", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "Yerel", "Local Models": "Yerel Modeller", + "Location access not allowed": "", "Lost": "Kayıp", "LTR": "Soldan Sağa", "Made by Open WebUI Community": "OpenWebUI Topluluğu tarafından yapılmıştır", @@ -718,6 +726,7 @@ "Ollama API settings updated": "Ollama API ayarları güncellendi", "Ollama Version": "Ollama Sürümü", "On": "Açık", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "Yalnızca alfasayısal karakterler ve tireler kabul edilir", "Only alphanumeric characters and hyphens are allowed in the command string.": "Komut dizisinde yalnızca alfasayısal karakterler ve tireler kabul edilir.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "Yalnızca koleksiyonlar düzenlenebilir, belgeleri düzenlemek/eklemek için yeni bir bilgi tabanı 
oluşturun.", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "Motor Seç", "Select Knowledge": "Bilgi Seç", - "Select model": "Model seç", "Select only one model to call": "Arama için sadece bir model seç", "Selected model(s) do not support image inputs": "Seçilen model(ler) görüntü girişlerini desteklemiyor", "Semantic distance to query": "Sorguya semantik mesafe", @@ -957,6 +965,7 @@ "Tags Generation": "Etiketler Oluşturma", "Tags Generation Prompt": "Etiketler Oluşturma Promptu", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Talk to model": "", "Tap to interrupt": "Durdurmak için dokunun", "Tasks": "", "Tavily API Key": "Tavily API Anahtarı", @@ -1041,6 +1050,7 @@ "Top P": "Top P", "Transformers": "Dönüştürücüler", "Trouble accessing Ollama?": "Ollama'ya erişmede sorun mu yaşıyorsunuz?", + "Trust Proxy Environment": "", "TTS Model": "TTS Modeli", "TTS Settings": "TTS Ayarları", "TTS Voice": "TTS Sesi", diff --git a/src/lib/i18n/locales/uk-UA/translation.json b/src/lib/i18n/locales/uk-UA/translation.json index ba4ba31778a..2756394b5ed 100644 --- a/src/lib/i18n/locales/uk-UA/translation.json +++ b/src/lib/i18n/locales/uk-UA/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "Модель задач використовується при виконанні таких завдань, як генерація заголовків для чатів та пошукових запитів в Інтернеті", "a user": "користувача", "About": "Про програму", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "Доступ", "Access Control": "Контроль доступу", "Accessible to all users": "Доступно всім користувачам", @@ -93,7 +94,7 @@ "Artifacts": "Артефакти", "Ask a question": "Задати питання", "Assistant": "Асистент", - "Attach file": "Прикріпити файл", + "Attach file from 
knowledge": "", "Attention to detail": "Увага до деталей", "Attribute for Mail": "Атрибут для пошти", "Attribute for Username": "Атрибут для імені користувача", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "Не встановлюйте функції з джерел, яким ви не повністю довіряєте.", "Do not install tools from sources you do not fully trust.": "Не встановлюйте інструменти з джерел, яким ви не повністю довіряєте.", "Document": "Документ", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "Документація", "Documents": "Документи", "does not make any external connections, and your data stays securely on your locally hosted server.": "не встановлює жодних зовнішніх з'єднань, і ваші дані залишаються в безпеці на вашому локальному сервері.", @@ -354,6 +357,7 @@ "Enable Message Rating": "Увімкнути оцінку повідомлень", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Увімкнути вибірку Mirostat для контролю над непередбачуваністю. 
(За замовчуванням: 0, 0 = Вимкнено, 1 = Mirostat, 2 = Mirostat 2.0)", "Enable New Sign Ups": "Дозволити нові реєстрації", + "Enable OneDrive": "", "Enable Web Search": "Увімкнути веб-пошук", "Enabled": "Увімкнено", "Engine": "Рушій", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "Введіть перекриття фрагменту", "Enter Chunk Size": "Введіть розмір фрагменту", "Enter description": "Введіть опис", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "Введіть домени, розділені комами (наприклад, example.com, site.org)", "Enter Exa API Key": "Введіть ключ API Exa", "Enter Github Raw URL": "Введіть Raw URL-адресу Github", @@ -510,6 +516,7 @@ "General Settings": "Загальні налаштування", "Generate an image": "Згенерувати зображення", "Generate Image": "Створити зображення", + "Generate prompt pair": "", "Generating search query": "Сформувати пошуковий запит", "Get started": "Почати", "Get started with {{WEBUI_NAME}}": "Почати з {{WEBUI_NAME}}", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "Завантаження Kokoro.js...", "Local": "Локальний", "Local Models": "Локальні моделі", + "Location access not allowed": "", "Lost": "Втрачене", "LTR": "LTR", "Made by Open WebUI Community": "Зроблено спільнотою OpenWebUI", @@ -718,6 +726,7 @@ "Ollama API settings updated": "Налаштування Ollama API оновлено", "Ollama Version": "Версія Ollama", "On": "Увімк", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "Дозволені тільки алфавітно-цифрові символи та дефіси", "Only alphanumeric characters and hyphens are allowed in the command string.": "У рядку команди дозволено використовувати лише алфавітно-цифрові символи та дефіси.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "Редагувати можна лише колекції, створіть нову базу знань, щоб редагувати або додавати документи.", @@ -880,7 +889,6 @@ "Select an Ollama instance": "Виберіть 
екземпляр Ollama", "Select Engine": "Виберіть двигун", "Select Knowledge": "Вибрати знання", - "Select model": "Обрати модель", "Select only one model to call": "Оберіть лише одну модель для виклику", "Selected model(s) do not support image inputs": "Вибрані модель(і) не підтримують вхідні зображення", "Semantic distance to query": "Семантична відстань до запиту", @@ -957,6 +965,7 @@ "Tags Generation": "Генерація тегів", "Tags Generation Prompt": "Підказка для генерації тегів", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Вибірка з відрізанням хвоста використовується для зменшення впливу малоймовірних токенів на результат. Вищі значення (напр., 2.0) зменшують цей вплив більше, в той час як значення 1.0 вимикає цю настройку. (За замовчуванням: 1)", + "Talk to model": "", "Tap to interrupt": "Натисніть, щоб перервати", "Tasks": "", "Tavily API Key": "Ключ API Tavily", @@ -1041,6 +1050,7 @@ "Top P": "Top P", "Transformers": "Трансформери", "Trouble accessing Ollama?": "Проблеми з доступом до Ollama?", + "Trust Proxy Environment": "", "TTS Model": "Модель TTS", "TTS Settings": "Налаштування TTS", "TTS Voice": "Голос TTS", diff --git a/src/lib/i18n/locales/ur-PK/translation.json b/src/lib/i18n/locales/ur-PK/translation.json index 01375ea2c2d..d85c8331422 100644 --- a/src/lib/i18n/locales/ur-PK/translation.json +++ b/src/lib/i18n/locales/ur-PK/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "ٹاسک ماڈل اس وقت استعمال ہوتا ہے جب چیٹس کے عنوانات اور ویب سرچ سوالات تیار کیے جا رہے ہوں", "a user": "ایک صارف", "About": "بارے میں", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "", "Access Control": "", "Accessible to all users": "", @@ -93,7 +94,7 @@ "Artifacts": "نوادرات", "Ask a question": "سوال 
پوچھیں", "Assistant": "اسسٹنٹ", - "Attach file": "فائل منسلک کریں", + "Attach file from knowledge": "", "Attention to detail": "تفصیل پر توجہ", "Attribute for Mail": "", "Attribute for Username": "", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "ایسی جگہوں سے فنکشنز انسٹال نہ کریں جن پر آپ مکمل بھروسہ نہیں کرتے", "Do not install tools from sources you do not fully trust.": "جن ذرائع پر آپ مکمل بھروسہ نہیں کرتے، ان سے ٹولز انسٹال نہ کریں", "Document": "دستاویز", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "دستاویزات", "Documents": "دستاویزات", "does not make any external connections, and your data stays securely on your locally hosted server.": "آپ کا ڈیٹا مقامی طور پر میزبانی شدہ سرور پر محفوظ رہتا ہے اور کوئی بیرونی رابطے نہیں بناتا", @@ -354,6 +357,7 @@ "Enable Message Rating": "پیغام کی درجہ بندی فعال کریں", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", "Enable New Sign Ups": "نئے سائن اپس کو فعال کریں", + "Enable OneDrive": "", "Enable Web Search": "ویب تلاش فعال کریں", "Enabled": "فعال کردیا گیا ہے", "Engine": "انجن", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "چنک اوورلیپ درج کریں", "Enter Chunk Size": "چنک سائز درج کریں", "Enter description": "تفصیل درج کریں", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "گیٹ ہب را یو آر ایل درج کریں", @@ -510,6 +516,7 @@ "General Settings": "عمومی ترتیبات", "Generate an image": "", "Generate Image": "تصویر بنائیں", + "Generate prompt pair": "", "Generating search query": "تلاش کے لیے سوالیہ عبارت تیار کی جا رہی ہے", "Get started": "", "Get started with {{WEBUI_NAME}}": "", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "", "Local Models": "مقامی ماڈلز", + "Location access not 
allowed": "", "Lost": "گم شدہ", "LTR": "بائیں سے دائیں", "Made by Open WebUI Community": "اوپن ویب یو آئی کمیونٹی کی جانب سے تیار کردہ", @@ -718,6 +726,7 @@ "Ollama API settings updated": "", "Ollama Version": "اولاما ورژن", "On": "چالو", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "", "Only alphanumeric characters and hyphens are allowed in the command string.": "کمانڈ سٹرنگ میں صرف حروفی، عددی کردار اور ہائفن کی اجازت ہے", "Only collections can be edited, create a new knowledge base to edit/add documents.": "صرف مجموعے ترمیم کیے جا سکتے ہیں، دستاویزات کو ترمیم یا شامل کرنے کے لیے نیا علمی بنیاد بنائیں", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "انجن منتخب کریں", "Select Knowledge": "علم منتخب کریں", - "Select model": "ماڈل منتخب کریں", "Select only one model to call": "صرف ایک ماڈل کو کال کرنے کے لئے منتخب کریں", "Selected model(s) do not support image inputs": "منتخب کردہ ماڈل(ز) تصویری ان پٹ کی حمایت نہیں کرتے", "Semantic distance to query": "سوال کے لیے معنوی فاصلہ", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags Generation Prompt": "پرمپٹ کے لیے ٹیگز بنائیں", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. 
(default: 1)": "", + "Talk to model": "", "Tap to interrupt": "رکنے کے لئے ٹچ کریں", "Tasks": "", "Tavily API Key": "ٹاویلی API کلید", @@ -1041,6 +1050,7 @@ "Top P": "ٹاپ پی", "Transformers": "", "Trouble accessing Ollama?": "Ollama تک رسائی میں مشکل؟", + "Trust Proxy Environment": "", "TTS Model": "ٹی ٹی ایس ماڈل", "TTS Settings": "ٹی ٹی ایس ترتیبات", "TTS Voice": "ٹی ٹی ایس آواز", diff --git a/src/lib/i18n/locales/vi-VN/translation.json b/src/lib/i18n/locales/vi-VN/translation.json index 0fe97a54780..1d7b8b768e4 100644 --- a/src/lib/i18n/locales/vi-VN/translation.json +++ b/src/lib/i18n/locales/vi-VN/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "Mô hình tác vụ được sử dụng khi thực hiện các tác vụ như tạo tiêu đề cho cuộc trò chuyện và truy vấn tìm kiếm trên web", "a user": "người sử dụng", "About": "Giới thiệu", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "", "Access Control": "", "Accessible to all users": "", @@ -93,7 +94,7 @@ "Artifacts": "", "Ask a question": "", "Assistant": "", - "Attach file": "Đính kèm file", + "Attach file from knowledge": "", "Attention to detail": "Có sự chú ý đến chi tiết của vấn đề", "Attribute for Mail": "", "Attribute for Username": "", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "Không cài đặt các functions từ các nguồn mà bạn không hoàn toàn tin tưởng.", "Do not install tools from sources you do not fully trust.": "Không cài đặt các tools từ những nguồn mà bạn không hoàn toàn tin tưởng.", "Document": "Tài liệu", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "Tài liệu", "Documents": "Tài liệu", "does not make any external connections, and your data stays securely on your locally hosted server.": "không thực hiện bất kỳ kết nối ngoài nào, và dữ liệu của bạn vẫn được lưu trữ an toàn trên máy chủ lưu trữ cục 
bộ của bạn.", @@ -354,6 +357,7 @@ "Enable Message Rating": "Cho phép phản hồi, đánh giá", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", "Enable New Sign Ups": "Cho phép đăng ký mới", + "Enable OneDrive": "", "Enable Web Search": "Cho phép tìm kiếm Web", "Enabled": "Đã bật", "Engine": "", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "Nhập Chunk chồng lấn (overlap)", "Enter Chunk Size": "Nhập Kích thước Chunk", "Enter description": "", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", "Enter Exa API Key": "", "Enter Github Raw URL": "Nhập URL cho Github Raw", @@ -510,6 +516,7 @@ "General Settings": "Cấu hình chung", "Generate an image": "", "Generate Image": "Sinh ảnh", + "Generate prompt pair": "", "Generating search query": "Tạo truy vấn tìm kiếm", "Get started": "", "Get started with {{WEBUI_NAME}}": "", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "", "Local": "", "Local Models": "", + "Location access not allowed": "", "Lost": "", "LTR": "LTR", "Made by Open WebUI Community": "Được tạo bởi Cộng đồng OpenWebUI", @@ -718,6 +726,7 @@ "Ollama API settings updated": "", "Ollama Version": "Phiên bản Ollama", "On": "Bật", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "", "Only alphanumeric characters and hyphens are allowed in the command string.": "Chỉ ký tự số và gạch nối được phép trong chuỗi lệnh.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "", @@ -880,7 +889,6 @@ "Select an Ollama instance": "", "Select Engine": "Chọn Engine", "Select Knowledge": "", - "Select model": "Chọn model", "Select only one model to call": "Chọn model để gọi", "Selected model(s) do not support image inputs": "Model được lựa chọn không hỗ trợ đầu vào là hình ảnh", "Semantic distance to query": "", @@ -957,6 +965,7 @@ "Tags Generation": "", "Tags 
Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Talk to model": "", "Tap to interrupt": "Chạm để ngừng", "Tasks": "", "Tavily API Key": "", @@ -1041,6 +1050,7 @@ "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "Gặp vấn đề khi truy cập Ollama?", + "Trust Proxy Environment": "", "TTS Model": "", "TTS Settings": "Cài đặt Chuyển văn bản thành Giọng nói", "TTS Voice": "", diff --git a/src/lib/i18n/locales/zh-CN/translation.json b/src/lib/i18n/locales/zh-CN/translation.json index b0db571bace..8b056976312 100644 --- a/src/lib/i18n/locales/zh-CN/translation.json +++ b/src/lib/i18n/locales/zh-CN/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "任务模型用于执行生成对话标题和联网搜索查询等任务", "a user": "用户", "About": "关于", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "访问", "Access Control": "访问控制", "Accessible to all users": "对所有用户开放", @@ -93,7 +94,7 @@ "Artifacts": "Artifacts", "Ask a question": "提问", "Assistant": "AI模型", - "Attach file": "添加文件", + "Attach file from knowledge": "", "Attention to detail": "注重细节", "Attribute for Mail": "邮箱属性", "Attribute for Username": "用户名属性", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "切勿安装来源不完全可信的函数。", "Do not install tools from sources you do not fully trust.": "切勿安装来源不完全可信的工具。", "Document": "文档", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "帮助文档", "Documents": "文档", "does not make any external connections, and your data stays securely on your locally hosted server.": "不会与外部建立任何连接,您的数据会安全地存储在本地托管的服务器上。", @@ -354,6 +357,7 @@ "Enable Message Rating": "启用回复评价", "Enable Mirostat sampling for controlling perplexity. 
(Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "启用 Mirostat 采样以控制困惑度。(默认值:0,0 = 禁用,1 = Mirostat,2 = Mirostat 2.0)", "Enable New Sign Ups": "允许新用户注册", + "Enable OneDrive": "", "Enable Web Search": "启用联网搜索", "Enabled": "启用", "Engine": "引擎", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "输入块重叠 (Chunk Overlap)", "Enter Chunk Size": "输入块大小 (Chunk Size)", "Enter description": "输入简介描述", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "输入以逗号分隔的域名(例如:example.com、site.org)", "Enter Exa API Key": "输入 Exa API 密钥", "Enter Github Raw URL": "输入 Github Raw 地址", @@ -510,6 +516,7 @@ "General Settings": "通用设置", "Generate an image": "生成图像", "Generate Image": "生成图像", + "Generate prompt pair": "", "Generating search query": "生成搜索查询", "Get started": "开始使用", "Get started with {{WEBUI_NAME}}": "开始使用 {{WEBUI_NAME}}", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "载入 Kokoro.js...", "Local": "本地", "Local Models": "本地模型", + "Location access not allowed": "", "Lost": "落败", "LTR": "从左至右", "Made by Open WebUI Community": "由 OpenWebUI 社区制作", @@ -718,6 +726,7 @@ "Ollama API settings updated": "Ollama API设置已更新", "Ollama Version": "Ollama 版本", "On": "开启", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "只允许使用英文字母,数字 (0-9) 以及连字符 (-)", "Only alphanumeric characters and hyphens are allowed in the command string.": "命令字符串中只允许使用英文字母,数字 (0-9) 以及连字符 (-)。", "Only collections can be edited, create a new knowledge base to edit/add documents.": "只能编辑文件集,创建一个新的知识库来编辑/添加文件。", @@ -880,7 +889,6 @@ "Select an Ollama instance": "选择一个 Ollama 实例。", "Select Engine": "选择引擎", "Select Knowledge": "选择知识", - "Select model": "选择模型", "Select only one model to call": "请仅选择一个模型来呼叫", "Selected model(s) do not support image inputs": "已选择的模型不支持发送图像", "Semantic distance to query": "语义距离查询", @@ -957,6 +965,7 @@ "Tags Generation": "标签生成", "Tags Generation Prompt": "标签生成提示词", "Tail free sampling is 
used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Tail free sampling 用于减少输出中可能性较低的Token的影响。数值越大(如 2.0),影响就越小,而数值为 1.0 则会禁用此设置。(默认值:1)", + "Talk to model": "", "Tap to interrupt": "点击以中断", "Tasks": "任务", "Tavily API Key": "Tavily API 密钥", @@ -1041,6 +1050,7 @@ "Top P": "Top P", "Transformers": "Transformers", "Trouble accessing Ollama?": "访问 Ollama 时遇到问题?", + "Trust Proxy Environment": "", "TTS Model": "文本转语音模型", "TTS Settings": "文本转语音设置", "TTS Voice": "文本转语音音色", diff --git a/src/lib/i18n/locales/zh-TW/translation.json b/src/lib/i18n/locales/zh-TW/translation.json index ed7cf841bb8..b3be351c703 100644 --- a/src/lib/i18n/locales/zh-TW/translation.json +++ b/src/lib/i18n/locales/zh-TW/translation.json @@ -13,6 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "執行產生對話標題和網頁搜尋查詢等任務時會使用任務模型", "a user": "一位使用者", "About": "關於", + "Accept autocomplete generation / Jump to prompt variable": "", "Access": "存取", "Access Control": "存取控制", "Accessible to all users": "所有使用者可存取", @@ -93,7 +94,7 @@ "Artifacts": "成品", "Ask a question": "提出問題", "Assistant": "助手", - "Attach file": "附加檔案", + "Attach file from knowledge": "", "Attention to detail": "注重細節", "Attribute for Mail": "使用者郵箱屬性", "Attribute for Username": "使用者名稱屬性", @@ -305,6 +306,8 @@ "Do not install functions from sources you do not fully trust.": "請勿從您無法完全信任的來源安裝函式。", "Do not install tools from sources you do not fully trust.": "請勿從您無法完全信任的來源安裝工具。", "Document": "文件", + "Document Intelligence": "", + "Document Intelligence endpoint and key required.": "", "Documentation": "文件", "Documents": "文件", "does not make any external connections, and your data stays securely on your locally hosted server.": "不會建立任何外部連線,而且您的資料會安全地儲存在您本機伺服器上。", @@ -354,6 +357,7 @@ "Enable Message Rating": "啟用訊息評分", "Enable Mirostat sampling for controlling 
perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "啟用 Mirostat 採樣以控制困惑度。(預設:0,0 = 停用,1 = Mirostat,2 = Mirostat 2.0)", "Enable New Sign Ups": "允許新使用者註冊", + "Enable OneDrive": "", "Enable Web Search": "啟用網頁搜尋", "Enabled": "已啟用", "Engine": "引擎", @@ -372,6 +376,8 @@ "Enter Chunk Overlap": "輸入區塊重疊", "Enter Chunk Size": "輸入區塊大小", "Enter description": "輸入描述", + "Enter Document Intelligence Endpoint": "", + "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "輸入網域,以逗號分隔(例如:example.com, site.org)", "Enter Exa API Key": "輸入 Exa API 金鑰", "Enter Github Raw URL": "輸入 GitHub Raw URL", @@ -510,6 +516,7 @@ "General Settings": "一般設定", "Generate an image": "產生圖片", "Generate Image": "產生圖片", + "Generate prompt pair": "", "Generating search query": "正在產生搜尋查詢", "Get started": "開始使用", "Get started with {{WEBUI_NAME}}": "開始使用 {{WEBUI_NAME}}", @@ -615,6 +622,7 @@ "Loading Kokoro.js...": "Kokoro.js 載入中……", "Local": "本機", "Local Models": "本機模型", + "Location access not allowed": "", "Lost": "已遺失", "LTR": "從左到右", "Made by Open WebUI Community": "由 OpenWebUI 社群製作", @@ -718,6 +726,7 @@ "Ollama API settings updated": "Ollama API 設定已更新", "Ollama Version": "Ollama 版本", "On": "開啟", + "OneDrive": "", "Only alphanumeric characters and hyphens are allowed": "只允許使用英文字母、數字和連字號", "Only alphanumeric characters and hyphens are allowed in the command string.": "命令字串中只允許使用英文字母、數字和連字號。", "Only collections can be edited, create a new knowledge base to edit/add documents.": "只能編輯集合,請建立新的知識以編輯或新增文件。", @@ -880,7 +889,6 @@ "Select an Ollama instance": "選擇一個 Ollama 實例", "Select Engine": "選擇引擎", "Select Knowledge": "選擇知識庫", - "Select model": "選擇模型", "Select only one model to call": "僅選擇一個模型來呼叫", "Selected model(s) do not support image inputs": "選取的模型不支援圖片輸入", "Semantic distance to query": "與查詢的語義距離", @@ -957,6 +965,7 @@ "Tags Generation": "標籤生成", "Tags Generation Prompt": "標籤生成提示詞", "Tail free sampling is used to reduce the impact of less 
probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "使用無尾採樣來減少較不可能的 token 對輸出的影響。較高的值(例如 2.0)會減少更多影響,而值為 1.0 則停用此設定。(預設:1)", + "Talk to model": "", "Tap to interrupt": "點選以中斷", "Tasks": "任務", "Tavily API Key": "Tavily API 金鑰", @@ -1041,6 +1050,7 @@ "Top P": "Top P", "Transformers": "Transformers", "Trouble accessing Ollama?": "存取 Ollama 時遇到問題?", + "Trust Proxy Environment": "", "TTS Model": "文字轉語音 (TTS) 模型", "TTS Settings": "文字轉語音 (TTS) 設定", "TTS Voice": "文字轉語音 (TTS) 聲音", From cefe274f746ed6fac175ac7adc08a21492e8896a Mon Sep 17 00:00:00 2001 From: Tiancong Li Date: Tue, 25 Feb 2025 04:03:16 +0800 Subject: [PATCH 060/623] i18n: update zh-TW --- src/lib/i18n/locales/zh-TW/translation.json | 24 ++++++++++----------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/lib/i18n/locales/zh-TW/translation.json b/src/lib/i18n/locales/zh-TW/translation.json index b3be351c703..cb6549b80d3 100644 --- a/src/lib/i18n/locales/zh-TW/translation.json +++ b/src/lib/i18n/locales/zh-TW/translation.json @@ -13,7 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "執行產生對話標題和網頁搜尋查詢等任務時會使用任務模型", "a user": "一位使用者", "About": "關於", - "Accept autocomplete generation / Jump to prompt variable": "", + "Accept autocomplete generation / Jump to prompt variable": "接受自動完成生成/跳轉至提示變數", "Access": "存取", "Access Control": "存取控制", "Accessible to all users": "所有使用者可存取", @@ -94,7 +94,7 @@ "Artifacts": "成品", "Ask a question": "提出問題", "Assistant": "助手", - "Attach file from knowledge": "", + "Attach file from knowledge": "從知識庫附加檔案", "Attention to detail": "注重細節", "Attribute for Mail": "使用者郵箱屬性", "Attribute for Username": "使用者名稱屬性", @@ -306,8 +306,8 @@ "Do not install functions from sources you do not fully trust.": "請勿從您無法完全信任的來源安裝函式。", "Do not install tools from sources you do not fully trust.": "請勿從您無法完全信任的來源安裝工具。", "Document": "文件", - 
"Document Intelligence": "", - "Document Intelligence endpoint and key required.": "", + "Document Intelligence": "Document Intelligence", + "Document Intelligence endpoint and key required.": "需提供 Document Intelligence 端點及金鑰", "Documentation": "文件", "Documents": "文件", "does not make any external connections, and your data stays securely on your locally hosted server.": "不會建立任何外部連線,而且您的資料會安全地儲存在您本機伺服器上。", @@ -357,7 +357,7 @@ "Enable Message Rating": "啟用訊息評分", "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "啟用 Mirostat 採樣以控制困惑度。(預設:0,0 = 停用,1 = Mirostat,2 = Mirostat 2.0)", "Enable New Sign Ups": "允許新使用者註冊", - "Enable OneDrive": "", + "Enable OneDrive": "啟用 OneDrive", "Enable Web Search": "啟用網頁搜尋", "Enabled": "已啟用", "Engine": "引擎", @@ -376,8 +376,8 @@ "Enter Chunk Overlap": "輸入區塊重疊", "Enter Chunk Size": "輸入區塊大小", "Enter description": "輸入描述", - "Enter Document Intelligence Endpoint": "", - "Enter Document Intelligence Key": "", + "Enter Document Intelligence Endpoint": "輸入 Document Intelligence 端點", + "Enter Document Intelligence Key": "輸入 Document Intelligence 金鑰", "Enter domains separated by commas (e.g., example.com,site.org)": "輸入網域,以逗號分隔(例如:example.com, site.org)", "Enter Exa API Key": "輸入 Exa API 金鑰", "Enter Github Raw URL": "輸入 GitHub Raw URL", @@ -516,7 +516,7 @@ "General Settings": "一般設定", "Generate an image": "產生圖片", "Generate Image": "產生圖片", - "Generate prompt pair": "", + "Generate prompt pair": "產生提示配對", "Generating search query": "正在產生搜尋查詢", "Get started": "開始使用", "Get started with {{WEBUI_NAME}}": "開始使用 {{WEBUI_NAME}}", @@ -622,7 +622,7 @@ "Loading Kokoro.js...": "Kokoro.js 載入中……", "Local": "本機", "Local Models": "本機模型", - "Location access not allowed": "", + "Location access not allowed": "位置存取未獲允許", "Lost": "已遺失", "LTR": "從左到右", "Made by Open WebUI Community": "由 OpenWebUI 社群製作", @@ -726,7 +726,7 @@ "Ollama API settings updated": "Ollama API 設定已更新", "Ollama Version": "Ollama 版本", "On": 
"開啟", - "OneDrive": "", + "OneDrive": "OneDrive", "Only alphanumeric characters and hyphens are allowed": "只允許使用英文字母、數字和連字號", "Only alphanumeric characters and hyphens are allowed in the command string.": "命令字串中只允許使用英文字母、數字和連字號。", "Only collections can be edited, create a new knowledge base to edit/add documents.": "只能編輯集合,請建立新的知識以編輯或新增文件。", @@ -965,7 +965,7 @@ "Tags Generation": "標籤生成", "Tags Generation Prompt": "標籤生成提示詞", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "使用無尾採樣來減少較不可能的 token 對輸出的影響。較高的值(例如 2.0)會減少更多影響,而值為 1.0 則停用此設定。(預設:1)", - "Talk to model": "", + "Talk to model": "與模型對話", "Tap to interrupt": "點選以中斷", "Tasks": "任務", "Tavily API Key": "Tavily API 金鑰", @@ -1050,7 +1050,7 @@ "Top P": "Top P", "Transformers": "Transformers", "Trouble accessing Ollama?": "存取 Ollama 時遇到問題?", - "Trust Proxy Environment": "", + "Trust Proxy Environment": "信任代理環境", "TTS Model": "文字轉語音 (TTS) 模型", "TTS Settings": "文字轉語音 (TTS) 設定", "TTS Voice": "文字轉語音 (TTS) 聲音", From 89aaf64209f4517b05c71648262eea1e870bc544 Mon Sep 17 00:00:00 2001 From: "Jason E. Jensen" Date: Mon, 24 Feb 2025 22:32:08 +0000 Subject: [PATCH 061/623] add optional usage to chatCompleted messages --- src/lib/components/chat/Chat.svelte | 1 + 1 file changed, 1 insertion(+) diff --git a/src/lib/components/chat/Chat.svelte b/src/lib/components/chat/Chat.svelte index fcd5177d753..67ada0bc766 100644 --- a/src/lib/components/chat/Chat.svelte +++ b/src/lib/components/chat/Chat.svelte @@ -836,6 +836,7 @@ content: m.content, info: m.info ? m.info : undefined, timestamp: m.timestamp, + ...(m.usage ? { usage: m.usage } : {}), ...(m.sources ? 
{ sources: m.sources } : {}) })), model_item: $models.find((m) => m.id === modelId), From cd3904046db1d1be316cb41e67470eaa110a60bc Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Mon, 24 Feb 2025 20:19:32 -0800 Subject: [PATCH 062/623] refac --- src/lib/components/common/CodeEditor.svelte | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/lib/components/common/CodeEditor.svelte b/src/lib/components/common/CodeEditor.svelte index 0c4a008f1ef..d545d7236a7 100644 --- a/src/lib/components/common/CodeEditor.svelte +++ b/src/lib/components/common/CodeEditor.svelte @@ -47,6 +47,10 @@ let codeEditor; + export const focus = () => { + codeEditor.focus(); + }; + let isDarkMode = false; let editorTheme = new Compartment(); let editorLanguage = new Compartment(); From e06111a362b87d571fcf9b7158864aec7b57b9d9 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Mon, 24 Feb 2025 23:21:03 -0800 Subject: [PATCH 063/623] refac --- src/lib/components/admin/Functions/FunctionEditor.svelte | 7 ++++--- src/lib/components/workspace/Tools/ToolkitEditor.svelte | 8 ++++---- src/routes/(app)/admin/functions/create/+page.svelte | 4 ++-- src/routes/(app)/admin/functions/edit/+page.svelte | 4 ++-- src/routes/(app)/workspace/tools/create/+page.svelte | 4 ++-- src/routes/(app)/workspace/tools/edit/+page.svelte | 4 ++-- 6 files changed, 16 insertions(+), 15 deletions(-) diff --git a/src/lib/components/admin/Functions/FunctionEditor.svelte b/src/lib/components/admin/Functions/FunctionEditor.svelte index a782591010f..6da2a83f45b 100644 --- a/src/lib/components/admin/Functions/FunctionEditor.svelte +++ b/src/lib/components/admin/Functions/FunctionEditor.svelte @@ -1,8 +1,7 @@ {#if modelIds.length > 0} From 2d379cb35f571956e9dce50733f5618958de75a4 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Wed, 26 Feb 2025 23:19:19 -0800 Subject: [PATCH 096/623] chore: pyproject --- pyproject.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pyproject.toml 
b/pyproject.toml index 5282a9dba3a..ccf48634625 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -40,6 +40,9 @@ dependencies = [ "RestrictedPython==8.0", + "loguru==0.7.2", + "asgiref==3.8.1", + "openai", "anthropic", "google-generativeai==0.7.2", From fcbdfbd744c08272a8ea886bdd0f56d6887ef5dd Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Wed, 26 Feb 2025 23:35:09 -0800 Subject: [PATCH 097/623] refac --- backend/open_webui/utils/audit.py | 2 +- backend/open_webui/utils/auth.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/backend/open_webui/utils/audit.py b/backend/open_webui/utils/audit.py index 95c0745a981..2d7ceabcb89 100644 --- a/backend/open_webui/utils/audit.py +++ b/backend/open_webui/utils/audit.py @@ -194,7 +194,7 @@ async def _get_authenticated_user(self, request: Request) -> UserModel: auth_header = request.headers.get("Authorization") assert auth_header - user = get_current_user(request, get_http_authorization_cred(auth_header)) + user = get_current_user(request, None, get_http_authorization_cred(auth_header)) return user diff --git a/backend/open_webui/utils/auth.py b/backend/open_webui/utils/auth.py index f4cf8543b44..cbc8b15aedb 100644 --- a/backend/open_webui/utils/auth.py +++ b/backend/open_webui/utils/auth.py @@ -205,7 +205,8 @@ def get_current_user( else: # Refresh the user's last active timestamp asynchronously # to prevent blocking the request - background_tasks.add_task(Users.update_user_last_active_by_id, user.id) + if background_tasks: + background_tasks.add_task(Users.update_user_last_active_by_id, user.id) return user else: raise HTTPException( From 0a4dbf7cf057617308f27df536d9143840d6ea70 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Wed, 26 Feb 2025 23:41:09 -0800 Subject: [PATCH 098/623] refac --- src/lib/components/chat/Artifacts.svelte | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/lib/components/chat/Artifacts.svelte b/src/lib/components/chat/Artifacts.svelte index 
0a8ab956aba..d09858838fd 100644 --- a/src/lib/components/chat/Artifacts.svelte +++ b/src/lib/components/chat/Artifacts.svelte @@ -123,6 +123,8 @@ if (contents.length === 0) { showControls.set(false); showArtifacts.set(false); + + toast.error($i18n.t('No HTML, CSS, or JavaScript content found.')); } selectedContentIdx = contents ? contents.length - 1 : 0; From ce7cf62a5538c487230bf976cf32838febeaf048 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Wed, 26 Feb 2025 23:51:39 -0800 Subject: [PATCH 099/623] refac: dedup --- backend/open_webui/retrieval/utils.py | 54 +++++++++++++-------------- 1 file changed, 25 insertions(+), 29 deletions(-) diff --git a/backend/open_webui/retrieval/utils.py b/backend/open_webui/retrieval/utils.py index 011a7bad0d9..b6253e63cc2 100644 --- a/backend/open_webui/retrieval/utils.py +++ b/backend/open_webui/retrieval/utils.py @@ -5,6 +5,7 @@ import asyncio import requests +import hashlib from huggingface_hub import snapshot_download from langchain.retrievers import ContextualCompressionRetriever, EnsembleRetriever @@ -175,46 +176,41 @@ def merge_get_results(get_results: list[dict]) -> dict: def merge_and_sort_query_results( query_results: list[dict], k: int, reverse: bool = False -) -> list[dict]: +) -> dict: # Initialize lists to store combined data - combined_distances = [] - combined_documents = [] - combined_metadatas = [] + combined = [] + seen_hashes = set() # To store unique document hashes for data in query_results: - combined_distances.extend(data["distances"][0]) - combined_documents.extend(data["documents"][0]) - combined_metadatas.extend(data["metadatas"][0]) + distances = data["distances"][0] + documents = data["documents"][0] + metadatas = data["metadatas"][0] + + for distance, document, metadata in zip(distances, documents, metadatas): + if isinstance(document, str): + doc_hash = hashlib.md5( + document.encode() + ).hexdigest() # Compute a hash for uniqueness - # Create a list of tuples (distance, document, 
metadata) - combined = list(zip(combined_distances, combined_documents, combined_metadatas)) + if doc_hash not in seen_hashes: + seen_hashes.add(doc_hash) + combined.append((distance, document, metadata)) # Sort the list based on distances combined.sort(key=lambda x: x[0], reverse=reverse) - # We don't have anything :-( - if not combined: - sorted_distances = [] - sorted_documents = [] - sorted_metadatas = [] - else: - # Unzip the sorted list - sorted_distances, sorted_documents, sorted_metadatas = zip(*combined) - - # Slicing the lists to include only k elements - sorted_distances = list(sorted_distances)[:k] - sorted_documents = list(sorted_documents)[:k] - sorted_metadatas = list(sorted_metadatas)[:k] + # Slice to keep only the top k elements + sorted_distances, sorted_documents, sorted_metadatas = ( + zip(*combined[:k]) if combined else ([], [], []) + ) - # Create the output dictionary - result = { - "distances": [sorted_distances], - "documents": [sorted_documents], - "metadatas": [sorted_metadatas], + # Create and return the output dictionary + return { + "distances": [list(sorted_distances)], + "documents": [list(sorted_documents)], + "metadatas": [list(sorted_metadatas)], } - return result - def get_all_items_from_collections(collection_names: list[str]) -> dict: results = [] From 8c4d967ef4fe9c308ee7ece05045fcc4681cf6b2 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Wed, 26 Feb 2025 23:58:43 -0800 Subject: [PATCH 100/623] refac: evaluations settings ui --- .../admin/Settings/Evaluations.svelte | 88 ++++++++++--------- 1 file changed, 47 insertions(+), 41 deletions(-) diff --git a/src/lib/components/admin/Settings/Evaluations.svelte b/src/lib/components/admin/Settings/Evaluations.svelte index 805d5fc2de8..cf003504c5f 100644 --- a/src/lib/components/admin/Settings/Evaluations.svelte +++ b/src/lib/components/admin/Settings/Evaluations.svelte @@ -103,10 +103,12 @@
{#if evaluationConfig !== null}
-
{$i18n.t('General Settings')}
+
+
{$i18n.t('General')}
-
-
+
+ +
{$i18n.t('Arena Models')}
@@ -116,46 +118,50 @@
{#if evaluationConfig.ENABLE_EVALUATION_ARENA_MODELS} -
- -
-
{$i18n.t('Manage Arena Models')}
- -
- - - -
-
+
+
+
+ {$i18n.t('Manage')} +
-
- {#if (evaluationConfig?.EVALUATION_ARENA_MODELS ?? []).length > 0} - {#each evaluationConfig.EVALUATION_ARENA_MODELS as model, index} - { - editModelHandler(e.detail, index); - }} - on:delete={(e) => { - deleteModelHandler(index); - }} - /> - {/each} - {:else} -
- {$i18n.t( - `Using the default arena model with all models. Click the plus button to add custom models.` - )} +
+ + +
- {/if} +
+ +
+ +
+ {#if (evaluationConfig?.EVALUATION_ARENA_MODELS ?? []).length > 0} + {#each evaluationConfig.EVALUATION_ARENA_MODELS as model, index} + { + editModelHandler(e.detail, index); + }} + on:delete={(e) => { + deleteModelHandler(index); + }} + /> + {/each} + {:else} +
+ {$i18n.t( + `Using the default arena model with all models. Click the plus button to add custom models.` + )} +
+ {/if} +
{/if}
From b0617759322e4d5c03b740a20a07a868c3e3e331 Mon Sep 17 00:00:00 2001 From: kurtdami Date: Thu, 27 Feb 2025 00:12:41 -0800 Subject: [PATCH 101/623] feat: add perplexity integration to web search --- backend/open_webui/config.py | 6 ++ backend/open_webui/main.py | 2 + .../open_webui/retrieval/web/perplexity.py | 87 +++++++++++++++++++ backend/open_webui/routers/retrieval.py | 15 +++- .../admin/Settings/WebSearch.svelte | 14 ++- 5 files changed, 122 insertions(+), 2 deletions(-) create mode 100644 backend/open_webui/retrieval/web/perplexity.py diff --git a/backend/open_webui/config.py b/backend/open_webui/config.py index 5e0e4f0a179..b086778ba49 100644 --- a/backend/open_webui/config.py +++ b/backend/open_webui/config.py @@ -1936,6 +1936,12 @@ class BannerModel(BaseModel): os.getenv("EXA_API_KEY", ""), ) +PERPLEXITY_API_KEY = PersistentConfig( + "PERPLEXITY_API_KEY", + "rag.web.search.perplexity_api_key", + os.getenv("PERPLEXITY_API_KEY", ""), +) + RAG_WEB_SEARCH_RESULT_COUNT = PersistentConfig( "RAG_WEB_SEARCH_RESULT_COUNT", "rag.web.search.result_count", diff --git a/backend/open_webui/main.py b/backend/open_webui/main.py index 346d28d6c3d..801e1a0d46f 100644 --- a/backend/open_webui/main.py +++ b/backend/open_webui/main.py @@ -208,6 +208,7 @@ BING_SEARCH_V7_SUBSCRIPTION_KEY, BRAVE_SEARCH_API_KEY, EXA_API_KEY, + PERPLEXITY_API_KEY, KAGI_SEARCH_API_KEY, MOJEEK_SEARCH_API_KEY, BOCHA_SEARCH_API_KEY, @@ -584,6 +585,7 @@ async def lifespan(app: FastAPI): app.state.config.BING_SEARCH_V7_ENDPOINT = BING_SEARCH_V7_ENDPOINT app.state.config.BING_SEARCH_V7_SUBSCRIPTION_KEY = BING_SEARCH_V7_SUBSCRIPTION_KEY app.state.config.EXA_API_KEY = EXA_API_KEY +app.state.config.PERPLEXITY_API_KEY = PERPLEXITY_API_KEY app.state.config.RAG_WEB_SEARCH_RESULT_COUNT = RAG_WEB_SEARCH_RESULT_COUNT app.state.config.RAG_WEB_SEARCH_CONCURRENT_REQUESTS = RAG_WEB_SEARCH_CONCURRENT_REQUESTS diff --git a/backend/open_webui/retrieval/web/perplexity.py b/backend/open_webui/retrieval/web/perplexity.py 
new file mode 100644 index 00000000000..e5314eb1f73 --- /dev/null +++ b/backend/open_webui/retrieval/web/perplexity.py @@ -0,0 +1,87 @@ +import logging +from typing import Optional, List +import requests + +from open_webui.retrieval.web.main import SearchResult, get_filtered_results +from open_webui.env import SRC_LOG_LEVELS + +log = logging.getLogger(__name__) +log.setLevel(SRC_LOG_LEVELS["RAG"]) + + +def search_perplexity( + api_key: str, + query: str, + count: int, + filter_list: Optional[list[str]] = None, +) -> list[SearchResult]: + """Search using Perplexity API and return the results as a list of SearchResult objects. + + Args: + api_key (str): A Perplexity API key + query (str): The query to search for + count (int): Maximum number of results to return + + """ + + # Handle PersistentConfig object + if hasattr(api_key, "__str__"): + api_key = str(api_key) + + try: + url = "https://api.perplexity.ai/chat/completions" + + # Create payload for the API call + payload = { + "model": "sonar", + "messages": [ + { + "role": "system", + "content": "You are a search assistant. 
Provide factual information with citations.", + }, + {"role": "user", "content": query}, + ], + "temperature": 0.2, # Lower temperature for more factual responses + "stream": False, + } + + headers = { + "Authorization": f"Bearer {api_key}", + "Content-Type": "application/json", + } + + # Make the API request + response = requests.request("POST", url, json=payload, headers=headers) + + # Parse the JSON response + json_response = response.json() + + # Extract citations from the response + citations = json_response.get("citations", []) + + # Create search results from citations + results = [] + for i, citation in enumerate(citations[:count]): + # Extract content from the response to use as snippet + content = "" + if "choices" in json_response and json_response["choices"]: + if i == 0: + content = json_response["choices"][0]["message"]["content"] + + result = {"link": citation, "title": f"Source {i+1}", "snippet": content} + results.append(result) + + if filter_list: + + results = get_filtered_results(results, filter_list) + + return [ + SearchResult( + link=result["link"], title=result["title"], snippet=result["snippet"] + ) + for result in results[:count] + ] + + except Exception as e: + log.error(f"Error searching with Perplexity API: {e}") + return [] diff --git a/backend/open_webui/routers/retrieval.py b/backend/open_webui/routers/retrieval.py index e69d2ce9634..d09445c376b 100644 --- a/backend/open_webui/routers/retrieval.py +++ b/backend/open_webui/routers/retrieval.py @@ -59,7 +59,7 @@ from open_webui.retrieval.web.tavily import search_tavily from open_webui.retrieval.web.bing import search_bing from open_webui.retrieval.web.exa import search_exa - +from open_webui.retrieval.web.perplexity import search_perplexity from open_webui.retrieval.utils import ( get_embedding_function, @@ -398,6 +398,7 @@ async def get_rag_config(request: Request, user=Depends(get_admin_user)): "bing_search_v7_endpoint": request.app.state.config.BING_SEARCH_V7_ENDPOINT, 
"bing_search_v7_subscription_key": request.app.state.config.BING_SEARCH_V7_SUBSCRIPTION_KEY, "exa_api_key": request.app.state.config.EXA_API_KEY, + "perplexity_api_key": request.app.state.config.PERPLEXITY_API_KEY, "result_count": request.app.state.config.RAG_WEB_SEARCH_RESULT_COUNT, "concurrent_requests": request.app.state.config.RAG_WEB_SEARCH_CONCURRENT_REQUESTS, "domain_filter_list": request.app.state.config.RAG_WEB_SEARCH_DOMAIN_FILTER_LIST, @@ -451,6 +452,7 @@ class WebSearchConfig(BaseModel): bing_search_v7_endpoint: Optional[str] = None bing_search_v7_subscription_key: Optional[str] = None exa_api_key: Optional[str] = None + perplexity_api_key: Optional[str] = None result_count: Optional[int] = None concurrent_requests: Optional[int] = None trust_env: Optional[bool] = None @@ -580,6 +582,8 @@ async def update_rag_config( request.app.state.config.EXA_API_KEY = form_data.web.search.exa_api_key + request.app.state.config.PERPLEXITY_API_KEY = form_data.web.search.perplexity_api_key + request.app.state.config.RAG_WEB_SEARCH_RESULT_COUNT = ( form_data.web.search.result_count ) @@ -641,6 +645,7 @@ async def update_rag_config( "bing_search_v7_endpoint": request.app.state.config.BING_SEARCH_V7_ENDPOINT, "bing_search_v7_subscription_key": request.app.state.config.BING_SEARCH_V7_SUBSCRIPTION_KEY, "exa_api_key": request.app.state.config.EXA_API_KEY, + "perplexity_api_key": request.app.state.config.PERPLEXITY_API_KEY, "result_count": request.app.state.config.RAG_WEB_SEARCH_RESULT_COUNT, "concurrent_requests": request.app.state.config.RAG_WEB_SEARCH_CONCURRENT_REQUESTS, "trust_env": request.app.state.config.RAG_WEB_SEARCH_TRUST_ENV, @@ -1163,6 +1168,7 @@ def search_web(request: Request, engine: str, query: str) -> list[SearchResult]: - SERPLY_API_KEY - TAVILY_API_KEY - EXA_API_KEY + - PERPLEXITY_API_KEY - SEARCHAPI_API_KEY + SEARCHAPI_ENGINE (by default `google`) - SERPAPI_API_KEY + SERPAPI_ENGINE (by default `google`) Args: @@ -1327,6 +1333,13 @@ def search_web(request: 
Request, engine: str, query: str) -> list[SearchResult]: request.app.state.config.RAG_WEB_SEARCH_RESULT_COUNT, request.app.state.config.RAG_WEB_SEARCH_DOMAIN_FILTER_LIST, ) + elif engine == "perplexity": + return search_perplexity( + request.app.state.config.PERPLEXITY_API_KEY, + query, + request.app.state.config.RAG_WEB_SEARCH_RESULT_COUNT, + request.app.state.config.RAG_WEB_SEARCH_DOMAIN_FILTER_LIST, + ) else: raise Exception("No search engine API key found in environment variables") diff --git a/src/lib/components/admin/Settings/WebSearch.svelte b/src/lib/components/admin/Settings/WebSearch.svelte index 84e9d0e5a8e..df41bdbc9b1 100644 --- a/src/lib/components/admin/Settings/WebSearch.svelte +++ b/src/lib/components/admin/Settings/WebSearch.svelte @@ -29,7 +29,8 @@ 'tavily', 'jina', 'bing', - 'exa' + 'exa', + 'perplexity' ]; let youtubeLanguage = 'en'; @@ -344,6 +345,17 @@ bind:value={webConfig.search.exa_api_key} />
+ {:else if webConfig.search.engine === 'perplexity'} +
+
+ {$i18n.t('Perplexity API Key')} +
+ + +
{:else if webConfig.search.engine === 'bing'}
From d7b18b662b3c332c794f079d168657931db6e38e Mon Sep 17 00:00:00 2001 From: tidely <43219534+tidely@users.noreply.github.com> Date: Thu, 27 Feb 2025 11:19:27 +0200 Subject: [PATCH 102/623] chore: use logging.getLevelNamesMapping() Use `logging.getLevelNamesMapping()` for getting all valid logging levels. This also allows adding new ones using `logging.addLevel()`. This feature was [added](https://docs.python.org/3.11/library/logging.html#logging.getLevelNamesMapping) in Python 3.11 which is above the minimum supported Python version for open-webui. --- backend/open_webui/env.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/backend/open_webui/env.py b/backend/open_webui/env.py index ba546a2eb5a..1e8be75cdc6 100644 --- a/backend/open_webui/env.py +++ b/backend/open_webui/env.py @@ -65,10 +65,8 @@ # LOGGING #################################### -log_levels = ["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"] - GLOBAL_LOG_LEVEL = os.environ.get("GLOBAL_LOG_LEVEL", "").upper() -if GLOBAL_LOG_LEVEL in log_levels: +if GLOBAL_LOG_LEVEL in logging.getLevelNamesMapping(): logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL, force=True) else: GLOBAL_LOG_LEVEL = "INFO" @@ -100,7 +98,7 @@ for source in log_sources: log_env_var = source + "_LOG_LEVEL" SRC_LOG_LEVELS[source] = os.environ.get(log_env_var, "").upper() - if SRC_LOG_LEVELS[source] not in log_levels: + if SRC_LOG_LEVELS[source] not in logging.getLevelNamesMapping(): SRC_LOG_LEVELS[source] = GLOBAL_LOG_LEVEL log.info(f"{log_env_var}: {SRC_LOG_LEVELS[source]}") From b838a1df99a4ea9486a3488665fb27c318e59658 Mon Sep 17 00:00:00 2001 From: Tiancong Li Date: Thu, 27 Feb 2025 17:57:10 +0800 Subject: [PATCH 103/623] i18n: update zh-TW --- src/lib/i18n/locales/zh-TW/translation.json | 24 ++++++++++----------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/lib/i18n/locales/zh-TW/translation.json b/src/lib/i18n/locales/zh-TW/translation.json index f386a97e059..119e4461637 
100644 --- a/src/lib/i18n/locales/zh-TW/translation.json +++ b/src/lib/i18n/locales/zh-TW/translation.json @@ -86,7 +86,7 @@ "Archive All Chats": "封存所有對話紀錄", "Archived Chats": "封存的對話紀錄", "archived-chat-export": "archived-chat-export", - "Are you sure you want to clear all memories? This action cannot be undone.": "", + "Are you sure you want to clear all memories? This action cannot be undone.": "您確定要清除所有記憶嗎?此操作無法復原。", "Are you sure you want to delete this channel?": "您確定要刪除此頻道嗎?", "Are you sure you want to delete this message?": "您確定要刪除此訊息嗎?", "Are you sure you want to unarchive all archived chats?": "您確定要解除封存所有封存的對話記錄嗎?", @@ -129,7 +129,7 @@ "Bocha Search API Key": "Bocha 搜尋 API 金鑰", "Brave Search API Key": "Brave 搜尋 API 金鑰", "By {{name}}": "由 {{name}} 製作", - "Bypass Embedding and Retrieval": "", + "Bypass Embedding and Retrieval": "繞過嵌入與檢索", "Bypass SSL verification for Websites": "略過網站的 SSL 驗證", "Calendar": "日曆", "Call": "通話", @@ -163,7 +163,7 @@ "Ciphers": "加密方式", "Citation": "引用", "Clear memory": "清除記憶", - "Clear Memory": "", + "Clear Memory": "清除記憶", "click here": "點選這裡", "Click here for filter guides.": "點選這裡查看篩選器指南。", "Click here for help.": "點選這裡取得協助。", @@ -211,7 +211,7 @@ "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. 
(Default: medium)": "限制用於推理模型的推理程度 。僅適用於特定供應商提供的、支援推理程度設定的推理模型。(預設:中等)", "Contact Admin for WebUI Access": "請聯絡管理員以取得 WebUI 存取權限", "Content": "內容", - "Content Extraction Engine": "", + "Content Extraction Engine": "內容擷取引擎", "Context Length": "上下文長度", "Continue Response": "繼續回應", "Continue with {{provider}}": "使用 {{provider}} 繼續", @@ -248,7 +248,7 @@ "Current Model": "目前模型", "Current Password": "目前密碼", "Custom": "自訂", - "Danger Zone": "", + "Danger Zone": "危險區域", "Dark": "深色", "Database": "資料庫", "December": "12 月", @@ -346,7 +346,7 @@ "ElevenLabs": "ElevenLabs", "Email": "Email", "Embark on adventures": "展開探險之旅", - "Embedding": "", + "Embedding": "嵌入", "Embedding Batch Size": "嵌入批次大小", "Embedding Model": "嵌入模型", "Embedding Model Engine": "嵌入模型引擎", @@ -572,7 +572,7 @@ "Input commands": "輸入命令", "Install from Github URL": "從 GitHub URL 安裝", "Instant Auto-Send After Voice Transcription": "語音轉錄後立即自動傳送", - "Integration": "", + "Integration": "集成", "Interface": "介面", "Invalid file format.": "無效檔案格式。", "Invalid Tag": "無效標籤", @@ -620,7 +620,7 @@ "Listening...": "正在聆聽...", "Llama.cpp": "Llama.cpp", "LLMs can make mistakes. Verify important information.": "大型語言模型可能會出錯。請驗證重要資訊。", - "Loader": "", + "Loader": "載入器", "Loading Kokoro.js...": "Kokoro.js 載入中……", "Local": "本機", "Local Models": "本機模型", @@ -699,7 +699,7 @@ "No HTML, CSS, or JavaScript content found.": "找不到 HTML、CSS 或 JavaScript 內容。", "No inference engine with management support found": "找不到支援管理功能的推理引擎", "No knowledge found": "找不到知識", - "No memories to clear": "", + "No memories to clear": "沒有記憶可清除", "No model IDs": "沒有任何模型 ID", "No models found": "找不到模型", "No models selected": "未選取模型", @@ -837,7 +837,7 @@ "Response notifications cannot be activated as the website permissions have been denied. 
Please visit your browser settings to grant the necessary access.": "無法啟用回應通知,因為網站權限已遭拒。請前往瀏覽器設定以授予必要存取權限。", "Response splitting": "回應分割", "Result": "結果", - "Retrieval": "", + "Retrieval": "檢索", "Retrieval Query Generation": "檢索查詢生成", "Rich Text Input for Chat": "使用富文本輸入對話", "RK": "RK", @@ -1157,6 +1157,6 @@ "Your account status is currently pending activation.": "您的帳號目前正在等待啟用。", "Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "您的所有貢獻將會直接交給外掛開發者;Open WebUI 不會收取任何百分比。然而,所選擇的贊助平臺可能有其自身的費用。", "Youtube": "YouTube", - "Youtube Language": "", - "Youtube Proxy URL": "" + "Youtube Language": "YouTube 語言", + "Youtube Proxy URL": "YouTube 代理網址" } From 37a7dd6d33b77cadc70be18ed20b1de10fc086bf Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Thu, 27 Feb 2025 02:18:44 -0800 Subject: [PATCH 104/623] chore: format --- src/lib/i18n/locales/ar-BH/translation.json | 2 -- src/lib/i18n/locales/bg-BG/translation.json | 2 -- src/lib/i18n/locales/bn-BD/translation.json | 2 -- src/lib/i18n/locales/ca-ES/translation.json | 2 -- src/lib/i18n/locales/ceb-PH/translation.json | 2 -- src/lib/i18n/locales/cs-CZ/translation.json | 2 -- src/lib/i18n/locales/da-DK/translation.json | 2 -- src/lib/i18n/locales/de-DE/translation.json | 2 -- src/lib/i18n/locales/dg-DG/translation.json | 2 -- src/lib/i18n/locales/el-GR/translation.json | 2 -- src/lib/i18n/locales/en-GB/translation.json | 2 -- src/lib/i18n/locales/en-US/translation.json | 2 -- src/lib/i18n/locales/es-ES/translation.json | 2 -- src/lib/i18n/locales/eu-ES/translation.json | 2 -- src/lib/i18n/locales/fa-IR/translation.json | 2 -- src/lib/i18n/locales/fi-FI/translation.json | 2 -- src/lib/i18n/locales/fr-CA/translation.json | 2 -- src/lib/i18n/locales/fr-FR/translation.json | 2 -- src/lib/i18n/locales/he-IL/translation.json | 2 -- src/lib/i18n/locales/hi-IN/translation.json | 2 -- 
src/lib/i18n/locales/hr-HR/translation.json | 2 -- src/lib/i18n/locales/hu-HU/translation.json | 2 -- src/lib/i18n/locales/id-ID/translation.json | 2 -- src/lib/i18n/locales/ie-GA/translation.json | 2 -- src/lib/i18n/locales/it-IT/translation.json | 2 -- src/lib/i18n/locales/ja-JP/translation.json | 2 -- src/lib/i18n/locales/ka-GE/translation.json | 2 -- src/lib/i18n/locales/ko-KR/translation.json | 2 -- src/lib/i18n/locales/lt-LT/translation.json | 2 -- src/lib/i18n/locales/ms-MY/translation.json | 2 -- src/lib/i18n/locales/nb-NO/translation.json | 2 -- src/lib/i18n/locales/nl-NL/translation.json | 2 -- src/lib/i18n/locales/pa-IN/translation.json | 2 -- src/lib/i18n/locales/pl-PL/translation.json | 2 -- src/lib/i18n/locales/pt-BR/translation.json | 2 -- src/lib/i18n/locales/pt-PT/translation.json | 2 -- src/lib/i18n/locales/ro-RO/translation.json | 2 -- src/lib/i18n/locales/ru-RU/translation.json | 2 -- src/lib/i18n/locales/sk-SK/translation.json | 2 -- src/lib/i18n/locales/sr-RS/translation.json | 2 -- src/lib/i18n/locales/sv-SE/translation.json | 2 -- src/lib/i18n/locales/th-TH/translation.json | 2 -- src/lib/i18n/locales/tk-TW/translation.json | 2 -- src/lib/i18n/locales/tr-TR/translation.json | 2 -- src/lib/i18n/locales/uk-UA/translation.json | 2 -- src/lib/i18n/locales/ur-PK/translation.json | 2 -- src/lib/i18n/locales/vi-VN/translation.json | 2 -- src/lib/i18n/locales/zh-CN/translation.json | 2 -- src/lib/i18n/locales/zh-TW/translation.json | 2 -- 49 files changed, 98 deletions(-) diff --git a/src/lib/i18n/locales/ar-BH/translation.json b/src/lib/i18n/locales/ar-BH/translation.json index 1a128d92396..a722d15c4c0 100644 --- a/src/lib/i18n/locales/ar-BH/translation.json +++ b/src/lib/i18n/locales/ar-BH/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "عام", - "General Settings": "الاعدادات العامة", "Generate an image": "", "Generate Image": "", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure 
to enclose them with": "تأكد من إرفاقها", "Make sure to export a workflow.json file as API format from ComfyUI.": "", "Manage": "", - "Manage Arena Models": "", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "", diff --git a/src/lib/i18n/locales/bg-BG/translation.json b/src/lib/i18n/locales/bg-BG/translation.json index c14d438a0ea..cf3a112c4b8 100644 --- a/src/lib/i18n/locales/bg-BG/translation.json +++ b/src/lib/i18n/locales/bg-BG/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "Основни", - "General Settings": "Основни Настройки", "Generate an image": "Генериране на изображение", "Generate Image": "Генериране на изображение", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "Уверете се, че са заключени с", "Make sure to export a workflow.json file as API format from ComfyUI.": "Уверете се, че експортирате файл workflow.json като API формат от ComfyUI.", "Manage": "Управление", - "Manage Arena Models": "Управление на Arena модели", "Manage Direct Connections": "Управление на директни връзки", "Manage Models": "Управление на модели", "Manage Ollama": "Управление на Ollama", diff --git a/src/lib/i18n/locales/bn-BD/translation.json b/src/lib/i18n/locales/bn-BD/translation.json index a3761d2aece..e4080ee7f59 100644 --- a/src/lib/i18n/locales/bn-BD/translation.json +++ b/src/lib/i18n/locales/bn-BD/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "সাধারণ", - "General Settings": "সাধারণ সেটিংসমূহ", "Generate an image": "", "Generate Image": "", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "এটা দিয়ে বন্ধনী দিতে ভুলবেন না", "Make sure to export a workflow.json file as API format from ComfyUI.": "", "Manage": "", - "Manage Arena Models": "", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "", diff --git 
a/src/lib/i18n/locales/ca-ES/translation.json b/src/lib/i18n/locales/ca-ES/translation.json index a7850727fa1..d58420aa4c9 100644 --- a/src/lib/i18n/locales/ca-ES/translation.json +++ b/src/lib/i18n/locales/ca-ES/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "Configuració de Gemini API", "Gemini API Key is required.": "La clau API de Gemini és necessària", "General": "General", - "General Settings": "Preferències generals", "Generate an image": "Generar una imatge", "Generate Image": "Generar imatge", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "Assegura't d'envoltar-los amb", "Make sure to export a workflow.json file as API format from ComfyUI.": "Assegura't d'exportar un fitxer workflow.json com a format API des de ComfyUI.", "Manage": "Gestionar", - "Manage Arena Models": "Gestionar els models de l'Arena", "Manage Direct Connections": "Gestionar les connexions directes", "Manage Models": "Gestionar els models", "Manage Ollama": "Gestionar Ollama", diff --git a/src/lib/i18n/locales/ceb-PH/translation.json b/src/lib/i18n/locales/ceb-PH/translation.json index 933c9aefbe6..a957731715f 100644 --- a/src/lib/i18n/locales/ceb-PH/translation.json +++ b/src/lib/i18n/locales/ceb-PH/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "Heneral", - "General Settings": "kinatibuk-ang mga setting", "Generate an image": "", "Generate Image": "", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "Siguruha nga palibutan sila", "Make sure to export a workflow.json file as API format from ComfyUI.": "", "Manage": "", - "Manage Arena Models": "", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "", diff --git a/src/lib/i18n/locales/cs-CZ/translation.json b/src/lib/i18n/locales/cs-CZ/translation.json index 4168627f634..1807c08b413 100644 --- a/src/lib/i18n/locales/cs-CZ/translation.json +++ 
b/src/lib/i18n/locales/cs-CZ/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "Obecný", - "General Settings": "Obecná nastavení", "Generate an image": "", "Generate Image": "Vygenerovat obrázek", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "Ujistěte se, že jsou uzavřeny pomocí", "Make sure to export a workflow.json file as API format from ComfyUI.": "Ujistěte se, že exportujete soubor workflow.json ve formátu API z ComfyUI.", "Manage": "Spravovat", - "Manage Arena Models": "Správa modelů v Arena", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "", diff --git a/src/lib/i18n/locales/da-DK/translation.json b/src/lib/i18n/locales/da-DK/translation.json index 06bfc725b9b..82f19119c2c 100644 --- a/src/lib/i18n/locales/da-DK/translation.json +++ b/src/lib/i18n/locales/da-DK/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "Generelt", - "General Settings": "Generelle indstillinger", "Generate an image": "", "Generate Image": "Generer billede", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "Sørg for at omslutte dem med", "Make sure to export a workflow.json file as API format from ComfyUI.": "Sørg for at eksportere en workflow.json-fil som API-format fra ComfyUI.", "Manage": "Administrer", - "Manage Arena Models": "", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "", diff --git a/src/lib/i18n/locales/de-DE/translation.json b/src/lib/i18n/locales/de-DE/translation.json index 04c52443b8a..ba1d2f31a3a 100644 --- a/src/lib/i18n/locales/de-DE/translation.json +++ b/src/lib/i18n/locales/de-DE/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "Allgemein", - "General Settings": "Allgemeine Einstellungen", "Generate an image": "Bild erzeugen", "Generate Image": "Bild erzeugen", "Generate 
prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "Umschließe Variablen mit", "Make sure to export a workflow.json file as API format from ComfyUI.": "Stellen Sie sicher, dass sie eine workflow.json-Datei im API-Format von ComfyUI exportieren.", "Manage": "Verwalten", - "Manage Arena Models": "Arena-Modelle verwalten", "Manage Direct Connections": "", "Manage Models": "Modelle verwalten", "Manage Ollama": "Ollama verwalten", diff --git a/src/lib/i18n/locales/dg-DG/translation.json b/src/lib/i18n/locales/dg-DG/translation.json index 471ca9df43a..e410f2943d4 100644 --- a/src/lib/i18n/locales/dg-DG/translation.json +++ b/src/lib/i18n/locales/dg-DG/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "Woweral", - "General Settings": "General Doge Settings", "Generate an image": "", "Generate Image": "", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "Make sure to enclose them with", "Make sure to export a workflow.json file as API format from ComfyUI.": "", "Manage": "", - "Manage Arena Models": "", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "", diff --git a/src/lib/i18n/locales/el-GR/translation.json b/src/lib/i18n/locales/el-GR/translation.json index b3ae8643f51..076e9c96601 100644 --- a/src/lib/i18n/locales/el-GR/translation.json +++ b/src/lib/i18n/locales/el-GR/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "Γενικά", - "General Settings": "Γενικές Ρυθμίσεις", "Generate an image": "", "Generate Image": "Δημιουργία Εικόνας", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "Βεβαιωθείτε ότι τα περικλείετε με", "Make sure to export a workflow.json file as API format from ComfyUI.": "Βεβαιωθείτε ότι εξάγετε ένα αρχείο workflow.json ως μορφή API από το ComfyUI.", "Manage": "Διαχείριση", - "Manage Arena Models": "Διαχείριση Μοντέλων Arena", 
"Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "Διαχείριση Ollama", diff --git a/src/lib/i18n/locales/en-GB/translation.json b/src/lib/i18n/locales/en-GB/translation.json index 4810b6c6997..9bebdd923b8 100644 --- a/src/lib/i18n/locales/en-GB/translation.json +++ b/src/lib/i18n/locales/en-GB/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "", - "General Settings": "", "Generate an image": "", "Generate Image": "", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "", "Make sure to export a workflow.json file as API format from ComfyUI.": "", "Manage": "", - "Manage Arena Models": "", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "", diff --git a/src/lib/i18n/locales/en-US/translation.json b/src/lib/i18n/locales/en-US/translation.json index 4810b6c6997..9bebdd923b8 100644 --- a/src/lib/i18n/locales/en-US/translation.json +++ b/src/lib/i18n/locales/en-US/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "", - "General Settings": "", "Generate an image": "", "Generate Image": "", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "", "Make sure to export a workflow.json file as API format from ComfyUI.": "", "Manage": "", - "Manage Arena Models": "", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "", diff --git a/src/lib/i18n/locales/es-ES/translation.json b/src/lib/i18n/locales/es-ES/translation.json index 5536e02606e..79b03eeb630 100644 --- a/src/lib/i18n/locales/es-ES/translation.json +++ b/src/lib/i18n/locales/es-ES/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "General", - "General Settings": "Opciones Generales", "Generate an image": "Generar una imagen", "Generate Image": "Generar imagen", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to 
enclose them with": "Asegúrese de adjuntarlos con", "Make sure to export a workflow.json file as API format from ComfyUI.": "Asegúrese de exportar un archivo workflow.json en formato API desde ComfyUI.", "Manage": "Gestionar", - "Manage Arena Models": "Gestionar modelos de Arena", "Manage Direct Connections": "", "Manage Models": "Gestionar modelos", "Manage Ollama": "Gestionar Ollama", diff --git a/src/lib/i18n/locales/eu-ES/translation.json b/src/lib/i18n/locales/eu-ES/translation.json index 8a43e6ea79c..ee5ed5e0c87 100644 --- a/src/lib/i18n/locales/eu-ES/translation.json +++ b/src/lib/i18n/locales/eu-ES/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "Orokorra", - "General Settings": "Ezarpen Orokorrak", "Generate an image": "", "Generate Image": "Sortu Irudia", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "Ziurtatu hauek gehitzen dituzula", "Make sure to export a workflow.json file as API format from ComfyUI.": "Ziurtatu workflow.json fitxategia API formatu gisa esportatzen duzula ComfyUI-tik.", "Manage": "Kudeatu", - "Manage Arena Models": "Kudeatu Arena Modeloak", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "Kudeatu Ollama", diff --git a/src/lib/i18n/locales/fa-IR/translation.json b/src/lib/i18n/locales/fa-IR/translation.json index 6da8d6e9000..038d3ec74df 100644 --- a/src/lib/i18n/locales/fa-IR/translation.json +++ b/src/lib/i18n/locales/fa-IR/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "عمومی", - "General Settings": "تنظیمات عمومی", "Generate an image": "", "Generate Image": "", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "مطمئن شوید که آنها را با این محصور کنید:", "Make sure to export a workflow.json file as API format from ComfyUI.": "", "Manage": "", - "Manage Arena Models": "", "Manage Direct Connections": "", "Manage Models": "", 
"Manage Ollama": "", diff --git a/src/lib/i18n/locales/fi-FI/translation.json b/src/lib/i18n/locales/fi-FI/translation.json index a14e5461d31..b5e09bb8b20 100644 --- a/src/lib/i18n/locales/fi-FI/translation.json +++ b/src/lib/i18n/locales/fi-FI/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "Gemini API konfiguraatio", "Gemini API Key is required.": "Gemini API -avain on vaaditaan.", "General": "Yleinen", - "General Settings": "Yleiset asetukset", "Generate an image": "Luo kuva", "Generate Image": "Luo kuva", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "Varmista, että suljet ne", "Make sure to export a workflow.json file as API format from ComfyUI.": "Muista viedä workflow.json-tiedosto API-muodossa ComfyUI:sta.", "Manage": "Hallitse", - "Manage Arena Models": "Hallitse Arena-malleja", "Manage Direct Connections": "Hallitse suoria yhteyksiä", "Manage Models": "Hallitse malleja", "Manage Ollama": "Hallitse Ollamaa", diff --git a/src/lib/i18n/locales/fr-CA/translation.json b/src/lib/i18n/locales/fr-CA/translation.json index a4031f3c308..7e98dd7e4cf 100644 --- a/src/lib/i18n/locales/fr-CA/translation.json +++ b/src/lib/i18n/locales/fr-CA/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "Général", - "General Settings": "Paramètres Généraux", "Generate an image": "", "Generate Image": "Générer une image", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "Assurez-vous de les inclure dans", "Make sure to export a workflow.json file as API format from ComfyUI.": "", "Manage": "Gérer", - "Manage Arena Models": "", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "", diff --git a/src/lib/i18n/locales/fr-FR/translation.json b/src/lib/i18n/locales/fr-FR/translation.json index 56b80691803..f9a3b452151 100644 --- a/src/lib/i18n/locales/fr-FR/translation.json +++ b/src/lib/i18n/locales/fr-FR/translation.json @@ -513,7 
+513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "Général", - "General Settings": "Paramètres généraux", "Generate an image": "", "Generate Image": "Générer une image", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "Assurez-vous de les inclure dans", "Make sure to export a workflow.json file as API format from ComfyUI.": "Veillez à exporter un fichier workflow.json au format API depuis ComfyUI.", "Manage": "Gérer", - "Manage Arena Models": "Gérer les modèles d'arène", "Manage Direct Connections": "", "Manage Models": "Gérer les modèles", "Manage Ollama": "Gérer Ollama", diff --git a/src/lib/i18n/locales/he-IL/translation.json b/src/lib/i18n/locales/he-IL/translation.json index 11a739f9b23..8eef95dd45c 100644 --- a/src/lib/i18n/locales/he-IL/translation.json +++ b/src/lib/i18n/locales/he-IL/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "כללי", - "General Settings": "הגדרות כלליות", "Generate an image": "", "Generate Image": "", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "ודא להקיף אותם עם", "Make sure to export a workflow.json file as API format from ComfyUI.": "", "Manage": "", - "Manage Arena Models": "", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "", diff --git a/src/lib/i18n/locales/hi-IN/translation.json b/src/lib/i18n/locales/hi-IN/translation.json index 048116a538b..67058123e3d 100644 --- a/src/lib/i18n/locales/hi-IN/translation.json +++ b/src/lib/i18n/locales/hi-IN/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "सामान्य", - "General Settings": "सामान्य सेटिंग्स", "Generate an image": "", "Generate Image": "", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "उन्हें संलग्न करना सुनिश्चित करें", "Make sure to export a workflow.json file as API format from ComfyUI.": "", 
"Manage": "", - "Manage Arena Models": "", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "", diff --git a/src/lib/i18n/locales/hr-HR/translation.json b/src/lib/i18n/locales/hr-HR/translation.json index 287157283ea..de7aa166bdb 100644 --- a/src/lib/i18n/locales/hr-HR/translation.json +++ b/src/lib/i18n/locales/hr-HR/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "Općenito", - "General Settings": "Opće postavke", "Generate an image": "", "Generate Image": "Gneriraj sliku", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "Provjerite da ih zatvorite s", "Make sure to export a workflow.json file as API format from ComfyUI.": "", "Manage": "Upravljaj", - "Manage Arena Models": "", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "", diff --git a/src/lib/i18n/locales/hu-HU/translation.json b/src/lib/i18n/locales/hu-HU/translation.json index 9bd3b1ccf36..4f59a6684b5 100644 --- a/src/lib/i18n/locales/hu-HU/translation.json +++ b/src/lib/i18n/locales/hu-HU/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "Általános", - "General Settings": "Általános beállítások", "Generate an image": "", "Generate Image": "Kép generálása", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "Győződjön meg róla, hogy körülveszi őket", "Make sure to export a workflow.json file as API format from ComfyUI.": "Győződjön meg róla, hogy exportál egy workflow.json fájlt API formátumban a ComfyUI-ból.", "Manage": "Kezelés", - "Manage Arena Models": "Arena modellek kezelése", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "", diff --git a/src/lib/i18n/locales/id-ID/translation.json b/src/lib/i18n/locales/id-ID/translation.json index 13d82e96e66..acb2fa882a0 100644 --- a/src/lib/i18n/locales/id-ID/translation.json +++ 
b/src/lib/i18n/locales/id-ID/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "Umum", - "General Settings": "Pengaturan Umum", "Generate an image": "", "Generate Image": "Menghasilkan Gambar", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "Pastikan untuk melampirkannya dengan", "Make sure to export a workflow.json file as API format from ComfyUI.": "", "Manage": "Mengelola", - "Manage Arena Models": "", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "", diff --git a/src/lib/i18n/locales/ie-GA/translation.json b/src/lib/i18n/locales/ie-GA/translation.json index 59f00824192..29bf681544c 100644 --- a/src/lib/i18n/locales/ie-GA/translation.json +++ b/src/lib/i18n/locales/ie-GA/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "Ginearálta", - "General Settings": "Socruithe Ginearálta", "Generate an image": "Gin íomhá", "Generate Image": "Ginigh Íomhá", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "Déan cinnte iad a cheangal le", "Make sure to export a workflow.json file as API format from ComfyUI.": "Déan cinnte comhad workflow.json a onnmhairiú mar fhormáid API ó ComfyUI.", "Manage": "Bainistiú", - "Manage Arena Models": "Bainistigh Múnlaí Airéine", "Manage Direct Connections": "", "Manage Models": "Samhlacha a bhainistiú", "Manage Ollama": "Bainistigh Ollama", diff --git a/src/lib/i18n/locales/it-IT/translation.json b/src/lib/i18n/locales/it-IT/translation.json index 02ab2c938e6..b8eb02eebbb 100644 --- a/src/lib/i18n/locales/it-IT/translation.json +++ b/src/lib/i18n/locales/it-IT/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "Generale", - "General Settings": "Impostazioni generali", "Generate an image": "", "Generate Image": "", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose 
them with": "Assicurati di racchiuderli con", "Make sure to export a workflow.json file as API format from ComfyUI.": "", "Manage": "", - "Manage Arena Models": "", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "", diff --git a/src/lib/i18n/locales/ja-JP/translation.json b/src/lib/i18n/locales/ja-JP/translation.json index 7fff4440617..ebe4d2fc6ca 100644 --- a/src/lib/i18n/locales/ja-JP/translation.json +++ b/src/lib/i18n/locales/ja-JP/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "一般", - "General Settings": "一般設定", "Generate an image": "", "Generate Image": "", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "必ず次で囲んでください", "Make sure to export a workflow.json file as API format from ComfyUI.": "", "Manage": "管理", - "Manage Arena Models": "", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "", diff --git a/src/lib/i18n/locales/ka-GE/translation.json b/src/lib/i18n/locales/ka-GE/translation.json index 345b82289d0..952788840bf 100644 --- a/src/lib/i18n/locales/ka-GE/translation.json +++ b/src/lib/i18n/locales/ka-GE/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "ზოგადი", - "General Settings": "ზოგადი პარამეტრები", "Generate an image": "", "Generate Image": "", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "დარწმუნდით, რომ ჩასვით ისინი", "Make sure to export a workflow.json file as API format from ComfyUI.": "", "Manage": "მართვა", - "Manage Arena Models": "", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "", diff --git a/src/lib/i18n/locales/ko-KR/translation.json b/src/lib/i18n/locales/ko-KR/translation.json index a964bbe9342..f35469cb4e9 100644 --- a/src/lib/i18n/locales/ko-KR/translation.json +++ b/src/lib/i18n/locales/ko-KR/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is 
required.": "", "General": "일반", - "General Settings": "일반 설정", "Generate an image": "", "Generate Image": "이미지 생성", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "꼭 다음으로 감싸세요:", "Make sure to export a workflow.json file as API format from ComfyUI.": "꼭 workflow.json 파일을 ComfyUI의 API 형식대로 내보내세요", "Manage": "관리", - "Manage Arena Models": "아레나 모델 관리", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "", diff --git a/src/lib/i18n/locales/lt-LT/translation.json b/src/lib/i18n/locales/lt-LT/translation.json index 9b86f388c37..999c19ca505 100644 --- a/src/lib/i18n/locales/lt-LT/translation.json +++ b/src/lib/i18n/locales/lt-LT/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "Bendri", - "General Settings": "Bendri nustatymai", "Generate an image": "", "Generate Image": "Generuoti paveikslėlį", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "Užtikrinktie, kad įtraukiate viduje:", "Make sure to export a workflow.json file as API format from ComfyUI.": "", "Manage": "Tvarkyti", - "Manage Arena Models": "", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "", diff --git a/src/lib/i18n/locales/ms-MY/translation.json b/src/lib/i18n/locales/ms-MY/translation.json index 5f47e799be2..9758e4f7bc0 100644 --- a/src/lib/i18n/locales/ms-MY/translation.json +++ b/src/lib/i18n/locales/ms-MY/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "Umum", - "General Settings": "Tetapan Umum", "Generate an image": "", "Generate Image": "Jana Imej", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "Pastikan untuk melampirkannya dengan", "Make sure to export a workflow.json file as API format from ComfyUI.": "", "Manage": "Urus", - "Manage Arena Models": "", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "", diff 
--git a/src/lib/i18n/locales/nb-NO/translation.json b/src/lib/i18n/locales/nb-NO/translation.json index 701e3ea0507..27eca6694de 100644 --- a/src/lib/i18n/locales/nb-NO/translation.json +++ b/src/lib/i18n/locales/nb-NO/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "Konfigurasjon", "Gemini API Key is required.": "Det kreves en API-nøkkel for Gemini.", "General": "Generelt", - "General Settings": "Generelle innstillinger", "Generate an image": "Genrer et bilde", "Generate Image": "Generer bilde", "Generate prompt pair": "Generer ledetekst-kombinasjon", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "Sørg for å omslutte dem med", "Make sure to export a workflow.json file as API format from ComfyUI.": "Sørg for å eksportere en workflow.json-fil i API-formatet fra ComfyUI.", "Manage": "Administrer", - "Manage Arena Models": "Behandle Arena-modeller", "Manage Direct Connections": "Behandle direkte koblinger", "Manage Models": "Behandle modeller", "Manage Ollama": "Behandle Ollama", diff --git a/src/lib/i18n/locales/nl-NL/translation.json b/src/lib/i18n/locales/nl-NL/translation.json index bcb875b18d7..5faaa477365 100644 --- a/src/lib/i18n/locales/nl-NL/translation.json +++ b/src/lib/i18n/locales/nl-NL/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "Algemeen", - "General Settings": "Algemene instellingen", "Generate an image": "", "Generate Image": "Genereer afbeelding", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "Zorg ervoor dat je ze omringt met", "Make sure to export a workflow.json file as API format from ComfyUI.": "Zorg ervoor dat je een workflow.json-bestand als API-formaat exporteert vanuit ComfyUI.", "Manage": "Beheren", - "Manage Arena Models": "Beheer srenamodellen", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "Beheer Ollama", diff --git a/src/lib/i18n/locales/pa-IN/translation.json 
b/src/lib/i18n/locales/pa-IN/translation.json index 5c60edc5a87..1fff2933fee 100644 --- a/src/lib/i18n/locales/pa-IN/translation.json +++ b/src/lib/i18n/locales/pa-IN/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "ਆਮ", - "General Settings": "ਆਮ ਸੈਟਿੰਗਾਂ", "Generate an image": "", "Generate Image": "", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "ਸੁਨਿਸ਼ਚਿਤ ਕਰੋ ਕਿ ਉਨ੍ਹਾਂ ਨੂੰ ਘੇਰੋ", "Make sure to export a workflow.json file as API format from ComfyUI.": "", "Manage": "", - "Manage Arena Models": "", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "", diff --git a/src/lib/i18n/locales/pl-PL/translation.json b/src/lib/i18n/locales/pl-PL/translation.json index 0572b1d4fcc..42c8d19dc58 100644 --- a/src/lib/i18n/locales/pl-PL/translation.json +++ b/src/lib/i18n/locales/pl-PL/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "Ogólne", - "General Settings": "Ustawienia ogólne", "Generate an image": "Wygeneruj obraz", "Generate Image": "Wygeneruj obraz", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "Upewnij się, że są one zawarte w", "Make sure to export a workflow.json file as API format from ComfyUI.": "Upewnij się, że wyeksportowałeś plik workflow.json w formacie API z ComfyUI.", "Manage": "Zarządzaj", - "Manage Arena Models": "Zarządzaj modelami areny", "Manage Direct Connections": "Zarządzaj bezpośrednimi połączeniami", "Manage Models": "Zarządzaj modelami", "Manage Ollama": "Zarządzaj Ollamą", diff --git a/src/lib/i18n/locales/pt-BR/translation.json b/src/lib/i18n/locales/pt-BR/translation.json index 1f1c6d5338b..86dc3377f90 100644 --- a/src/lib/i18n/locales/pt-BR/translation.json +++ b/src/lib/i18n/locales/pt-BR/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "Geral", - "General Settings": 
"Configurações Gerais", "Generate an image": "", "Generate Image": "Gerar Imagem", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "Certifique-se de encerrá-los com", "Make sure to export a workflow.json file as API format from ComfyUI.": "Certifique-se de exportar um arquivo workflow.json como o formato API do ComfyUI.", "Manage": "Gerenciar", - "Manage Arena Models": "Gerenciar Arena de Modelos", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "Gerenciar Ollama", diff --git a/src/lib/i18n/locales/pt-PT/translation.json b/src/lib/i18n/locales/pt-PT/translation.json index 350c47ccffb..e8ef705a8ea 100644 --- a/src/lib/i18n/locales/pt-PT/translation.json +++ b/src/lib/i18n/locales/pt-PT/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "Geral", - "General Settings": "Configurações Gerais", "Generate an image": "", "Generate Image": "Gerar imagem", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "Certifique-se de colocá-los entre", "Make sure to export a workflow.json file as API format from ComfyUI.": "", "Manage": "Gerir", - "Manage Arena Models": "", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "", diff --git a/src/lib/i18n/locales/ro-RO/translation.json b/src/lib/i18n/locales/ro-RO/translation.json index fc2258a0823..4911aa52797 100644 --- a/src/lib/i18n/locales/ro-RO/translation.json +++ b/src/lib/i18n/locales/ro-RO/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "General", - "General Settings": "Setări Generale", "Generate an image": "", "Generate Image": "Generează Imagine", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "Asigurați-vă că le închideți cu", "Make sure to export a workflow.json file as API format from ComfyUI.": "Asigură-te că exporți un fișier {{workflow.json}} în format API din 
{{ComfyUI}}.", "Manage": "Gestionează", - "Manage Arena Models": "Gestionați Modelele Arena", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "", diff --git a/src/lib/i18n/locales/ru-RU/translation.json b/src/lib/i18n/locales/ru-RU/translation.json index 0552bc2c7bf..1d9fae43329 100644 --- a/src/lib/i18n/locales/ru-RU/translation.json +++ b/src/lib/i18n/locales/ru-RU/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "Общее", - "General Settings": "Общие настройки", "Generate an image": "", "Generate Image": "Сгенерировать изображение", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "Убедитесь, что они заключены в", "Make sure to export a workflow.json file as API format from ComfyUI.": "Убедитесь, что экспортируете файл workflow.json в формате API из ComfyUI.", "Manage": "Управлять", - "Manage Arena Models": "Управление Ареной Моделей", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "Управление Ollama", diff --git a/src/lib/i18n/locales/sk-SK/translation.json b/src/lib/i18n/locales/sk-SK/translation.json index ae1d3efccc1..a25b916d1e6 100644 --- a/src/lib/i18n/locales/sk-SK/translation.json +++ b/src/lib/i18n/locales/sk-SK/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "Všeobecné", - "General Settings": "Všeobecné nastavenia", "Generate an image": "", "Generate Image": "Vygenerovať obrázok", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "Uistite sa, že sú uzavreté pomocou", "Make sure to export a workflow.json file as API format from ComfyUI.": "Uistite sa, že exportujete súbor workflow.json vo formáte API z ComfyUI.", "Manage": "Spravovať", - "Manage Arena Models": "Správa modelov v Arena", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "", diff --git a/src/lib/i18n/locales/sr-RS/translation.json 
b/src/lib/i18n/locales/sr-RS/translation.json index ee3c027193a..49f5ea3a9f3 100644 --- a/src/lib/i18n/locales/sr-RS/translation.json +++ b/src/lib/i18n/locales/sr-RS/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "Опште", - "General Settings": "Општа подешавања", "Generate an image": "", "Generate Image": "", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "Уверите се да их затворите са", "Make sure to export a workflow.json file as API format from ComfyUI.": "", "Manage": "Управљај", - "Manage Arena Models": "Управљај моделима арене", "Manage Direct Connections": "", "Manage Models": "Управљај моделима", "Manage Ollama": "Управљај Ollama-ом", diff --git a/src/lib/i18n/locales/sv-SE/translation.json b/src/lib/i18n/locales/sv-SE/translation.json index e958aa3751d..fefd3337d80 100644 --- a/src/lib/i18n/locales/sv-SE/translation.json +++ b/src/lib/i18n/locales/sv-SE/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "Allmän", - "General Settings": "Allmänna inställningar", "Generate an image": "", "Generate Image": "Generera bild", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "Se till att bifoga dem med", "Make sure to export a workflow.json file as API format from ComfyUI.": "", "Manage": "Hantera", - "Manage Arena Models": "", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "", diff --git a/src/lib/i18n/locales/th-TH/translation.json b/src/lib/i18n/locales/th-TH/translation.json index 6ebd2b106c1..488142d81d8 100644 --- a/src/lib/i18n/locales/th-TH/translation.json +++ b/src/lib/i18n/locales/th-TH/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "ทั่วไป", - "General Settings": "การตั้งค่าทั่วไป", "Generate an image": "", "Generate Image": "สร้างภาพ", "Generate prompt pair": "", @@ -631,7 +630,6 @@ 
"Make sure to enclose them with": "", "Make sure to export a workflow.json file as API format from ComfyUI.": "", "Manage": "จัดการ", - "Manage Arena Models": "", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "", diff --git a/src/lib/i18n/locales/tk-TW/translation.json b/src/lib/i18n/locales/tk-TW/translation.json index 4810b6c6997..9bebdd923b8 100644 --- a/src/lib/i18n/locales/tk-TW/translation.json +++ b/src/lib/i18n/locales/tk-TW/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "", - "General Settings": "", "Generate an image": "", "Generate Image": "", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "", "Make sure to export a workflow.json file as API format from ComfyUI.": "", "Manage": "", - "Manage Arena Models": "", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "", diff --git a/src/lib/i18n/locales/tr-TR/translation.json b/src/lib/i18n/locales/tr-TR/translation.json index 7e04f611c79..25fe0256c45 100644 --- a/src/lib/i18n/locales/tr-TR/translation.json +++ b/src/lib/i18n/locales/tr-TR/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "Genel", - "General Settings": "Genel Ayarlar", "Generate an image": "", "Generate Image": "Görsel Üret", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "Değişkenlerinizi şu şekilde biçimlendirin:", "Make sure to export a workflow.json file as API format from ComfyUI.": "ComfyUI'dan API formatında bir workflow.json dosyası olarak dışa aktardığınızdan emin olun.", "Manage": "Yönet", - "Manage Arena Models": "Arena Modellerini Yönet", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "Ollama'yı Yönet", diff --git a/src/lib/i18n/locales/uk-UA/translation.json b/src/lib/i18n/locales/uk-UA/translation.json index aa5731cbf5d..57829d390e0 100644 --- 
a/src/lib/i18n/locales/uk-UA/translation.json +++ b/src/lib/i18n/locales/uk-UA/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "Загальні", - "General Settings": "Загальні налаштування", "Generate an image": "Згенерувати зображення", "Generate Image": "Створити зображення", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "Переконайтеся, що вони закриті", "Make sure to export a workflow.json file as API format from ComfyUI.": "Обов'язково експортуйте файл workflow.json у форматі API з ComfyUI.", "Manage": "Керувати", - "Manage Arena Models": "Керувати моделями Arena", "Manage Direct Connections": "Керування прямими з'єднаннями", "Manage Models": "Керувати моделями", "Manage Ollama": "Керувати Ollama", diff --git a/src/lib/i18n/locales/ur-PK/translation.json b/src/lib/i18n/locales/ur-PK/translation.json index cd0102ff1f2..136a8a8b396 100644 --- a/src/lib/i18n/locales/ur-PK/translation.json +++ b/src/lib/i18n/locales/ur-PK/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "عمومی", - "General Settings": "عمومی ترتیبات", "Generate an image": "", "Generate Image": "تصویر بنائیں", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "انہیں کے ساتھ شامل کریں", "Make sure to export a workflow.json file as API format from ComfyUI.": "یقینی بنائیں کہ ComfyUI سے workflow.json فائل کو API فارمیٹ میں ایکسپورٹ کریں", "Manage": "مینیج کریں", - "Manage Arena Models": "ایرینا ماڈلز کا نظم کریں", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "", diff --git a/src/lib/i18n/locales/vi-VN/translation.json b/src/lib/i18n/locales/vi-VN/translation.json index 9bdc7892a46..2bc35723b3e 100644 --- a/src/lib/i18n/locales/vi-VN/translation.json +++ b/src/lib/i18n/locales/vi-VN/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "Cài đặt 
chung", - "General Settings": "Cấu hình chung", "Generate an image": "", "Generate Image": "Sinh ảnh", "Generate prompt pair": "", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "Hãy chắc chắn bao quanh chúng bằng", "Make sure to export a workflow.json file as API format from ComfyUI.": "Đảm bảo xuất tệp Workflow.json đúng format API của ComfyUI.", "Manage": "Quản lý", - "Manage Arena Models": "", "Manage Direct Connections": "", "Manage Models": "", "Manage Ollama": "", diff --git a/src/lib/i18n/locales/zh-CN/translation.json b/src/lib/i18n/locales/zh-CN/translation.json index d89213684cf..c64ce40da5a 100644 --- a/src/lib/i18n/locales/zh-CN/translation.json +++ b/src/lib/i18n/locales/zh-CN/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "Gemini API 配置", "Gemini API Key is required.": "需要 Gemini API 密钥。", "General": "通用", - "General Settings": "通用设置", "Generate an image": "生成图像", "Generate Image": "生成图像", "Generate prompt pair": "生成提示对", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "确保将它们包含在内", "Make sure to export a workflow.json file as API format from ComfyUI.": "确保从 ComfyUI 导出 API 格式的 workflow.json 文件。", "Manage": "管理", - "Manage Arena Models": "管理竞技场模型", "Manage Direct Connections": "管理直接连接", "Manage Models": "管理模型", "Manage Ollama": "管理 Ollama", diff --git a/src/lib/i18n/locales/zh-TW/translation.json b/src/lib/i18n/locales/zh-TW/translation.json index 119e4461637..161e73649a7 100644 --- a/src/lib/i18n/locales/zh-TW/translation.json +++ b/src/lib/i18n/locales/zh-TW/translation.json @@ -513,7 +513,6 @@ "Gemini API Config": "Gemini API 設定", "Gemini API Key is required.": "必須提供 Gemini API 金鑰", "General": "一般", - "General Settings": "一般設定", "Generate an image": "產生圖片", "Generate Image": "產生圖片", "Generate prompt pair": "產生提示配對", @@ -631,7 +630,6 @@ "Make sure to enclose them with": "請務必將它們放在", "Make sure to export a workflow.json file as API format from ComfyUI.": "請確保從 ComfyUI 匯出 workflow.json 檔案為 API 格式。", "Manage": "管理", - "Manage 
Arena Models": "管理競技模型", "Manage Direct Connections": "管理直接連線", "Manage Models": "管理模型", "Manage Ollama": "管理 Ollama", From 8241fa2e12d27f50b2514643634b340167b8ff06 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Thu, 27 Feb 2025 03:45:33 -0800 Subject: [PATCH 105/623] doc: changelog --- CHANGELOG.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 93d9865f828..22bf47ab3a9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,27 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [0.5.17] - 2025-02-27 + +### Added + +- **🚀 Instant Document Upload with Bypass Embedding & Retrieval**: Admins can now enable "Bypass Embedding & Retrieval" in Admin Settings > Documents, significantly speeding up document uploads and ensuring full document context is retained without chunking. +- **🔎 "Stream" Hook for Real-Time Filtering**: The new "stream" hook allows dynamic real-time message filtering. Learn more in our documentation (https://docs.openwebui.com/features/plugin/functions/filter). +- **☁️ OneDrive Integration**: Early support for OneDrive storage integration has been introduced, expanding file import options. +- **📈 Enhanced Logging with Loguru**: Backend logging has been improved with Loguru, making debugging and issue tracking far more efficient. +- **⚙️ General Stability Enhancements**: Backend and frontend refactoring improves performance, ensuring a smoother and more reliable user experience. +- **🌍 Updated Translations**: Refined multilingual support for better localization and accuracy across various languages. + +### Fixed + +- **🔄 Reliable Model Imports from the Community Platform**: Resolved import failures, allowing seamless integration of community-shared models without errors. 
+- **📊 OpenAI Usage Statistics Restored**: Fixed an issue where OpenAI usage metrics were not displaying correctly, ensuring accurate tracking of usage data. +- **🗂️ Deduplication for Retrieved Documents**: Documents retrieved during searches are now intelligently deduplicated, meaning no more redundant results—helping to keep information concise and relevant. + +### Changed + +- **📝 "Full Context Mode" Renamed for Clarity**: The "Full Context Mode" toggle in Web Search settings is now labeled "Bypass Embedding & Retrieval" for consistency across the UI. + ## [0.5.16] - 2025-02-20 ### Fixed From 38fb9d5fd8b6b39c33f027b63758c210bdd16de0 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Thu, 27 Feb 2025 03:54:16 -0800 Subject: [PATCH 106/623] chore: jspdf depedency --- package-lock.json | 215 +++++++++++++++++++++++++++++++++++++++++++++- package.json | 1 + 2 files changed, 212 insertions(+), 4 deletions(-) diff --git a/package-lock.json b/package-lock.json index 1ce7424f5e5..1d2ad0a1ece 100644 --- a/package-lock.json +++ b/package-lock.json @@ -41,6 +41,7 @@ "i18next-resources-to-backend": "^1.2.0", "idb": "^7.1.1", "js-sha256": "^0.10.1", + "jspdf": "^3.0.0", "katex": "^0.16.21", "kokoro-js": "^1.1.1", "marked": "^9.1.0", @@ -135,9 +136,10 @@ } }, "node_modules/@babel/runtime": { - "version": "7.24.1", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.24.1.tgz", - "integrity": "sha512-+BIznRzyqBf+2wCTxcKE3wDjfGeCoVE61KSHGpkzqrLi8qxqFwBeUFyId2cxkTmm55fzDGnm0+yCxaxygrLUnQ==", + "version": "7.26.9", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.26.9.tgz", + "integrity": "sha512-aA63XwOkcl4xxQa3HjPMqOP6LiK0ZDv3mUPYEFXkpHbaFjtGggE1A61FjFzJnB+p7/oy2gA8E+rcBNl/zC1tMg==", + "license": "MIT", "dependencies": { "regenerator-runtime": "^0.14.0" }, @@ -3170,6 +3172,13 @@ "integrity": "sha512-Sk/uYFOBAB7mb74XcpizmH0KOR2Pv3D2Hmrh1Dmy5BmK3MpdSa5kqZcg6EKBdklU0bFXX9gCfzvpnyUehrPIuA==", "dev": true }, + "node_modules/@types/raf": { 
+ "version": "3.4.3", + "resolved": "https://registry.npmjs.org/@types/raf/-/raf-3.4.3.tgz", + "integrity": "sha512-c4YAvMedbPZ5tEyxzQdMoOhhJ4RD3rngZIdwC2/qDN3d7JpEhB6fiBRKVY1lg5B7Wk+uPBjn5f39j1/2MY1oOw==", + "license": "MIT", + "optional": true + }, "node_modules/@types/resolve": { "version": "1.20.2", "resolved": "https://registry.npmjs.org/@types/resolve/-/resolve-1.20.2.tgz", @@ -3199,6 +3208,13 @@ "integrity": "sha512-MQ1AnmTLOncwEf9IVU+B2e4Hchrku5N67NkgcAHW0p3sdzPe0FNMANxEm6OJUzPniEQGkeT3OROLlCwZJLWFZA==", "dev": true }, + "node_modules/@types/trusted-types": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/@types/trusted-types/-/trusted-types-2.0.7.tgz", + "integrity": "sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==", + "license": "MIT", + "optional": true + }, "node_modules/@types/unist": { "version": "2.0.10", "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.10.tgz", @@ -3794,6 +3810,18 @@ "node": ">= 4.0.0" } }, + "node_modules/atob": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/atob/-/atob-2.1.2.tgz", + "integrity": "sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg==", + "license": "(MIT OR Apache-2.0)", + "bin": { + "atob": "bin/atob.js" + }, + "engines": { + "node": ">= 4.5.0" + } + }, "node_modules/aws-sign2": { "version": "0.7.0", "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", @@ -3829,6 +3857,16 @@ "dev": true, "optional": true }, + "node_modules/base64-arraybuffer": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/base64-arraybuffer/-/base64-arraybuffer-1.0.2.tgz", + "integrity": "sha512-I3yl4r9QB5ZRY3XuJVEPfc2XhZO6YweFPI+UovAzn+8/hb3oJ6lnysaFcjVpkCPfVWFUDvoZ8kmVDP7WyRtYtQ==", + "license": "MIT", + "optional": true, + "engines": { + "node": ">= 0.6.0" + } + }, "node_modules/base64-js": { "version": "1.5.1", "resolved": 
"https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", @@ -4033,6 +4071,18 @@ "node": "10.* || >= 12.*" } }, + "node_modules/btoa": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/btoa/-/btoa-1.2.1.tgz", + "integrity": "sha512-SB4/MIGlsiVkMcHmT+pSmIPoNDoHg+7cMzmt3Uxt628MTz2487DKSqK/fuhFBrkuqrYv5UCEnACpF4dTFNKc/g==", + "license": "(MIT OR Apache-2.0)", + "bin": { + "btoa": "bin/btoa.js" + }, + "engines": { + "node": ">= 0.4.0" + } + }, "node_modules/buffer": { "version": "6.0.3", "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", @@ -4131,6 +4181,33 @@ "node": ">=6" } }, + "node_modules/canvg": { + "version": "3.0.10", + "resolved": "https://registry.npmjs.org/canvg/-/canvg-3.0.10.tgz", + "integrity": "sha512-qwR2FRNO9NlzTeKIPIKpnTY6fqwuYSequ8Ru8c0YkYU7U0oW+hLUvWadLvAu1Rl72OMNiFhoLu4f8eUjQ7l/+Q==", + "license": "MIT", + "optional": true, + "dependencies": { + "@babel/runtime": "^7.12.5", + "@types/raf": "^3.4.0", + "core-js": "^3.8.3", + "raf": "^3.4.1", + "regenerator-runtime": "^0.13.7", + "rgbcolor": "^1.0.1", + "stackblur-canvas": "^2.0.0", + "svg-pathdata": "^6.0.3" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/canvg/node_modules/regenerator-runtime": { + "version": "0.13.11", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz", + "integrity": "sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg==", + "license": "MIT", + "optional": true + }, "node_modules/caseless": { "version": "0.12.0", "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", @@ -4599,6 +4676,18 @@ "node": ">= 0.6" } }, + "node_modules/core-js": { + "version": "3.40.0", + "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.40.0.tgz", + "integrity": "sha512-7vsMc/Lty6AGnn7uFpYT56QesI5D2Y/UkgKounk87OP9Z2H9Z8kj6jzcSGAxFmUtDOS0ntK6lbQz+Nsa0Jj6mQ==", + "hasInstallScript": true, + "license": "MIT", + "optional": true, + 
"funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, "node_modules/core-util-is": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", @@ -4643,6 +4732,16 @@ "node": ">= 8" } }, + "node_modules/css-line-break": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/css-line-break/-/css-line-break-2.1.0.tgz", + "integrity": "sha512-FHcKFCZcAha3LwfVBhCQbW2nCNbkZXn7KVUJcsT5/P8YmfsVja0FMPJr0B903j/E69HUphKiV9iQArX8SDYA4w==", + "license": "MIT", + "optional": true, + "dependencies": { + "utrie": "^1.0.2" + } + }, "node_modules/css-select": { "version": "5.1.0", "resolved": "https://registry.npmjs.org/css-select/-/css-select-5.1.0.tgz", @@ -6106,6 +6205,12 @@ "pend": "~1.2.0" } }, + "node_modules/fflate": { + "version": "0.8.2", + "resolved": "https://registry.npmjs.org/fflate/-/fflate-0.8.2.tgz", + "integrity": "sha512-cPJU47OaAoCbg0pBvzsgpTPhmhqI5eJjh/JIu8tPj5q+T7iLvW/JAYUqmE7KOB4R1ZyEhzBaIQpQpardBF5z8A==", + "license": "MIT" + }, "node_modules/figures": { "version": "3.2.0", "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz", @@ -6701,6 +6806,20 @@ "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-3.0.3.tgz", "integrity": "sha512-RuMffC89BOWQoY0WKGpIhn5gX3iI54O6nRA0yC124NYVtzjmFWBIiFd8M0x+ZdX0P9R4lADg1mgP8C7PxGOWuQ==" }, + "node_modules/html2canvas": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/html2canvas/-/html2canvas-1.4.1.tgz", + "integrity": "sha512-fPU6BHNpsyIhr8yyMpTLLxAbkaK8ArIBcmZIRiBLiDhjeqvXolaEmDGmELFuX9I4xDcaKKcJl+TKZLqruBbmWA==", + "license": "MIT", + "optional": true, + "dependencies": { + "css-line-break": "^2.1.0", + "text-segmentation": "^1.0.3" + }, + "engines": { + "node": ">=8.0.0" + } + }, "node_modules/htmlparser2": { "version": "8.0.2", "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-8.0.2.tgz", @@ -7213,6 +7332,34 @@ "graceful-fs": "^4.1.6" } }, + "node_modules/jspdf": 
{ + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/jspdf/-/jspdf-3.0.0.tgz", + "integrity": "sha512-QvuQZvOI8CjfjVgtajdL0ihrDYif1cN5gXiF9lb9Pd9JOpmocvnNyFO9sdiJ/8RA5Bu8zyGOUjJLj5kiku16ug==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.26.0", + "atob": "^2.1.2", + "btoa": "^1.2.1", + "fflate": "^0.8.1" + }, + "optionalDependencies": { + "canvg": "^3.0.6", + "core-js": "^3.6.0", + "dompurify": "^3.2.4", + "html2canvas": "^1.0.0-rc.5" + } + }, + "node_modules/jspdf/node_modules/dompurify": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.2.4.tgz", + "integrity": "sha512-ysFSFEDVduQpyhzAob/kkuJjf5zWkZD8/A9ywSp1byueyuCfHamrCBa14/Oc2iiB0e51B+NpxSl5gmzn+Ms/mg==", + "license": "(MPL-2.0 OR Apache-2.0)", + "optional": true, + "optionalDependencies": { + "@types/trusted-types": "^2.0.7" + } + }, "node_modules/jsprim": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-2.0.2.tgz", @@ -9029,7 +9176,7 @@ "version": "2.1.0", "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", "integrity": "sha512-7EAHlyLHI56VEIdK57uwHdHKIaAGbnXPiw0yWbarQZOKaKpvUIgW0jWRVLiatnM+XXlSwsanIBH/hzGMJulMow==", - "dev": true + "devOptional": true }, "node_modules/periscopic": { "version": "3.1.0", @@ -9755,6 +9902,16 @@ "rimraf": "bin.js" } }, + "node_modules/raf": { + "version": "3.4.1", + "resolved": "https://registry.npmjs.org/raf/-/raf-3.4.1.tgz", + "integrity": "sha512-Sq4CW4QhwOHE8ucn6J34MqtZCeWFP2aQSmrlroYgqAV1PjStIhJXxYuTgUIfkEk7zTLjmIjLmU5q+fbD1NnOJA==", + "license": "MIT", + "optional": true, + "dependencies": { + "performance-now": "^2.1.0" + } + }, "node_modules/react-is": { "version": "18.3.1", "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", @@ -9894,6 +10051,16 @@ "integrity": "sha512-r5a3l5HzYlIC68TpmYKlxWjmOP6wiPJ1vWv2HeLhNsRZMrCkxeqxiHlQ21oXmQ4F3SiryXBHhAD7JZqvOJjFmg==", "dev": true }, + "node_modules/rgbcolor": { + "version": 
"1.0.1", + "resolved": "https://registry.npmjs.org/rgbcolor/-/rgbcolor-1.0.1.tgz", + "integrity": "sha512-9aZLIrhRaD97sgVhtJOW6ckOEh6/GnvQtdVNfdZ6s67+3/XwLS9lBcQYzEEhYVeUowN7pRzMLsyGhK2i/xvWbw==", + "license": "MIT OR SEE LICENSE IN FEEL-FREE.md", + "optional": true, + "engines": { + "node": ">= 0.8.15" + } + }, "node_modules/rimraf": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", @@ -10785,6 +10952,16 @@ "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==", "dev": true }, + "node_modules/stackblur-canvas": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/stackblur-canvas/-/stackblur-canvas-2.7.0.tgz", + "integrity": "sha512-yf7OENo23AGJhBriGx0QivY5JP6Y1HbrrDI6WLt6C5auYZXlQrheoY8hD4ibekFKz1HOfE48Ww8kMWMnJD/zcQ==", + "license": "MIT", + "optional": true, + "engines": { + "node": ">=0.1.14" + } + }, "node_modules/std-env": { "version": "3.7.0", "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.7.0.tgz", @@ -11166,6 +11343,16 @@ "@types/estree": "*" } }, + "node_modules/svg-pathdata": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/svg-pathdata/-/svg-pathdata-6.0.3.tgz", + "integrity": "sha512-qsjeeq5YjBZ5eMdFuUa4ZosMLxgr5RZ+F+Y1OrDhuOCEInRMA3x74XdBtggJcj9kOeInz0WE+LgCPDkZFlBYJw==", + "license": "MIT", + "optional": true, + "engines": { + "node": ">=12.0.0" + } + }, "node_modules/symlink-or-copy": { "version": "1.3.1", "resolved": "https://registry.npmjs.org/symlink-or-copy/-/symlink-or-copy-1.3.1.tgz", @@ -11258,6 +11445,16 @@ "streamx": "^2.12.5" } }, + "node_modules/text-segmentation": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/text-segmentation/-/text-segmentation-1.0.3.tgz", + "integrity": "sha512-iOiPUo/BGnZ6+54OsWxZidGCsdU8YbE4PSpdPinp7DeMtUJNJBoJ/ouUSTJjHkh1KntHaltHl/gDs2FC4i5+Nw==", + "license": "MIT", + "optional": true, + "dependencies": { + "utrie": "^1.0.2" + } + }, 
"node_modules/text-table": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", @@ -11597,6 +11794,16 @@ "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", "dev": true }, + "node_modules/utrie": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/utrie/-/utrie-1.0.2.tgz", + "integrity": "sha512-1MLa5ouZiOmQzUbjbu9VmjLzn1QLXBhwpUa7kdLUQK+KQ5KA9I1vk5U4YHe/X2Ch7PYnJfWuWT+VbuxbGwljhw==", + "license": "MIT", + "optional": true, + "dependencies": { + "base64-arraybuffer": "^1.0.2" + } + }, "node_modules/uuid": { "version": "9.0.1", "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.1.tgz", diff --git a/package.json b/package.json index a5db14e190c..e0722dedfba 100644 --- a/package.json +++ b/package.json @@ -84,6 +84,7 @@ "i18next-resources-to-backend": "^1.2.0", "idb": "^7.1.1", "js-sha256": "^0.10.1", + "jspdf": "^3.0.0", "katex": "^0.16.21", "kokoro-js": "^1.1.1", "marked": "^9.1.0", From 203dfdedf1e0e6b2fc9e176347a49003d3185b71 Mon Sep 17 00:00:00 2001 From: hurxxxx Date: Thu, 27 Feb 2025 23:12:09 +0900 Subject: [PATCH 107/623] Update deprecated msal-browser version to the latest --- package-lock.json | 22 +++++ package.json | 1 + src/lib/utils/onedrive-file-picker.ts | 122 +++++++++++++++----------- 3 files changed, 92 insertions(+), 53 deletions(-) diff --git a/package-lock.json b/package-lock.json index 1ce7424f5e5..a3f57f265b3 100644 --- a/package-lock.json +++ b/package-lock.json @@ -8,6 +8,7 @@ "name": "open-webui", "version": "0.5.17", "dependencies": { + "@azure/msal-browser": "^4.5.0", "@codemirror/lang-javascript": "^6.2.2", "@codemirror/lang-python": "^6.1.6", "@codemirror/language-data": "^6.5.1", @@ -134,6 +135,27 @@ "node": ">=6.0.0" } }, + "node_modules/@azure/msal-browser": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/@azure/msal-browser/-/msal-browser-4.5.0.tgz", + "integrity": 
"sha512-H7mWmu8yI0n0XxhJobrgncXI6IU5h8DKMiWDHL5y+Dc58cdg26GbmaMUehbUkdKAQV2OTiFa4FUa6Fdu/wIxBg==", + "license": "MIT", + "dependencies": { + "@azure/msal-common": "15.2.0" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/@azure/msal-common": { + "version": "15.2.0", + "resolved": "https://registry.npmjs.org/@azure/msal-common/-/msal-common-15.2.0.tgz", + "integrity": "sha512-HiYfGAKthisUYqHG1nImCf/uzcyS31wng3o+CycWLIM9chnYJ9Lk6jZ30Y6YiYYpTQ9+z/FGUpiKKekd3Arc0A==", + "license": "MIT", + "engines": { + "node": ">=0.8.0" + } + }, "node_modules/@babel/runtime": { "version": "7.24.1", "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.24.1.tgz", diff --git a/package.json b/package.json index a5db14e190c..ed52f5fb59a 100644 --- a/package.json +++ b/package.json @@ -51,6 +51,7 @@ }, "type": "module", "dependencies": { + "@azure/msal-browser": "^4.5.0", "@codemirror/lang-javascript": "^6.2.2", "@codemirror/lang-python": "^6.1.6", "@codemirror/language-data": "^6.5.1", diff --git a/src/lib/utils/onedrive-file-picker.ts b/src/lib/utils/onedrive-file-picker.ts index d26469529e6..e14da6eae93 100644 --- a/src/lib/utils/onedrive-file-picker.ts +++ b/src/lib/utils/onedrive-file-picker.ts @@ -1,7 +1,11 @@ -let CLIENT_ID = ''; +import { PublicClientApplication } from '@azure/msal-browser'; +import type { PopupRequest } from '@azure/msal-browser'; + +let CLIENT_ID = '521ada3e-6154-4a35-b9d3-51faac8ac944'; async function getCredentials() { if (CLIENT_ID) return; + const response = await fetch('/api/config'); if (!response.ok) { throw new Error('Failed to fetch OneDrive credentials'); @@ -13,63 +17,69 @@ async function getCredentials() { } } -function loadMsalScript(): Promise { - return new Promise((resolve, reject) => { - const win = window; - if (win.msal) { - resolve(); - return; - } - const script = document.createElement('script'); - script.src = 'https://alcdn.msauth.net/browser/2.19.0/js/msal-browser.min.js'; - script.async = true; - 
script.onload = () => resolve(); - script.onerror = () => reject(new Error('Failed to load MSAL script')); - document.head.appendChild(script); - }); -} -let msalInstance: any; +let msalInstance: PublicClientApplication | null = null; // Initialize MSAL authentication async function initializeMsal() { - if (!CLIENT_ID) { - await getCredentials(); - } - const msalParams = { - auth: { - authority: 'https://login.microsoftonline.com/consumers', - clientId: CLIENT_ID - } - }; try { - await loadMsalScript(); - const win = window; - msalInstance = new win.msal.PublicClientApplication(msalParams); - if (msalInstance.initialize) { - await msalInstance.initialize(); + if (!CLIENT_ID) { + await getCredentials(); + } + + const msalParams = { + auth: { + authority: 'https://login.microsoftonline.com/consumers', + clientId: CLIENT_ID + } + }; + + if (!msalInstance) { + msalInstance = new PublicClientApplication(msalParams); + if (msalInstance.initialize) { + await msalInstance.initialize(); + } } + + return msalInstance; } catch (error) { - console.error('MSAL initialization error:', error); + throw new Error('MSAL initialization failed: ' + (error instanceof Error ? 
error.message : String(error))); } } // Retrieve OneDrive access token async function getToken(): Promise { - const authParams = { scopes: ['OneDrive.ReadWrite'] }; + const authParams: PopupRequest = { scopes: ['OneDrive.ReadWrite'] }; let accessToken = ''; try { - await initializeMsal(); + msalInstance = await initializeMsal(); + if (!msalInstance) { + throw new Error('MSAL not initialized'); + } + const resp = await msalInstance.acquireTokenSilent(authParams); accessToken = resp.accessToken; } catch (err) { - const resp = await msalInstance.loginPopup(authParams); - msalInstance.setActiveAccount(resp.account); - if (resp.idToken) { - const resp2 = await msalInstance.acquireTokenSilent(authParams); - accessToken = resp2.accessToken; + if (!msalInstance) { + throw new Error('MSAL not initialized'); + } + + try { + const resp = await msalInstance.loginPopup(authParams); + msalInstance.setActiveAccount(resp.account); + if (resp.idToken) { + const resp2 = await msalInstance.acquireTokenSilent(authParams); + accessToken = resp2.accessToken; + } + } catch (popupError) { + throw new Error('Failed to login: ' + (popupError instanceof Error ? 
popupError.message : String(popupError))); } } + + if (!accessToken) { + throw new Error('Failed to acquire access token'); + } + return accessToken; } @@ -95,6 +105,7 @@ const params = { } }; + // Download file from OneDrive async function downloadOneDriveFile(fileInfo: any): Promise { const accessToken = await getToken(); @@ -119,6 +130,8 @@ async function downloadOneDriveFile(fileInfo: any): Promise { return await downloadResponse.blob(); } +// OneDrive 피커 결과 인터페이스 정의 + // Open OneDrive file picker and return selected file metadata export async function openOneDrivePicker(): Promise { if (typeof window === 'undefined') { @@ -162,7 +175,6 @@ export async function openOneDrivePicker(): Promise { throw new Error('Could not retrieve auth token'); } } catch (err) { - console.error(err); channelPort?.postMessage({ result: 'error', error: { code: 'tokenError', message: 'Failed to get token' }, @@ -187,7 +199,6 @@ export async function openOneDrivePicker(): Promise { break; } default: { - console.warn('Unsupported command:', command); channelPort?.postMessage({ result: 'error', error: { code: 'unsupportedCommand', message: command.command }, @@ -218,14 +229,17 @@ export async function openOneDrivePicker(): Promise { if (!authToken) { return reject(new Error('Failed to acquire access token')); } + pickerWindow = window.open('', 'OneDrivePicker', 'width=800,height=600'); if (!pickerWindow) { return reject(new Error('Failed to open OneDrive picker window')); } + const queryString = new URLSearchParams({ filePicker: JSON.stringify(params) }); const url = `${baseUrl}?${queryString.toString()}`; + const form = pickerWindow.document.createElement('form'); form.setAttribute('action', url); form.setAttribute('method', 'POST'); @@ -234,11 +248,15 @@ export async function openOneDrivePicker(): Promise { input.setAttribute('name', 'access_token'); input.setAttribute('value', authToken); form.appendChild(input); + pickerWindow.document.body.appendChild(form); form.submit(); + 
window.addEventListener('message', handleWindowMessage); } catch (err) { - if (pickerWindow) pickerWindow.close(); + if (pickerWindow) { + pickerWindow.close(); + } reject(err); } }; @@ -249,18 +267,16 @@ export async function openOneDrivePicker(): Promise { // Pick and download file from OneDrive export async function pickAndDownloadFile(): Promise<{ blob: Blob; name: string } | null> { - try { - const pickerResult = await openOneDrivePicker(); - if (!pickerResult || !pickerResult.items || pickerResult.items.length === 0) { - return null; - } - const selectedFile = pickerResult.items[0]; - const blob = await downloadOneDriveFile(selectedFile); - return { blob, name: selectedFile.name }; - } catch (error) { - console.error('Error occurred during OneDrive file pick/download:', error); - throw error; + const pickerResult = await openOneDrivePicker(); + + if (!pickerResult || !pickerResult.items || pickerResult.items.length === 0) { + return null; } + + const selectedFile = pickerResult.items[0]; + const blob = await downloadOneDriveFile(selectedFile); + + return { blob, name: selectedFile.name }; } export { downloadOneDriveFile }; From c5cfcc1229ec39b232b77d0d08f3281bcdcc31d8 Mon Sep 17 00:00:00 2001 From: hurxxxx Date: Thu, 27 Feb 2025 23:16:52 +0900 Subject: [PATCH 108/623] chore: cleanup --- src/lib/utils/onedrive-file-picker.ts | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/lib/utils/onedrive-file-picker.ts b/src/lib/utils/onedrive-file-picker.ts index e14da6eae93..2f4910a2dc0 100644 --- a/src/lib/utils/onedrive-file-picker.ts +++ b/src/lib/utils/onedrive-file-picker.ts @@ -130,8 +130,6 @@ async function downloadOneDriveFile(fileInfo: any): Promise { return await downloadResponse.blob(); } -// OneDrive 피커 결과 인터페이스 정의 - // Open OneDrive file picker and return selected file metadata export async function openOneDrivePicker(): Promise { if (typeof window === 'undefined') { From f643182eb0b9e2a7ea18a08ee04b34b575329760 Mon Sep 17 00:00:00 2001 From: v0ltis 
Date: Thu, 27 Feb 2025 19:04:53 +0100 Subject: [PATCH 109/623] Updated french translation --- src/lib/i18n/locales/fr-FR/translation.json | 76 ++++++++++----------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/src/lib/i18n/locales/fr-FR/translation.json b/src/lib/i18n/locales/fr-FR/translation.json index f9a3b452151..56ffd65cf4f 100644 --- a/src/lib/i18n/locales/fr-FR/translation.json +++ b/src/lib/i18n/locales/fr-FR/translation.json @@ -21,9 +21,9 @@ "Account Activation Pending": "Activation du compte en attente", "Accurate information": "Information exacte", "Actions": "Actions", - "Activate": "", + "Activate": "Activer", "Activate this command by typing \"/{{COMMAND}}\" to chat input.": "Activez cette commande en tapant \"/{{COMMAND}}\" dans l'entrée de chat.", - "Active Users": "Utilisateurs actifs", + "Active Users": "Utilisateurs connectés", "Add": "Ajouter", "Add a model ID": "Ajouter un identifiant de modèle", "Add a short description about what this model does": "Ajoutez une brève description de ce que fait ce modèle.", @@ -35,7 +35,7 @@ "Add custom prompt": "Ajouter un prompt personnalisé", "Add Files": "Ajouter des fichiers", "Add Group": "Ajouter un groupe", - "Add Memory": "Ajouter de la mémoire", + "Add Memory": "Ajouter un souvenir", "Add Model": "Ajouter un modèle", "Add Reaction": "Ajouter une réaction", "Add Tag": "Ajouter un tag", @@ -642,14 +642,14 @@ "Max Upload Size": "Limite de taille de téléversement", "Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Un maximum de 3 modèles peut être téléchargé en même temps. 
Veuillez réessayer ultérieurement.", "May": "Mai", - "Memories accessible by LLMs will be shown here.": "Les mémoires accessibles par les LLMs seront affichées ici.", + "Memories accessible by LLMs will be shown here.": "Les souvenirs accessibles par les LLMs seront affichées ici.", "Memory": "Mémoire", - "Memory added successfully": "Mémoire ajoutée avec succès", + "Memory added successfully": "Souvenir ajoutée avec succès", "Memory cleared successfully": "La mémoire a été effacée avec succès", - "Memory deleted successfully": "La mémoire a été supprimée avec succès", - "Memory updated successfully": "La mémoire a été mise à jour avec succès", + "Memory deleted successfully": "Le souvenir a été supprimé avec succès", + "Memory updated successfully": "Le souvenir a été mis à jour avec succès", "Merge Responses": "Fusionner les réponses", - "Message rating should be enabled to use this feature": "L'évaluation des messages doit être activée pour utiliser cette fonctionnalité", + "Message rating should be enabled to use this feature": "L'évaluation des messages doit être activée pour pouvoir utiliser cette fonctionnalité", "Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "Les messages que vous envoyez après avoir créé votre lien ne seront pas partagés. 
Les utilisateurs disposant de l'URL pourront voir la conversation partagée.", "Min P": "P min", "Minimum Score": "Score minimal", @@ -682,7 +682,7 @@ "More": "Plus", "Name": "Nom d'utilisateur", "Name your knowledge base": "Nommez votre base de connaissances", - "Native": "", + "Native": "Natif", "New Chat": "Nouvelle conversation", "New Folder": "Nouveau dossier", "New Password": "Nouveau mot de passe", @@ -697,7 +697,7 @@ "No HTML, CSS, or JavaScript content found.": "Aucun contenu HTML, CSS ou JavaScript trouvé.", "No inference engine with management support found": "Aucun moteur d'inférence avec support trouvé", "No knowledge found": "Aucune connaissance trouvée", - "No memories to clear": "", + "No memories to clear": "Aucun souvenir à effacer", "No model IDs": "Aucun ID de modèle", "No models found": "Aucun modèle trouvé", "No models selected": "Aucun modèle sélectionné", @@ -725,9 +725,9 @@ "Ollama": "Ollama", "Ollama API": "API Ollama", "Ollama API settings updated": "Paramètres de l'API Ollama mis à jour", - "Ollama Version": "Version Ollama", + "Ollama Version": "Version d'Ollama", "On": "Activé", - "OneDrive": "", + "OneDrive": "OneDrive", "Only alphanumeric characters and hyphens are allowed": "Seuls les caractères alphanumériques et les tirets sont autorisés", "Only alphanumeric characters and hyphens are allowed in the command string.": "Seuls les caractères alphanumériques et les tirets sont autorisés dans la chaîne de commande.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "Seules les collections peuvent être modifiées, créez une nouvelle base de connaissance pour modifier/ajouter des documents.", @@ -735,7 +735,7 @@ "Oops! Looks like the URL is invalid. Please double-check and try again.": "Oups ! Il semble que l'URL soit invalide. Veuillez vérifier à nouveau et réessayer.", "Oops! There are files still uploading. Please wait for the upload to complete.": "Oups ! 
Des fichiers sont encore en cours de téléversement. Veuillez patienter jusqu'à la fin du téléversement.", "Oops! There was an error in the previous response.": "Oups ! Il y a eu une erreur dans la réponse précédente.", - "Oops! You're using an unsupported method (frontend only). Please serve the WebUI from the backend.": "Oups\u00a0! Vous utilisez une méthode non prise en charge (frontend uniquement). Veuillez servir l'interface Web à partir du backend.", + "Oops! You're using an unsupported method (frontend only). Please serve the WebUI from the backend.": "Oups ! Vous utilisez une méthode non prise en charge (frontend uniquement). Veuillez servir l'interface Web à partir du backend.", "Open file": "Ouvrir le fichier", "Open in full screen": "Ouvrir en plein écran", "Open new chat": "Ouvrir une nouvelle conversation", @@ -761,8 +761,8 @@ "PDF Extract Images (OCR)": "Extraction d'images PDF (OCR)", "pending": "en attente", "Permission denied when accessing media devices": "Accès aux appareils multimédias refusé", - "Permission denied when accessing microphone": "Autorisation refusée lors de l'accès au micro", - "Permission denied when accessing microphone: {{error}}": "Permission refusée lors de l'accès au microphone : {{error}}", + "Permission denied when accessing microphone": "Accès au microphone refusé", + "Permission denied when accessing microphone: {{error}}": "Accès au microphone refusé : {{error}}", "Permissions": "Permissions", "Personalization": "Personnalisation", "Pin": "Épingler", @@ -773,24 +773,24 @@ "Pipelines": "Pipelines", "Pipelines Not Detected": "Aucun pipelines détecté", "Pipelines Valves": "Vannes de pipelines", - "Plain text (.txt)": "Texte simple (.txt)", + "Plain text (.txt)": "Texte (.txt)", "Playground": "Playground", "Please carefully review the following warnings:": "Veuillez lire attentivement les avertissements suivants :", - "Please do not close the settings page while loading the model.": "", + "Please do not close the settings 
page while loading the model.": "Veuillez ne pas fermer les paramètres pendant le chargement du modèle.", "Please enter a prompt": "Veuillez saisir un prompt", "Please fill in all fields.": "Veuillez remplir tous les champs.", "Please select a model first.": "Veuillez d'abord sélectionner un modèle.", - "Please select a model.": "", + "Please select a model.": "Veuillez sélectionner un modèle.", "Please select a reason": "Veuillez sélectionner une raison", "Port": "Port", "Positive attitude": "Attitude positive", "Prefix ID": "ID de préfixe", "Prefix ID is used to avoid conflicts with other connections by adding a prefix to the model IDs - leave empty to disable": "Le préfixe ID est utilisé pour éviter les conflits avec d'autres connexions en ajoutant un préfixe aux ID de modèle - laissez vide pour désactiver", - "Presence Penalty": "", + "Presence Penalty": "Pénalité de présence", "Previous 30 days": "30 derniers jours", "Previous 7 days": "7 derniers jours", "Profile Image": "Image de profil", - "Prompt": "", + "Prompt": "Prompt", "Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (par ex. Dites-moi un fait amusant à propos de l'Empire romain)", "Prompt Content": "Contenu du prompt", "Prompt created successfully": "Prompt créé avec succès", @@ -821,7 +821,7 @@ "Rename": "Renommer", "Reorder Models": "Réorganiser les modèles", "Repeat Last N": "Répéter les N derniers", - "Repeat Penalty (Ollama)": "", + "Repeat Penalty (Ollama)": "Pénalité de répétition (Ollama)", "Reply in Thread": "Répondre dans le fil de discussion", "Request Mode": "Mode de requête", "Reranking Model": "Modèle de ré-ranking", @@ -835,7 +835,7 @@ "Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Les notifications de réponse ne peuvent pas être activées car les autorisations du site web ont été refusées. 
Veuillez vérifier les paramètres de votre navigateur pour accorder l'accès nécessaire.", "Response splitting": "Fractionnement de la réponse", "Result": "Résultat", - "Retrieval": "", + "Retrieval": "Récupération", "Retrieval Query Generation": "Génération de requête de RAG", "Rich Text Input for Chat": "Saisie de texte enrichi pour le chat", "RK": "Rang", @@ -885,7 +885,7 @@ "Select a pipeline": "Sélectionnez un pipeline", "Select a pipeline url": "Sélectionnez l'URL du pipeline", "Select a tool": "Sélectionnez un outil", - "Select an auth method": "", + "Select an auth method": "Veuillez sélectionner une méthode de connexion", "Select an Ollama instance": "Sélectionnez une instance Ollama", "Select Engine": "Sélectionnez le moteur", "Select Knowledge": "Sélectionnez une connaissance", @@ -897,8 +897,8 @@ "Send message": "Envoyer un message", "Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "Envoie `stream_options: { include_usage: true }` dans la requête.\nLes fournisseurs pris en charge renverront des informations sur l'utilisation des tokens dans la réponse lorsque cette option est activée.", "September": "Septembre", - "SerpApi API Key": "", - "SerpApi Engine": "", + "SerpApi API Key": "Clé d'API SerpAPI", + "SerpApi Engine": "Moteur SerpAPI", "Serper API Key": "Clé API Serper", "Serply API Key": "Clé API Serply", "Serpstack API Key": "Clé API Serpstack", @@ -945,7 +945,7 @@ "sk-1234": "sk-1234", "Source": "Source", "Speech Playback Speed": "Vitesse de lecture de la parole", - "Speech recognition error: {{error}}": "Erreur de reconnaissance vocale\u00a0: {{error}}", + "Speech recognition error: {{error}}": "Erreur de reconnaissance vocale : {{error}}", "Speech-to-Text Engine": "Moteur de reconnaissance vocale", "Stop": "Stop", "Stop Sequence": "Séquence d'arrêt", @@ -965,9 +965,9 @@ "Tags Generation": "Génération de tags", "Tags Generation Prompt": "Prompt de 
génération de tags", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "L'échantillonnage sans queue est utilisé pour réduire l'impact des tokens moins probables dans la sortie. Une valeur plus élevée (par exemple 2.0) réduira davantage l'impact, tandis qu'une valeur de 1.0 désactive ce paramètre. (par défaut : 1)", - "Talk to model": "", + "Talk to model": "Parler au modèle", "Tap to interrupt": "Appuyez pour interrompre", - "Tasks": "", + "Tasks": "Tâches", "Tavily API Key": "Clé API Tavily", "Tell us more:": "Dites-nous en plus à ce sujet : ", "Temperature": "Température", @@ -987,7 +987,7 @@ "The leaderboard is currently in beta, and we may adjust the rating calculations as we refine the algorithm.": "Le classement est actuellement en version bêta et nous pouvons ajuster les calculs de notation à mesure que nous peaufinons l'algorithme.", "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "La taille maximale du fichier en Mo. Si la taille du fichier dépasse cette limite, le fichier ne sera pas téléchargé.", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Le nombre maximal de fichiers pouvant être utilisés en même temps dans la conversation. Si le nombre de fichiers dépasse cette limite, les fichiers ne seront pas téléchargés.", - "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Le score doit être une valeur comprise entre 0,0 (0\u00a0%) et 1,0 (100\u00a0%).", + "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Le score doit être une valeur comprise entre 0,0 (0%) et 1,0 (100%).", "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "La température du modèle. 
Augmenter la température rendra le modèle plus créatif dans ses réponses. (Par défaut : 0.8)", "Theme": "Thème", "Thinking...": "En train de réfléchir...", @@ -1009,12 +1009,12 @@ "Tika": "Tika", "Tika Server URL required.": "URL du serveur Tika requise.", "Tiktoken": "Tiktoken", - "Tip: Update multiple variable slots consecutively by pressing the tab key in the chat input after each replacement.": "Conseil\u00a0: mettez à jour plusieurs emplacements de variables consécutivement en appuyant sur la touche Tab dans l’entrée de chat après chaque remplacement.", + "Tip: Update multiple variable slots consecutively by pressing the tab key in the chat input after each replacement.": "Conseil: mettez à jour plusieurs emplacements de variables consécutivement en appuyant sur la touche Tab dans l’entrée de chat après chaque remplacement.", "Title": "Titre", "Title (e.g. Tell me a fun fact)": "Titre (par ex. raconte-moi un fait amusant)", "Title Auto-Generation": "Génération automatique des titres", "Title cannot be an empty string.": "Le titre ne peut pas être une chaîne de caractères vide.", - "Title Generation": "", + "Title Generation": "Génération du Titre", "Title Generation Prompt": "Prompt de génération de titre", "TLS": "TLS", "To access the available model names for downloading,": "Pour accéder aux noms des modèles disponibles,", @@ -1050,7 +1050,7 @@ "Top P": "Top P", "Transformers": "Transformers", "Trouble accessing Ollama?": "Problèmes d'accès à Ollama ?", - "Trust Proxy Environment": "", + "Trust Proxy Environment": "Faire confiance au proxy de l'environement", "TTS Model": "Modèle de Text-to-Speech", "TTS Settings": "Paramètres de Text-to-Speech", "TTS Voice": "Voix de Text-to-Speech", @@ -1072,7 +1072,7 @@ "Updated": "Mis à jour", "Updated at": "Mise à jour le", "Updated At": "Mise à jour le", - "Upgrade to a licensed plan for enhanced capabilities, including custom theming and branding, and dedicated support.": "", + "Upgrade to a licensed plan for enhanced 
capabilities, including custom theming and branding, and dedicated support.": "Effectuez la mise à niveau vers le plan payant pour bénéficier de fonctionnalités améliorées, notamment les thèmes et le branding personnalisé, ainsi qu'un support dédié.", "Upload": "Téléverser", "Upload a GGUF model": "Téléverser un modèle GGUF", "Upload directory": "Téléverser un dossier", @@ -1101,7 +1101,7 @@ "Valves updated successfully": "Les vannes ont été mises à jour avec succès", "variable": "variable", "variable to have them replaced with clipboard content.": "variable pour qu'elles soient remplacées par le contenu du presse-papiers.", - "Version": "version:", + "Version": "Version:", "Version {{selectedVersion}} of {{totalVersions}}": "Version {{selectedVersion}} de {{totalVersions}}", "View Replies": "Voir les réponses", "Visibility": "Visibilité", @@ -1116,7 +1116,7 @@ "Web API": "API Web", "Web Search": "Recherche Web", "Web Search Engine": "Moteur de recherche Web", - "Web Search in Chat": "", + "Web Search in Chat": "Recherche web depuis le chat", "Web Search Query Generation": "Génération de requête de recherche Web", "Webhook URL": "URL du webhook", "WebUI Settings": "Paramètres de WebUI", @@ -1131,7 +1131,7 @@ "Whisper (Local)": "Whisper (local)", "Why?": "Pourquoi ?", "Widescreen Mode": "Mode grand écran", - "Won": "Gagné", + "Won": "Victoires", "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "Fonctionne avec le top-k. Une valeur plus élevée (par ex. 0.95) donnera un texte plus diversifié, tandis qu'une valeur plus basse (par ex. 0.5) générera un texte plus concentré et conservateur. 
(Par défaut : 0.9)", "Workspace": "Espace de travail", "Workspace Permissions": "Autorisations de l'espace de travail", @@ -1155,6 +1155,6 @@ "Your account status is currently pending activation.": "Votre statut de compte est actuellement en attente d'activation.", "Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "L'intégralité de votre contribution ira directement au développeur du plugin ; Open WebUI ne prend aucun pourcentage. Cependant, la plateforme de financement choisie peut avoir ses propres frais.", "Youtube": "YouTube", - "Youtube Language": "", - "Youtube Proxy URL": "" + "Youtube Language": "Langue de Youtube", + "Youtube Proxy URL": "URL du proxy YouTube" } From 5e873bc643c92394c567a9ccffdebc0035852457 Mon Sep 17 00:00:00 2001 From: Sara Angel-Murphy Date: Thu, 27 Feb 2025 13:12:54 -0500 Subject: [PATCH 110/623] feat: add AWS workload identity support --- backend/open_webui/storage/provider.py | 38 +++++++++++++------ .../test/apps/webui/storage/test_provider.py | 11 ++++++ 2 files changed, 37 insertions(+), 12 deletions(-) diff --git a/backend/open_webui/storage/provider.py b/backend/open_webui/storage/provider.py index 2f31cbdafbf..c5c0056cc45 100644 --- a/backend/open_webui/storage/provider.py +++ b/backend/open_webui/storage/provider.py @@ -101,19 +101,33 @@ def delete_all_files() -> None: class S3StorageProvider(StorageProvider): def __init__(self): - self.s3_client = boto3.client( - "s3", - region_name=S3_REGION_NAME, - endpoint_url=S3_ENDPOINT_URL, - aws_access_key_id=S3_ACCESS_KEY_ID, - aws_secret_access_key=S3_SECRET_ACCESS_KEY, - config=Config( - s3={ - "use_accelerate_endpoint": S3_USE_ACCELERATE_ENDPOINT, - "addressing_style": S3_ADDRESSING_STYLE, - }, - ), + config = Config( + s3={ + "use_accelerate_endpoint": S3_USE_ACCELERATE_ENDPOINT, + "addressing_style": S3_ADDRESSING_STYLE, + }, ) + + # If access key and secret 
are provided, use them for authentication + if S3_ACCESS_KEY_ID and S3_SECRET_ACCESS_KEY: + self.s3_client = boto3.client( + "s3", + region_name=S3_REGION_NAME, + endpoint_url=S3_ENDPOINT_URL, + aws_access_key_id=S3_ACCESS_KEY_ID, + aws_secret_access_key=S3_SECRET_ACCESS_KEY, + config=config, + ) + else: + # If no explicit credentials are provided, fall back to default AWS credentials + # This supports workload identity (IAM roles for EC2, EKS, etc.) + self.s3_client = boto3.client( + "s3", + region_name=S3_REGION_NAME, + endpoint_url=S3_ENDPOINT_URL, + config=config, + ) + self.bucket_name = S3_BUCKET_NAME self.key_prefix = S3_KEY_PREFIX if S3_KEY_PREFIX else "" diff --git a/backend/open_webui/test/apps/webui/storage/test_provider.py b/backend/open_webui/test/apps/webui/storage/test_provider.py index a5ef1350437..3c874592fe4 100644 --- a/backend/open_webui/test/apps/webui/storage/test_provider.py +++ b/backend/open_webui/test/apps/webui/storage/test_provider.py @@ -187,6 +187,17 @@ def test_delete_all_files(self, monkeypatch, tmp_path): assert not (upload_dir / self.filename).exists() assert not (upload_dir / self.filename_extra).exists() + def test_init_without_credentials(self, monkeypatch): + """Test that S3StorageProvider can initialize without explicit credentials.""" + # Temporarily unset the environment variables + monkeypatch.setattr(provider, "S3_ACCESS_KEY_ID", None) + monkeypatch.setattr(provider, "S3_SECRET_ACCESS_KEY", None) + + # Should not raise an exception + storage = provider.S3StorageProvider() + assert storage.s3_client is not None + assert storage.bucket_name == provider.S3_BUCKET_NAME + class TestGCSStorageProvider: Storage = provider.GCSStorageProvider() From b6873153c1c763a61e4b6926ab6d5d09dc60c38b Mon Sep 17 00:00:00 2001 From: Didier FOURNOUT Date: Thu, 27 Feb 2025 19:32:15 +0000 Subject: [PATCH 111/623] resolve a bug in /api/models/base not returning openai models when ENABLE_FORWARD_USER_INFO_HEADERS is true --- 
backend/open_webui/main.py | 2 +- backend/open_webui/routers/openai.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/backend/open_webui/main.py b/backend/open_webui/main.py index 1e6f970c809..9676d144e91 100644 --- a/backend/open_webui/main.py +++ b/backend/open_webui/main.py @@ -972,7 +972,7 @@ def get_filtered_models(models, user): @app.get("/api/models/base") async def get_base_models(request: Request, user=Depends(get_admin_user)): - models = await get_all_base_models(request) + models = await get_all_base_models(request, user=user) return {"data": models} diff --git a/backend/open_webui/routers/openai.py b/backend/open_webui/routers/openai.py index dff2461eac4..990df83b0b8 100644 --- a/backend/open_webui/routers/openai.py +++ b/backend/open_webui/routers/openai.py @@ -67,7 +67,7 @@ async def send_get_request(url, key=None, user: UserModel = None): "X-OpenWebUI-User-Email": user.email, "X-OpenWebUI-User-Role": user.role, } - if ENABLE_FORWARD_USER_INFO_HEADERS + if ENABLE_FORWARD_USER_INFO_HEADERS and user else {} ), }, From 660a44c918a0ff959e616d1ee3bd231d7b57c10e Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Thu, 27 Feb 2025 11:37:44 -0800 Subject: [PATCH 112/623] fix: insecure connection over LAN --- src/lib/utils/onedrive-file-picker.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/lib/utils/onedrive-file-picker.ts b/src/lib/utils/onedrive-file-picker.ts index d26469529e6..7abf5220463 100644 --- a/src/lib/utils/onedrive-file-picker.ts +++ b/src/lib/utils/onedrive-file-picker.ts @@ -1,3 +1,5 @@ +import { v4 as uuidv4 } from 'uuid'; + let CLIENT_ID = ''; async function getCredentials() { @@ -84,7 +86,7 @@ const params = { authentication: {}, messaging: { origin: window?.location?.origin, - channelId: crypto.randomUUID() + channelId: uuidv4() }, typesAndSources: { mode: 'files', From 866d02bbaef827c2c5a7e84710273700d16ea22d Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Thu, 27 Feb 2025 
11:39:00 -0800 Subject: [PATCH 113/623] refac --- backend/open_webui/config.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/backend/open_webui/config.py b/backend/open_webui/config.py index 15982f886fd..5f52d4eeed7 100644 --- a/backend/open_webui/config.py +++ b/backend/open_webui/config.py @@ -1500,10 +1500,11 @@ class BannerModel(BaseModel): VECTOR_DB = os.environ.get("VECTOR_DB", "chroma") # Chroma +CHROMA_DATA_PATH = f"{DATA_DIR}/vector_db" + if VECTOR_DB == "chroma": import chromadb - CHROMA_DATA_PATH = f"{DATA_DIR}/vector_db" CHROMA_TENANT = os.environ.get("CHROMA_TENANT", chromadb.DEFAULT_TENANT) CHROMA_DATABASE = os.environ.get("CHROMA_DATABASE", chromadb.DEFAULT_DATABASE) CHROMA_HTTP_HOST = os.environ.get("CHROMA_HTTP_HOST", "") From ed0b9be4e3133e4eb4896edcf3eed4e1a68a6293 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Thu, 27 Feb 2025 11:39:59 -0800 Subject: [PATCH 114/623] fix --- src/lib/components/chat/Artifacts.svelte | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/lib/components/chat/Artifacts.svelte b/src/lib/components/chat/Artifacts.svelte index d09858838fd..0a8ab956aba 100644 --- a/src/lib/components/chat/Artifacts.svelte +++ b/src/lib/components/chat/Artifacts.svelte @@ -123,8 +123,6 @@ if (contents.length === 0) { showControls.set(false); showArtifacts.set(false); - - toast.error($i18n.t('No HTML, CSS, or JavaScript content found.')); } selectedContentIdx = contents ? 
contents.length - 1 : 0; From 94c19f73b09b7d37c9684f314822cee980f4f2cb Mon Sep 17 00:00:00 2001 From: Yui Date: Thu, 27 Feb 2025 20:41:30 +0100 Subject: [PATCH 115/623] add config update when deleting openai model --- src/lib/components/admin/Settings/Connections.svelte | 1 + 1 file changed, 1 insertion(+) diff --git a/src/lib/components/admin/Settings/Connections.svelte b/src/lib/components/admin/Settings/Connections.svelte index 893330f1c79..2fcfadaec81 100644 --- a/src/lib/components/admin/Settings/Connections.svelte +++ b/src/lib/components/admin/Settings/Connections.svelte @@ -274,6 +274,7 @@ newConfig[newIdx] = OPENAI_API_CONFIGS[newIdx < idx ? newIdx : newIdx + 1]; }); OPENAI_API_CONFIGS = newConfig; + updateOpenAIHandler(); }} /> {/each} From 70667d5cc8dd755f7e439bd6eb1f822fc76e38f0 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Thu, 27 Feb 2025 11:45:25 -0800 Subject: [PATCH 116/623] refac --- backend/open_webui/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/open_webui/config.py b/backend/open_webui/config.py index 5f52d4eeed7..19abbf1436b 100644 --- a/backend/open_webui/config.py +++ b/backend/open_webui/config.py @@ -2416,7 +2416,7 @@ class BannerModel(BaseModel): LDAP_SEARCH_FILTERS = PersistentConfig( "LDAP_SEARCH_FILTER", "ldap.server.search_filter", - os.environ.get("LDAP_SEARCH_FILTER", ""), + os.environ.get("LDAP_SEARCH_FILTER", os.environ.get("LDAP_SEARCH_FILTERS", "")), ) LDAP_USE_TLS = PersistentConfig( From 63d825692ed19e3700ce67e631f8c1974e63880e Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Thu, 27 Feb 2025 11:49:59 -0800 Subject: [PATCH 117/623] doc: changelog --- CHANGELOG.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 22bf47ab3a9..bfc9ebca039 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,14 @@ All notable changes to this project will be documented in this file. 
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [0.5.18] - 2025-02-28 + +### Fixed + +- **🌐 Open WebUI Now Works Over LAN in Insecure Context**: Resolved an issue preventing Open WebUI from functioning when accessed over a local network in an insecure context, ensuring seamless connectivity. +- **🔄 UI Now Reflects Deleted Connections Instantly**: Fixed an issue where deleting a connection did not update the UI in real time, ensuring accurate system state visibility. +- **🛠️ Models Now Display Correctly with ENABLE_FORWARD_USER_INFO_HEADERS**: Addressed a bug where models were not visible when ENABLE_FORWARD_USER_INFO_HEADERS was set, restoring proper model listing. + ## [0.5.17] - 2025-02-27 ### Added From e26c443815a1554cd813d4b54adfda22de886105 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Thu, 27 Feb 2025 12:00:23 -0800 Subject: [PATCH 118/623] 0.5.18 --- CHANGELOG.md | 2 +- package-lock.json | 4 ++-- package.json | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bfc9ebca039..29715f6f340 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,7 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
-## [0.5.18] - 2025-02-28 +## [0.5.18] - 2025-02-27 ### Fixed diff --git a/package-lock.json b/package-lock.json index 1d2ad0a1ece..e48c7930e18 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "open-webui", - "version": "0.5.17", + "version": "0.5.18", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "open-webui", - "version": "0.5.17", + "version": "0.5.18", "dependencies": { "@codemirror/lang-javascript": "^6.2.2", "@codemirror/lang-python": "^6.1.6", diff --git a/package.json b/package.json index e0722dedfba..1d2e86741a7 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "open-webui", - "version": "0.5.17", + "version": "0.5.18", "private": true, "scripts": { "dev": "npm run pyodide:fetch && vite dev --host", From 54e3ca8e6cb23060aa3cc0539dc795ba947a87a8 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Thu, 27 Feb 2025 13:03:28 -0800 Subject: [PATCH 119/623] refac: rm confusing defaults --- .../Settings/Advanced/AdvancedParams.svelte | 42 ++++++++----------- 1 file changed, 18 insertions(+), 24 deletions(-) diff --git a/src/lib/components/chat/Settings/Advanced/AdvancedParams.svelte b/src/lib/components/chat/Settings/Advanced/AdvancedParams.svelte index 84d9e50353b..f1b8e8e5246 100644 --- a/src/lib/components/chat/Settings/Advanced/AdvancedParams.svelte +++ b/src/lib/components/chat/Settings/Advanced/AdvancedParams.svelte @@ -114,7 +114,7 @@
@@ -356,7 +354,7 @@
@@ -857,7 +853,7 @@
@@ -968,7 +962,7 @@
Date: Thu, 27 Feb 2025 13:04:39 -0800 Subject: [PATCH 120/623] chore: format --- src/lib/i18n/locales/ar-BH/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/bg-BG/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/bn-BD/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/ca-ES/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/ceb-PH/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/cs-CZ/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/da-DK/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/de-DE/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/dg-DG/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/el-GR/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/en-GB/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/en-US/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/es-ES/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/eu-ES/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/fa-IR/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/fi-FI/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/fr-CA/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/fr-FR/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/he-IL/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/hi-IN/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/hr-HR/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/hu-HU/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/id-ID/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/ie-GA/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/it-IT/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/ja-JP/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/ka-GE/translation.json | 36 ++++++++++---------- 
src/lib/i18n/locales/ko-KR/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/lt-LT/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/ms-MY/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/nb-NO/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/nl-NL/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/pa-IN/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/pl-PL/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/pt-BR/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/pt-PT/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/ro-RO/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/ru-RU/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/sk-SK/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/sr-RS/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/sv-SE/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/th-TH/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/tk-TW/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/tr-TR/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/uk-UA/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/ur-PK/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/vi-VN/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/zh-CN/translation.json | 36 ++++++++++---------- src/lib/i18n/locales/zh-TW/translation.json | 36 ++++++++++---------- 49 files changed, 882 insertions(+), 882 deletions(-) diff --git a/src/lib/i18n/locales/ar-BH/translation.json b/src/lib/i18n/locales/ar-BH/translation.json index a722d15c4c0..d0caabefa99 100644 --- a/src/lib/i18n/locales/ar-BH/translation.json +++ b/src/lib/i18n/locales/ar-BH/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "", "Allowed Endpoints": "", "Already have an account?": "هل تملك حساب ؟", - "Alternative to the top_p, and 
aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "", "an assistant": "مساعد", @@ -208,7 +208,7 @@ "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "اتصالات", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "", "Content": "الاتصال", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "", "Continue with LDAP": "", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. 
At 1, it is disabled.": "", "Controls": "", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "", "Copied shared chat URL to clipboard!": "تم نسخ عنوان URL للدردشة المشتركة إلى الحافظة", "Copied to clipboard": "", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "", "Enable Message Rating": "", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "تفعيل عمليات التسجيل الجديدة", "Enabled": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "تأكد من أن ملف CSV الخاص بك يتضمن 4 أعمدة بهذا الترتيب: Name, Email, Password, Role.", @@ -566,7 +566,7 @@ "Include": "", "Include `--api-auth` flag when running stable-diffusion-webui": "", "Include `--api` flag when running stable-diffusion-webui": "قم بتضمين علامة `-api` عند تشغيل Stable-diffusion-webui", - "Influences how quickly the algorithm responds to feedback from the generated text. 
A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "معلومات", "Input commands": "إدخال الأوامر", "Install from Github URL": "التثبيت من عنوان URL لجيثب", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "سجل صوت", "Redirecting you to Open WebUI Community": "OpenWebUI إعادة توجيهك إلى مجتمع ", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "", "References from": "", "Refused when it shouldn't have": "رفض عندما لا ينبغي أن يكون", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "ضبط الصوت", "Set whisper model": "", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. 
(Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "", "Settings": "الاعدادات", "Settings saved successfully!": "تم حفظ الاعدادات بنجاح", @@ -964,7 +964,7 @@ "System Prompt": "محادثة النظام", "Tags Generation": "", "Tags Generation Prompt": "", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "شكرا لملاحظاتك!", "The Application Account DN you bind with for search": "", "The base to search for users": "", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "يجب أن تكون النتيجة قيمة تتراوح بين 0.0 (0%) و1.0 (100%).", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "الثيم", "Thinking...": "", "This action cannot be undone. Do you wish to continue?": "", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "وهذا يضمن حفظ محادثاتك القيمة بشكل آمن في قاعدة بياناتك الخلفية. 
شكرًا لك!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "", "This response was generated by \"{{model}}\"": "", "This will delete": "", @@ -1132,7 +1132,7 @@ "Why?": "", "Widescreen Mode": "", "Won": "", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "", + "Works together with top-k. 
A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "مساحة العمل", "Workspace Permissions": "", "Write": "", diff --git a/src/lib/i18n/locales/bg-BG/translation.json b/src/lib/i18n/locales/bg-BG/translation.json index cf3a112c4b8..a8acd34d07d 100644 --- a/src/lib/i18n/locales/bg-BG/translation.json +++ b/src/lib/i18n/locales/bg-BG/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "Разреши прекъсване на гласа по време на разговор", "Allowed Endpoints": "Разрешени крайни точки", "Already have an account?": "Вече имате акаунт?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "Алтернатива на top_p, която цели да осигури баланс между качество и разнообразие. Параметърът p представлява минималната вероятност за разглеждане на токен, спрямо вероятността на най-вероятния токен. Например, при p=0.05 и най-вероятен токен с вероятност 0.9, логитите със стойност по-малка от 0.045 се филтрират. (По подразбиране: 0.0)", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "Винаги", "Amazing": "Невероятно", "an assistant": "асистент", @@ -208,7 +208,7 @@ "Confirm your new password": "Потвърдете новата си парола", "Connect to your own OpenAI compatible API endpoints.": "Свържете се със собствени крайни точки на API, съвместими с OpenAI.", "Connections": "Връзки", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "Ограничава усилията за разсъждение при модели за разсъждение. Приложимо само за модели за разсъждение от конкретни доставчици, които поддържат усилия за разсъждение. (По подразбиране: средно)", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Свържете се с администратор за достъп до WebUI", "Content": "Съдържание", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "Продължете с имейл", "Continue with LDAP": "Продължете с LDAP", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Контролирайте как текстът на съобщението се разделя за TTS заявки. 'Пунктуация' разделя на изречения, 'параграфи' разделя на параграфи, а 'нищо' запазва съобщението като един низ.", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "Контролирайте повторението на последователности от токени в генерирания текст. По-висока стойност (напр. 1.5) ще наказва повторенията по-силно, докато по-ниска стойност (напр. 
1.1) ще бъде по-снизходителна. При 1 е изключено. (По подразбиране: 1.1)", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "Контроли", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "Контролира баланса между съгласуваност и разнообразие на изхода. По-ниска стойност ще доведе до по-фокусиран и съгласуван текст. (По подразбиране: 5.0)", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "Копирано", "Copied shared chat URL to clipboard!": "Копирана е връзката за споделен чат в клипборда!", "Copied to clipboard": "Копирано в клипборда", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Активиране на заключване на паметта (mlock), за да се предотврати изваждането на данните на модела от RAM. Тази опция заключва работния набор от страници на модела в RAM, гарантирайки, че няма да бъдат изхвърлени на диска. Това може да помогне за поддържане на производителността, като се избягват грешки в страниците и се осигурява бърз достъп до данните.", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. 
However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Активиране на мапиране на паметта (mmap) за зареждане на данни на модела. Тази опция позволява на системата да използва дисковото пространство като разширение на RAM, третирайки дисковите файлове, сякаш са в RAM. Това може да подобри производителността на модела, като позволява по-бърз достъп до данните. Въпреки това, може да не работи правилно с всички системи и може да консумира значително количество дисково пространство.", "Enable Message Rating": "Активиране на оценяване на съобщения", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Активиране на Mirostat семплиране за контрол на перплексията. (По подразбиране: 0, 0 = Деактивирано, 1 = Mirostat, 2 = Mirostat 2.0)", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Включване на нови регистрации", "Enabled": "Активирано", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Уверете се, че вашият CSV файл включва 4 колони в следния ред: Име, Имейл, Парола, Роля.", @@ -566,7 +566,7 @@ "Include": "Включи", "Include `--api-auth` flag when running stable-diffusion-webui": "", "Include `--api` flag when running stable-diffusion-webui": "", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "Влияе върху това колко бързо алгоритъмът реагира на обратната връзка от генерирания текст. По-ниска скорост на обучение ще доведе до по-бавни корекции, докато по-висока скорост на обучение ще направи алгоритъма по-отзивчив. (По подразбиране: 0.1)", + "Influences how quickly the algorithm responds to feedback from the generated text. 
A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "Информация", "Input commands": "Въведете команди", "Install from Github URL": "Инсталиране от URL адреса на Github", @@ -809,7 +809,7 @@ "Reasoning Effort": "Усилие за разсъждение", "Record voice": "Записване на глас", "Redirecting you to Open WebUI Community": "Пренасочване към OpenWebUI общността", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "Намалява вероятността за генериране на безсмислици. По-висока стойност (напр. 100) ще даде по-разнообразни отговори, докато по-ниска стойност (напр. 10) ще бъде по-консервативна. (По подразбиране: 40)", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Отнасяйте се към себе си като \"Потребител\" (напр. \"Потребителят учи испански\")", "References from": "Препратки от", "Refused when it shouldn't have": "Отказано, когато не трябва да бъде", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Задайте броя работни нишки, използвани за изчисления. Тази опция контролира колко нишки се използват за едновременна обработка на входящи заявки. Увеличаването на тази стойност може да подобри производителността при високи натоварвания с паралелизъм, но може също да консумира повече CPU ресурси.", "Set Voice": "Задай Глас", "Set whisper model": "Задай модел на шепот", - "Sets a flat bias against tokens that have appeared at least once. 
A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "Задава плоско отклонение срещу токени, които са се появили поне веднъж. По-висока стойност (напр. 1.5) ще наказва повторенията по-силно, докато по-ниска стойност (напр. 0.9) ще бъде по-снизходителна. При 0 е деактивирано. (По подразбиране: 0)", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "Задава мащабиращо отклонение срещу токени за наказване на повторения, базирано на това колко пъти са се появили. По-висока стойност (напр. 1.5) ще наказва повторенията по-силно, докато по-ниска стойност (напр. 0.9) ще бъде по-снизходителна. При 0 е деактивирано. (По подразбиране: 1.1)", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "Задава колко назад моделът да гледа, за да предотврати повторение. (По подразбиране: 64, 0 = деактивирано, -1 = num_ctx)", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "Задава семето на случайното число, което да се използва за генериране. Задаването на конкретно число ще накара модела да генерира същия текст за същата подкана. (По подразбиране: случайно)", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "Задава размера на контекстния прозорец, използван за генериране на следващия токен. (По подразбиране: 2048)", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. 
At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Задава последователностите за спиране, които да се използват. Когато се срещне този модел, LLM ще спре да генерира текст и ще се върне. Множество модели за спиране могат да бъдат зададени чрез определяне на множество отделни параметри за спиране в моделния файл.", "Settings": "Настройки", "Settings saved successfully!": "Настройките са запазени успешно!", @@ -964,7 +964,7 @@ "System Prompt": "Системен Промпт", "Tags Generation": "Генериране на тагове", "Tags Generation Prompt": "Промпт за генериране на тагове", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Безопашковото семплиране се използва за намаляване на влиянието на по-малко вероятните токени от изхода. По-висока стойност (напр. 2.0) ще намали влиянието повече, докато стойност 1.0 деактивира тази настройка. (по подразбиране: 1)", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "Докоснете за прекъсване", "Tasks": "Задачи", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "Благодарим ви за вашия отзив!", "The Application Account DN you bind with for search": "DN на акаунта на приложението, с който се свързвате за търсене", "The base to search for users": "Базата за търсене на потребители", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Разработчиците зад този плъгин са страстни доброволци от общността. Ако намирате този плъгин полезен, моля, обмислете да допринесете за неговото развитие.", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Класацията за оценка се базира на рейтинговата система Elo и се обновява в реално време.", "The LDAP attribute that maps to the mail that users use to sign in.": "LDAP атрибутът, който съответства на имейла, който потребителите използват за вписване.", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Максималният размер на файла в MB. Ако размерът на файла надвишава този лимит, файлът няма да бъде качен.", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Максималният брой файлове, които могат да се използват едновременно в чата. 
Ако броят на файловете надвишава този лимит, файловете няма да бъдат качени.", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Резултатът трябва да бъде стойност между 0.0 (0%) и 1.0 (100%).", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "Температурата на модела. Увеличаването на температурата ще накара модела да отговаря по-креативно. (По подразбиране: 0.8)", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "Тема", "Thinking...": "Мисля...", "This action cannot be undone. Do you wish to continue?": "Това действие не може да бъде отменено. Желаете ли да продължите?", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Това гарантира, че ценните ви разговори се запазват сигурно във вашата бекенд база данни. Благодарим ви!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Това е експериментална функция, може да не работи според очакванията и подлежи на промяна по всяко време.", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "Тази опция контролира колко токена се запазват при обновяване на контекста. Например, ако е зададено на 2, последните 2 токена от контекста на разговора ще бъдат запазени. Запазването на контекста може да помогне за поддържане на непрекъснатостта на разговора, но може да намали способността за отговор на нови теми. (По подразбиране: 24)", - "This option sets the maximum number of tokens the model can generate in its response. 
Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "Тази опция ще изтрие всички съществуващи файлове в колекцията и ще ги замени с новокачени файлове.", "This response was generated by \"{{model}}\"": "Този отговор беше генериран от \"{{model}}\"", "This will delete": "Това ще изтрие", @@ -1132,7 +1132,7 @@ "Why?": "Защо?", "Widescreen Mode": "Широкоекранен режим", "Won": "Спечелено", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "Работи заедно с top-k. По-висока стойност (напр. 0.95) ще доведе до по-разнообразен текст, докато по-ниска стойност (напр. 0.5) ще генерира по-фокусиран и консервативен текст. (По подразбиране: 0.9)", + "Works together with top-k. 
A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "Работно пространство", "Workspace Permissions": "Разрешения за работното пространство", "Write": "Напиши", diff --git a/src/lib/i18n/locales/bn-BD/translation.json b/src/lib/i18n/locales/bn-BD/translation.json index e4080ee7f59..7ea3c29ca92 100644 --- a/src/lib/i18n/locales/bn-BD/translation.json +++ b/src/lib/i18n/locales/bn-BD/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "", "Allowed Endpoints": "", "Already have an account?": "আগে থেকেই একাউন্ট আছে?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "", "an assistant": "একটা এসিস্ট্যান্ট", @@ -208,7 +208,7 @@ "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "কানেকশনগুলো", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. 
Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "", "Content": "বিষয়বস্তু", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "", "Continue with LDAP": "", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "", "Copied shared chat URL to clipboard!": "শেয়ারকৃত কথা-ব্যবহারের URL ক্লিপবোর্ডে কপি করা হয়েছে!", "Copied to clipboard": "", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. 
However, it may not work correctly with all systems and can consume a significant amount of disk space.": "", "Enable Message Rating": "", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "নতুন সাইনআপ চালু করুন", "Enabled": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "আপনার সিএসভি ফাইলটিতে এই ক্রমে 4 টি কলাম অন্তর্ভুক্ত রয়েছে তা নিশ্চিত করুন: নাম, ইমেল, পাসওয়ার্ড, ভূমিকা।.", @@ -566,7 +566,7 @@ "Include": "", "Include `--api-auth` flag when running stable-diffusion-webui": "", "Include `--api` flag when running stable-diffusion-webui": "stable-diffusion-webui চালু করার সময় `--api` ফ্ল্যাগ সংযুক্ত করুন", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "তথ্য", "Input commands": "ইনপুট কমান্ডস", "Install from Github URL": "Github URL থেকে ইনস্টল করুন", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "ভয়েস রেকর্ড করুন", "Redirecting you to Open WebUI Community": "আপনাকে OpenWebUI কমিউনিটিতে পাঠানো হচ্ছে", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 
10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "", "References from": "", "Refused when it shouldn't have": "যদি উপযুক্ত নয়, তবে রেজিগেনেট করা হচ্ছে", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "কন্ঠস্বর নির্ধারণ করুন", "Set whisper model": "", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. 
At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "", "Settings": "সেটিংসমূহ", "Settings saved successfully!": "সেটিংগুলো সফলভাবে সংরক্ষিত হয়েছে", @@ -964,7 +964,7 @@ "System Prompt": "সিস্টেম প্রম্পট", "Tags Generation": "", "Tags Generation Prompt": "", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "আপনার মতামত ধন্যবাদ!", "The Application Account DN you bind with for search": "", "The base to search for users": "", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. 
If you find this plugin helpful, please consider contributing to its development.": "", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "স্কোর একটি 0.0 (0%) এবং 1.0 (100%) এর মধ্যে একটি মান হওয়া উচিত।", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "থিম", "Thinking...": "", "This action cannot be undone. Do you wish to continue?": "", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "এটা নিশ্চিত করে যে, আপনার গুরুত্বপূর্ণ আলোচনা নিরাপদে আপনার ব্যাকএন্ড ডেটাবেজে সংরক্ষিত আছে। ধন্যবাদ!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "", + "This option controls how many tokens are preserved when refreshing the context. 
For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "", "This response was generated by \"{{model}}\"": "", "This will delete": "", @@ -1132,7 +1132,7 @@ "Why?": "", "Widescreen Mode": "", "Won": "", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "ওয়ার্কস্পেস", "Workspace Permissions": "", "Write": "", diff --git a/src/lib/i18n/locales/ca-ES/translation.json b/src/lib/i18n/locales/ca-ES/translation.json index d58420aa4c9..fae9b4f5a51 100644 --- a/src/lib/i18n/locales/ca-ES/translation.json +++ b/src/lib/i18n/locales/ca-ES/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "Permetre la interrupció de la veu en una trucada", "Allowed Endpoints": "Punts d'accés permesos", "Already have an account?": "Ja tens un compte?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. 
(Default: 0.0)": "Alternativa al top_p, i pretén garantir un equilibri de qualitat i varietat. El paràmetre p representa la probabilitat mínima que es consideri un token, en relació amb la probabilitat del token més probable. Per exemple, amb p=0,05 i el token més probable amb una probabilitat de 0,9, es filtren els logits amb un valor inferior a 0,045. (Per defecte: 0.0)", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "Sempre", "Amazing": "Al·lucinant", "an assistant": "un assistent", @@ -208,7 +208,7 @@ "Confirm your new password": "Confirma la teva nova contrasenya", "Connect to your own OpenAI compatible API endpoints.": "Connecta als teus propis punts de connexió de l'API compatible amb OpenAI", "Connections": "Connexions", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "Restringeix l'esforç de raonament dels models de raonament. Només aplicable a models de raonament de proveïdors específics que donen suport a l'esforç de raonament. (Per defecte: mitjà)", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Posat en contacte amb l'administrador per accedir a WebUI", "Content": "Contingut", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "Continuar amb el correu", "Continue with LDAP": "Continuar amb LDAP", "Control how message text is split for TTS requests. 
'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Controlar com es divideix el text del missatge per a les sol·licituds TTS. 'Puntuació' divideix en frases, 'paràgrafs' divideix en paràgrafs i 'cap' manté el missatge com una cadena única.", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "Controlar la repetició de seqüències de tokens en el text generat. Un valor més alt (p. ex., 1,5) penalitzarà les repeticions amb més força, mentre que un valor més baix (p. ex., 1,1) serà més indulgent. A l'1, està desactivat. (Per defecte: 1.1)", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "Controls", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "Controlar l'equilibri entre la coherència i la diversitat de la sortida. Un valor més baix donarà lloc a un text més enfocat i coherent. (Per defecte: 5.0)", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "Copiat", "Copied shared chat URL to clipboard!": "S'ha copiat l'URL compartida al porta-retalls!", "Copied to clipboard": "Copiat al porta-retalls", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. 
This can help maintain performance by avoiding page faults and ensuring fast data access.": "Activar el bloqueig de memòria (mlock) per evitar que les dades del model s'intercanviïn fora de la memòria RAM. Aquesta opció bloqueja el conjunt de pàgines de treball del model a la memòria RAM, assegurant-se que no s'intercanviaran al disc. Això pot ajudar a mantenir el rendiment evitant errors de pàgina i garantint un accés ràpid a les dades.", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Activar l'assignació de memòria (mmap) per carregar les dades del model. Aquesta opció permet que el sistema utilitzi l'emmagatzematge en disc com a extensió de la memòria RAM tractant els fitxers de disc com si estiguessin a la memòria RAM. Això pot millorar el rendiment del model permetent un accés més ràpid a les dades. Tanmateix, és possible que no funcioni correctament amb tots els sistemes i pot consumir una quantitat important d'espai en disc.", "Enable Message Rating": "Permetre la qualificació de missatges", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Activar el mostreig de Mirostat per controlar la perplexitat. 
(Per defecte: 0, 0 = Inhabilitat, 1 = Mirostat, 2 = Mirostat 2.0)", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Permetre nous registres", "Enabled": "Habilitat", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Assegura't que els teus fitxers CSV inclouen 4 columnes en aquest ordre: Nom, Correu electrònic, Contrasenya, Rol.", @@ -566,7 +566,7 @@ "Include": "Incloure", "Include `--api-auth` flag when running stable-diffusion-webui": "Inclou `--api-auth` quan executis stable-diffusion-webui", "Include `--api` flag when running stable-diffusion-webui": "Inclou `--api` quan executis stable-diffusion-webui", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "Influeix amb la rapidesa amb què l'algoritme respon als comentaris del text generat. Una taxa d'aprenentatge més baixa donarà lloc a ajustos més lents, mentre que una taxa d'aprenentatge més alta farà que l'algorisme sigui més sensible. (Per defecte: 0,1)", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "Informació", "Input commands": "Entra comandes", "Install from Github URL": "Instal·lar des de l'URL de Github", @@ -809,7 +809,7 @@ "Reasoning Effort": "Esforç de raonament", "Record voice": "Enregistrar la veu", "Redirecting you to Open WebUI Community": "Redirigint-te a la comunitat OpenWebUI", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "Redueix la probabilitat de generar ximpleries. Un valor més alt (p. ex. 
100) donarà respostes més diverses, mentre que un valor més baix (p. ex. 10) serà més conservador. (Per defecte: 40)", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Fes referència a tu mateix com a \"Usuari\" (p. ex., \"L'usuari està aprenent espanyol\")", "References from": "Referències de", "Refused when it shouldn't have": "Refusat quan no hauria d'haver estat", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Establir el nombre de fils de treball utilitzats per al càlcul. Aquesta opció controla quants fils s'utilitzen per processar les sol·licituds entrants simultàniament. Augmentar aquest valor pot millorar el rendiment amb càrregues de treball de concurrència elevada, però també pot consumir més recursos de CPU.", "Set Voice": "Establir la veu", "Set whisper model": "Establir el model whisper", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "Estableix un biaix pla contra tokens que han aparegut almenys una vegada. Un valor més alt (p. ex., 1,5) penalitzarà les repeticions amb més força, mentre que un valor més baix (p. ex., 0,9) serà més indulgent. A 0, està desactivat. (Per defecte: 0)", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. 
(Default: 1.1)": "Estableix un biaix d'escala contra tokens per penalitzar les repeticions, en funció de quantes vegades han aparegut. Un valor més alt (p. ex., 1,5) penalitzarà les repeticions amb més força, mentre que un valor més baix (p. ex., 0,9) serà més indulgent. A 0, està desactivat. (Per defecte: 1.1)", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "Establir fins a quin punt el model mira enrere per evitar la repetició. (Per defecte: 64, 0 = desactivat, -1 = num_ctx)", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "Establir la llavor del nombre aleatori que s'utilitzarà per a la generació. Establir-ho a un número específic farà que el model generi el mateix text per a la mateixa sol·licitud. (Per defecte: aleatori)", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "Estableix la mida de la finestra de context utilitzada per generar el següent token. (Per defecte: 2048)", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. 
When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Establir les seqüències d'aturada a utilitzar. Quan es trobi aquest patró, el LLM deixarà de generar text. Es poden establir diversos patrons de parada especificant diversos paràmetres de parada separats en un fitxer model.", "Settings": "Preferències", "Settings saved successfully!": "Les preferències s'han desat correctament", @@ -964,7 +964,7 @@ "System Prompt": "Indicació del Sistema", "Tags Generation": "Generació d'etiquetes", "Tags Generation Prompt": "Indicació per a la generació d'etiquetes", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "El mostreig sense cua s'utilitza per reduir l'impacte de tokens menys probables de la sortida. Un valor més alt (p. ex., 2,0) reduirà més l'impacte, mentre que un valor d'1,0 desactiva aquesta configuració. (per defecte: 1)", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "Prem per interrompre", "Tasks": "Tasques", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "Gràcies pel teu comentari!", "The Application Account DN you bind with for search": "El DN del compte d'aplicació per realitzar la cerca", "The base to search for users": "La base per cercar usuaris", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "La mida del lot determina quantes sol·licituds de text es processen alhora. 
Una mida de lot més gran pot augmentar el rendiment i la velocitat del model, però també requereix més memòria. (Per defecte: 512)", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Els desenvolupadors d'aquest complement són voluntaris apassionats de la comunitat. Si trobeu útil aquest complement, considereu contribuir al seu desenvolupament.", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "La classificació d'avaluació es basa en el sistema de qualificació Elo i s'actualitza en temps real.", "The LDAP attribute that maps to the mail that users use to sign in.": "L'atribut LDAP que s'associa al correu que els usuaris utilitzen per iniciar la sessió.", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "La mida màxima del fitxer en MB. Si la mida del fitxer supera aquest límit, el fitxer no es carregarà.", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "El nombre màxim de fitxers que es poden utilitzar alhora al xat. Si el nombre de fitxers supera aquest límit, els fitxers no es penjaran.", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "El valor de puntuació hauria de ser entre 0.0 (0%) i 1.0 (100%).", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "La temperatura del model. Augmentar la temperatura farà que el model respongui de manera més creativa. (Per defecte: 0,8)", + "The temperature of the model. 
Increasing the temperature will make the model answer more creatively.": "", "Theme": "Tema", "Thinking...": "Pensant...", "This action cannot be undone. Do you wish to continue?": "Aquesta acció no es pot desfer. Vols continuar?", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Això assegura que les teves converses valuoses queden desades de manera segura a la teva base de dades. Gràcies!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Aquesta és una funció experimental, és possible que no funcioni com s'espera i està subjecta a canvis en qualsevol moment.", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "Aquesta opció controla quants tokens es conserven en actualitzar el context. Per exemple, si s'estableix en 2, es conservaran els darrers 2 tokens del context de conversa. Preservar el context pot ajudar a mantenir la continuïtat d'una conversa, però pot reduir la capacitat de respondre a nous temes. (Per defecte: 24)", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "Aquesta opció estableix el nombre màxim de tokens que el model pot generar en la seva resposta. Augmentar aquest límit permet que el model proporcioni respostes més llargues, però també pot augmentar la probabilitat que es generi contingut poc útil o irrellevant. (Per defecte: 128)", + "This option controls how many tokens are preserved when refreshing the context. 
For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "Aquesta opció eliminarà tots els fitxers existents de la col·lecció i els substituirà per fitxers recentment penjats.", "This response was generated by \"{{model}}\"": "Aquesta resposta l'ha generat el model \"{{model}}\"", "This will delete": "Això eliminarà", @@ -1132,7 +1132,7 @@ "Why?": "Per què?", "Widescreen Mode": "Mode de pantalla ampla", "Won": "Ha guanyat", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "Funciona juntament amb top-k. Un valor més alt (p. ex., 0,95) donarà lloc a un text més divers, mentre que un valor més baix (p. ex., 0,5) generarà un text més concentrat i conservador. (Per defecte: 0,9)", + "Works together with top-k. 
A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "Espai de treball", "Workspace Permissions": "Permisos de l'espai de treball", "Write": "Escriure", diff --git a/src/lib/i18n/locales/ceb-PH/translation.json b/src/lib/i18n/locales/ceb-PH/translation.json index a957731715f..bc0888216a5 100644 --- a/src/lib/i18n/locales/ceb-PH/translation.json +++ b/src/lib/i18n/locales/ceb-PH/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "", "Allowed Endpoints": "", "Already have an account?": "Naa na kay account ?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "", "an assistant": "usa ka katabang", @@ -208,7 +208,7 @@ "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "Mga koneksyon", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. 
Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "", "Content": "Kontento", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "", "Continue with LDAP": "", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "", "Copied shared chat URL to clipboard!": "", "Copied to clipboard": "", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. 
However, it may not work correctly with all systems and can consume a significant amount of disk space.": "", "Enable Message Rating": "", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "I-enable ang bag-ong mga rehistro", "Enabled": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "", @@ -566,7 +566,7 @@ "Include": "", "Include `--api-auth` flag when running stable-diffusion-webui": "", "Include `--api` flag when running stable-diffusion-webui": "Iapil ang `--api` nga bandila kung nagdagan nga stable-diffusion-webui", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "", "Input commands": "Pagsulod sa input commands", "Install from Github URL": "", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "Irekord ang tingog", "Redirecting you to Open WebUI Community": "Gi-redirect ka sa komunidad sa OpenWebUI", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 
10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "", "References from": "", "Refused when it shouldn't have": "", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "Ibutang ang tingog", "Set whisper model": "", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. 
At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "", "Settings": "Mga setting", "Settings saved successfully!": "Malampuson nga na-save ang mga setting!", @@ -964,7 +964,7 @@ "System Prompt": "Madasig nga Sistema", "Tags Generation": "", "Tags Generation Prompt": "", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "", "The Application Account DN you bind with for search": "", "The base to search for users": "", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. 
If you find this plugin helpful, please consider contributing to its development.": "", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "Tema", "Thinking...": "", "This action cannot be undone. Do you wish to continue?": "", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Kini nagsiguro nga ang imong bililhon nga mga panag-istoryahanay luwas nga natipig sa imong backend database. ", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "", + "This option controls how many tokens are preserved when refreshing the context. 
For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "", "This response was generated by \"{{model}}\"": "", "This will delete": "", @@ -1132,7 +1132,7 @@ "Why?": "", "Widescreen Mode": "", "Won": "", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "", "Workspace Permissions": "", "Write": "", diff --git a/src/lib/i18n/locales/cs-CZ/translation.json b/src/lib/i18n/locales/cs-CZ/translation.json index 1807c08b413..79c13b52afa 100644 --- a/src/lib/i18n/locales/cs-CZ/translation.json +++ b/src/lib/i18n/locales/cs-CZ/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "Povolit přerušení hlasu při hovoru", "Allowed Endpoints": "", "Already have an account?": "Už máte účet?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. 
(Default: 0.0)": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "", "an assistant": "asistent", @@ -208,7 +208,7 @@ "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "Připojení", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Kontaktujte administrátora pro přístup k webovému rozhraní.", "Content": "Obsah", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "", "Continue with LDAP": "", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Řízení, jak se text zprávy rozděluje pro požadavky TTS. 'Punctuation' rozděluje text na věty, 'paragraphs' rozděluje text na odstavce a 'none' udržuje zprávu jako jeden celý řetězec.", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. 
At 1, it is disabled.": "", "Controls": "Ovládací prvky", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "Zkopírováno", "Copied shared chat URL to clipboard!": "URL sdíleného chatu zkopírován do schránky!", "Copied to clipboard": "Zkopírováno do schránky", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "", "Enable Message Rating": "Povolit hodnocení zpráv", - "Enable Mirostat sampling for controlling perplexity. 
(Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Povolit nové registrace", "Enabled": "Povoleno", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Ujistěte se, že váš CSV soubor obsahuje 4 sloupce v tomto pořadí: Name, Email, Password, Role.", @@ -566,7 +566,7 @@ "Include": "Zahrnout", "Include `--api-auth` flag when running stable-diffusion-webui": "Zahrňte přepínač `--api-auth` při spuštění stable-diffusion-webui.", "Include `--api` flag when running stable-diffusion-webui": "Při spuštění stable-diffusion-webui zahrňte příznak `--api`.", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "Info", "Input commands": "Vstupní příkazy", "Install from Github URL": "Instalace z URL adresy Githubu", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "Nahrát hlas", "Redirecting you to Open WebUI Community": "Přesměrování na komunitu OpenWebUI", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Odkazujte na sebe jako na \"uživatele\" (např. 
\"Uživatel se učí španělsky\").", "References from": "Reference z", "Refused when it shouldn't have": "Odmítnuto, když nemělo být.", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "Nastavit hlas", "Set whisper model": "Nastavit model whisper", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. 
At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "", "Settings": "Nastavení", "Settings saved successfully!": "Nastavení byla úspěšně uložena!", @@ -964,7 +964,7 @@ "System Prompt": "Systémový prompt", "Tags Generation": "", "Tags Generation Prompt": "Prompt pro generování značek", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "Klepněte pro přerušení", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "Děkujeme za vaši zpětnou vazbu!", "The Application Account DN you bind with for search": "", "The base to search for users": "", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. 
If you find this plugin helpful, please consider contributing to its development.": "Vývojáři stojící za tímto pluginem jsou zapálení dobrovolníci z komunity. Pokud považujete tento plugin za užitečný, zvažte příspěvek k jeho vývoji.", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Hodnotící žebříček je založen na systému hodnocení Elo a je aktualizován v reálném čase.", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Maximální velikost souboru v MB. Pokud velikost souboru překročí tento limit, soubor nebude nahrán.", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Maximální počet souborů, které mohou být použity najednou v chatu. Pokud počet souborů překročí tento limit, soubory nebudou nahrány.", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Skóre by mělo být hodnotou mezi 0,0 (0%) a 1,0 (100%).", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "Téma", "Thinking...": "Přemýšlím...", "This action cannot be undone. Do you wish to continue?": "Tuto akci nelze vrátit zpět. Přejete si pokračovat?", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "To zajišťuje, že vaše cenné konverzace jsou bezpečně uloženy ve vaší backendové databázi. 
Děkujeme!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Jedná se o experimentální funkci, nemusí fungovat podle očekávání a může být kdykoliv změněna.", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "Tato volba odstraní všechny existující soubory ve sbírce a nahradí je nově nahranými soubory.", "This response was generated by \"{{model}}\"": "Tato odpověď byla vygenerována pomocí \"{{model}}\"", "This will delete": "Tohle odstraní", @@ -1132,7 +1132,7 @@ "Why?": "Proč?", "Widescreen Mode": "Režim širokoúhlého zobrazení", "Won": "Vyhrál", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. 
(Default: 0.9)": "", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "", "Workspace Permissions": "", "Write": "", diff --git a/src/lib/i18n/locales/da-DK/translation.json b/src/lib/i18n/locales/da-DK/translation.json index 82f19119c2c..3762f248333 100644 --- a/src/lib/i18n/locales/da-DK/translation.json +++ b/src/lib/i18n/locales/da-DK/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "Tillad afbrydelser i stemme i opkald", "Allowed Endpoints": "", "Already have an account?": "Har du allerede en profil?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "", "an assistant": "en assistent", @@ -208,7 +208,7 @@ "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "Forbindelser", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. 
Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Kontakt din administrator for adgang til WebUI", "Content": "Indhold", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "", "Continue with LDAP": "", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Kontroller hvordan beskedens tekst bliver splittet til TTS requests. 'Punctuation' (tegnsætning) splitter i sætninger, 'paragraphs' splitter i paragraffer, og 'none' beholder beskeden som en samlet streng.", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "Indstillinger", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "Kopieret", "Copied shared chat URL to clipboard!": "Link til deling kopieret til udklipsholder", "Copied to clipboard": "Kopieret til udklipsholder", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", "Enable Memory Mapping (mmap) to load model data. 
This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "", "Enable Message Rating": "Aktiver rating af besked", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Aktiver nye signups", "Enabled": "Aktiveret", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Sørg for at din CSV-fil indeholder 4 kolonner in denne rækkefølge: Name, Email, Password, Role.", @@ -566,7 +566,7 @@ "Include": "", "Include `--api-auth` flag when running stable-diffusion-webui": "Inkluder `--api-auth` flag, når du kører stable-diffusion-webui", "Include `--api` flag when running stable-diffusion-webui": "Inkluder `--api` flag, når du kører stable-diffusion-webui", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "Info", "Input commands": "Inputkommandoer", "Install from Github URL": "Installer fra Github URL", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "Optag stemme", "Redirecting you to Open WebUI Community": "Omdirigerer dig til OpenWebUI Community", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. 
(Default: 40)": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Referer til dig selv som \"Bruger\" (f.eks. \"Bruger lærer spansk\")", "References from": "", "Refused when it shouldn't have": "Afvist, når den ikke burde have været det", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "Indstil stemme", "Set whisper model": "", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. 
At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "", "Settings": "Indstillinger", "Settings saved successfully!": "Indstillinger gemt!", @@ -964,7 +964,7 @@ "System Prompt": "Systemprompt", "Tags Generation": "", "Tags Generation Prompt": "", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "Tryk for at afbryde", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "Tak for din feedback!", "The Application Account DN you bind with for search": "", "The base to search for users": "", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "", + "The batch size determines how many text requests are processed together at once. 
A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Udviklerne bag dette plugin er passionerede frivillige fra fællesskabet. Hvis du finder dette plugin nyttigt, kan du overveje at bidrage til dets udvikling.", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Den maksimale filstørrelse i MB. Hvis filstørrelsen overstiger denne grænse, uploades filen ikke.", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Det maksimale antal filer, der kan bruges på én gang i chatten. Hvis antallet af filer overstiger denne grænse, uploades filerne ikke.", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Scoren skal være en værdi mellem 0,0 (0%) og 1,0 (100%).", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "Tema", "Thinking...": "Tænker...", "This action cannot be undone. Do you wish to continue?": "Denne handling kan ikke fortrydes. Vil du fortsætte?", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Dette sikrer, at dine værdifulde samtaler gemmes sikkert i din backend-database. 
Tak!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Dette er en eksperimentel funktion, den fungerer muligvis ikke som forventet og kan ændres når som helst.", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "Denne indstilling sletter alle eksisterende filer i samlingen og erstatter dem med nyligt uploadede filer.", "This response was generated by \"{{model}}\"": "", "This will delete": "Dette vil slette", @@ -1132,7 +1132,7 @@ "Why?": "", "Widescreen Mode": "Widescreen-tilstand", "Won": "", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "", + "Works together with top-k. 
A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "Arbejdsområde", "Workspace Permissions": "", "Write": "", diff --git a/src/lib/i18n/locales/de-DE/translation.json b/src/lib/i18n/locales/de-DE/translation.json index ba1d2f31a3a..c66825fd3a6 100644 --- a/src/lib/i18n/locales/de-DE/translation.json +++ b/src/lib/i18n/locales/de-DE/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "Unterbrechung durch Stimme im Anruf zulassen", "Allowed Endpoints": "Erlaubte Endpunkte", "Already have an account?": "Haben Sie bereits einen Account?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "Alternative zu top_p und zielt darauf ab, ein Gleichgewicht zwischen Qualität und Vielfalt zu gewährleisten. Der Parameter p repräsentiert die Mindestwahrscheinlichkeit für ein Token, um berücksichtigt zu werden, relativ zur Wahrscheinlichkeit des wahrscheinlichsten Tokens. Zum Beispiel, bei p=0.05 und das wahrscheinlichste Token hat eine Wahrscheinlichkeit von 0.9, werden Logits mit einem Wert von weniger als 0.045 herausgefiltert. (Standard: 0.0)", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "Immer", "Amazing": "Fantastisch", "an assistant": "ein Assistent", @@ -208,7 +208,7 @@ "Confirm your new password": "Neues Passwort bestätigen", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "Verbindungen", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "Beschränkt den Aufwand für das Schlussfolgern bei Schlussfolgerungsmodellen. Nur anwendbar auf Schlussfolgerungsmodelle von spezifischen Anbietern, die den Schlussfolgerungsaufwand unterstützen. (Standard: medium)", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Kontaktieren Sie den Administrator für den Zugriff auf die Weboberfläche", "Content": "Info", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "Mit Email fortfahren", "Continue with LDAP": "Mit LDAP fortfahren", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Kontrollieren Sie, wie Nachrichtentext für TTS-Anfragen aufgeteilt wird. 'Punctuation' teilt in Sätze auf, 'paragraphs' teilt in Absätze auf und 'none' behält die Nachricht als einzelnen String.", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. 
At 1, it is disabled.": "", "Controls": "Steuerung", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "Kontrolliert das Gleichgewicht zwischen Kohärenz und Vielfalt des Ausgabetextes. Ein niedrigerer Wert führt zu fokussierterem und kohärenterem Text. (Standard: 5.0)", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "Kopiert", "Copied shared chat URL to clipboard!": "Freigabelink in die Zwischenablage kopiert!", "Copied to clipboard": "In die Zwischenablage kopiert", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Aktiviere Memory Locking (mlock), um zu verhindern, dass Modelldaten aus dem RAM ausgelagert werden. Diese Option sperrt die Arbeitsseiten des Modells im RAM, um sicherzustellen, dass sie nicht auf die Festplatte ausgelagert werden. Dies kann die Leistung verbessern, indem Page Faults vermieden und ein schneller Datenzugriff sichergestellt werden.", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Aktiviere Memory Mapping (mmap), um Modelldaten zu laden. Diese Option ermöglicht es dem System, den Festplattenspeicher als Erweiterung des RAM zu verwenden, indem Festplattendateien so behandelt werden, als ob sie im RAM wären. 
Dies kann die Modellleistung verbessern, indem ein schnellerer Datenzugriff ermöglicht wird. Es kann jedoch nicht auf allen Systemen korrekt funktionieren und einen erheblichen Teil des Festplattenspeichers beanspruchen.", "Enable Message Rating": "Nachrichtenbewertung aktivieren", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Mirostat Sampling zur Steuerung der Perplexität aktivieren. (Standard: 0, 0 = Deaktiviert, 1 = Mirostat, 2 = Mirostat 2.0)", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Registrierung erlauben", "Enabled": "Aktiviert", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Stellen Sie sicher, dass Ihre CSV-Datei 4 Spalten in dieser Reihenfolge enthält: Name, E-Mail, Passwort, Rolle.", @@ -566,7 +566,7 @@ "Include": "Einschließen", "Include `--api-auth` flag when running stable-diffusion-webui": "Fügen Sie beim Ausführen von stable-diffusion-webui die Option `--api-auth` hinzu", "Include `--api` flag when running stable-diffusion-webui": "Fügen Sie beim Ausführen von stable-diffusion-webui die Option `--api` hinzu", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "Beeinflusst, wie schnell der Algorithmus auf Feedback aus dem generierten Text reagiert. Eine niedrigere Lernrate führt zu langsameren Anpassungen, während eine höhere Lernrate den Algorithmus reaktionsschneller macht. (Standard: 0.1)", + "Influences how quickly the algorithm responds to feedback from the generated text. 
A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "Info", "Input commands": "Eingabebefehle", "Install from Github URL": "Installiere von der Github-URL", @@ -809,7 +809,7 @@ "Reasoning Effort": "Schlussfolgerungsaufwand", "Record voice": "Stimme aufnehmen", "Redirecting you to Open WebUI Community": "Sie werden zur OpenWebUI-Community weitergeleitet", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "Reduziert die Wahrscheinlichkeit, Unsinn zu generieren. Ein höherer Wert (z.B. 100) liefert vielfältigere Antworten, während ein niedrigerer Wert (z.B. 10) konservativer ist. (Standard: 40)", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Beziehen Sie sich auf sich selbst als \"Benutzer\" (z. B. \"Benutzer lernt Spanisch\")", "References from": "Referenzen aus", "Refused when it shouldn't have": "Abgelehnt, obwohl es nicht hätte abgelehnt werden sollen", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Legt die Anzahl der für die Berechnung verwendeten GPU-Geräte fest. Diese Option steuert, wie viele GPU-Geräte (falls verfügbar) zur Verarbeitung eingehender Anfragen verwendet werden. 
Eine Erhöhung dieses Wertes kann die Leistung für Modelle, die für GPU-Beschleunigung optimiert sind, erheblich verbessern, kann jedoch auch mehr Strom und GPU-Ressourcen verbrauchen.", "Set Voice": "Stimme festlegen", "Set whisper model": "Whisper-Modell festlegen", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "Legt fest, wie weit das Modell zurückblicken soll, um Wiederholungen zu verhindern. (Standard: 64, 0 = deaktiviert, -1 = num_ctx)", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "Legt den Zufallszahlengenerator-Seed für die Generierung fest. Wenn dieser auf eine bestimmte Zahl gesetzt wird, erzeugt das Modell denselben Text für denselben Prompt. (Standard: zufällig)", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "Legt die Größe des Kontextfensters fest, das zur Generierung des nächsten Tokens verwendet wird. (Standard: 2048)", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. 
A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Legt die zu verwendenden Stoppsequenzen fest. Wenn dieses Muster erkannt wird, stoppt das LLM die Textgenerierung und gibt zurück. Mehrere Stoppmuster können festgelegt werden, indem mehrere separate Stopp-Parameter in einer Modelldatei angegeben werden.", "Settings": "Einstellungen", "Settings saved successfully!": "Einstellungen erfolgreich gespeichert!", @@ -964,7 +964,7 @@ "System Prompt": "System-Prompt", "Tags Generation": "Tag-Generierung", "Tags Generation Prompt": "Prompt für Tag-Generierung", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Tail-Free Sampling wird verwendet, um den Einfluss weniger wahrscheinlicher Tokens auf die Ausgabe zu reduzieren. Ein höherer Wert (z.B. 2.0) reduziert den Einfluss stärker, während ein Wert von 1.0 diese Einstellung deaktiviert. (Standard: 1)", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "Zum Unterbrechen tippen", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "Danke für Ihr Feedback!", "The Application Account DN you bind with for search": "Der Anwendungs-Konto-DN, mit dem Sie für die Suche binden", "The base to search for users": "Die Basis, in der nach Benutzern gesucht wird", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "Die Batch-Größe bestimmt, wie viele Textanfragen gleichzeitig verarbeitet werden. Eine größere Batch-Größe kann die Leistung und Geschwindigkeit des Modells erhöhen, erfordert jedoch auch mehr Speicher. (Standard: 512)", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Die Entwickler hinter diesem Plugin sind leidenschaftliche Freiwillige aus der Community. Wenn Sie dieses Plugin hilfreich finden, erwägen Sie bitte, zu seiner Entwicklung beizutragen.", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Die Bewertungs-Bestenliste basiert auf dem Elo-Bewertungssystem und wird in Echtzeit aktualisiert.", "The LDAP attribute that maps to the mail that users use to sign in.": "Das LDAP-Attribut, das der Mail zugeordnet ist, die Benutzer zum Anmelden verwenden.", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Die maximale Dateigröße in MB. 
Wenn die Dateigröße dieses Limit überschreitet, wird die Datei nicht hochgeladen.", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Die maximale Anzahl von Dateien, die gleichzeitig in der Unterhaltung verwendet werden können. Wenn die Anzahl der Dateien dieses Limit überschreitet, werden die Dateien nicht hochgeladen.", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Die Punktzahl sollte ein Wert zwischen 0,0 (0 %) und 1,0 (100 %) sein.", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "Die Temperatur des Modells. Eine Erhöhung der Temperatur führt dazu, dass das Modell kreativer antwortet. (Standard: 0,8)", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "Design", "Thinking...": "Denke nach...", "This action cannot be undone. Do you wish to continue?": "Diese Aktion kann nicht rückgängig gemacht werden. Möchten Sie fortfahren?", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Dies stellt sicher, dass Ihre wertvollen Unterhaltungen sicher in Ihrer Backend-Datenbank gespeichert werden. Vielen Dank!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Dies ist eine experimentelle Funktion, sie funktioniert möglicherweise nicht wie erwartet und kann jederzeit geändert werden.", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "Diese Option steuert, wie viele Tokens beim Aktualisieren des Kontexts beibehalten werden. 
Wenn sie beispielsweise auf 2 gesetzt ist, werden die letzten 2 Tokens des Gesprächskontexts beibehalten. Das Beibehalten des Kontexts kann helfen, die Kontinuität eines Gesprächs aufrechtzuerhalten, kann jedoch die Fähigkeit verringern, auf neue Themen zu reagieren. (Standard: 24)", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "Diese Option legt die maximale Anzahl von Tokens fest, die das Modell in seiner Antwort generieren kann. Eine Erhöhung dieses Limits ermöglicht es dem Modell, längere Antworten zu geben, kann jedoch auch die Wahrscheinlichkeit erhöhen, dass unhilfreicher oder irrelevanter Inhalt generiert wird. (Standard: 128)", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "Diese Option löscht alle vorhandenen Dateien in der Sammlung und ersetzt sie durch neu hochgeladene Dateien.", "This response was generated by \"{{model}}\"": "Diese Antwort wurde von \"{{model}}\" generiert", "This will delete": "Dies löscht", @@ -1132,7 +1132,7 @@ "Why?": "Warum?", "Widescreen Mode": "Breitbildmodus", "Won": "Gewonnen", - "Works together with top-k. 
A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "Funktioniert zusammen mit top-k. Ein höherer Wert (z.B. 0,95) führt zu vielfältigerem Text, während ein niedrigerer Wert (z.B. 0,5) fokussierteren und konservativeren Text erzeugt. (Standard: 0,9)", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "Arbeitsbereich", "Workspace Permissions": "Arbeitsbereichsberechtigungen", "Write": "Schreiben", diff --git a/src/lib/i18n/locales/dg-DG/translation.json b/src/lib/i18n/locales/dg-DG/translation.json index e410f2943d4..fac8fcd9862 100644 --- a/src/lib/i18n/locales/dg-DG/translation.json +++ b/src/lib/i18n/locales/dg-DG/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "", "Allowed Endpoints": "", "Already have an account?": "Such account exists?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "", "an assistant": "such assistant", @@ -208,7 +208,7 @@ "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "Connections", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "", "Content": "Content", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "", "Continue with LDAP": "", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "", "Copied shared chat URL to clipboard!": "", "Copied to clipboard": "", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. 
This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "", "Enable Message Rating": "", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Enable New Bark Ups", "Enabled": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "", @@ -566,7 +566,7 @@ "Include": "", "Include `--api-auth` flag when running stable-diffusion-webui": "", "Include `--api` flag when running stable-diffusion-webui": "Include `--api` flag when running stable-diffusion-webui", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "", "Input commands": "Input commands", "Install from Github URL": "", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "Record Bark", "Redirecting you to Open WebUI Community": "Redirecting you to Open WebUI Community", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 
10) will be more conservative. (Default: 40)": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "", "References from": "", "Refused when it shouldn't have": "", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "Set Voice so speak", "Set whisper model": "", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. 
A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "", "Settings": "Settings much settings", "Settings saved successfully!": "Settings saved successfully! Very success!", @@ -964,7 +964,7 @@ "System Prompt": "System Prompt much prompt", "Tags Generation": "", "Tags Generation Prompt": "", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "", "The Application Account DN you bind with for search": "", "The base to search for users": "", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "", + "The batch size determines how many text requests are processed together at once. 
A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "Theme much theme", "Thinking...": "", "This action cannot be undone. Do you wish to continue?": "", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "This ensures that your valuable conversations are securely saved to your backend database. Thank you! Much secure!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "", - "This option sets the maximum number of tokens the model can generate in its response. 
Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "", "This response was generated by \"{{model}}\"": "", "This will delete": "", @@ -1132,7 +1132,7 @@ "Why?": "", "Widescreen Mode": "", "Won": "", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "", "Workspace Permissions": "", "Write": "", diff --git a/src/lib/i18n/locales/el-GR/translation.json b/src/lib/i18n/locales/el-GR/translation.json index 076e9c96601..614b6de688a 100644 --- a/src/lib/i18n/locales/el-GR/translation.json +++ b/src/lib/i18n/locales/el-GR/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "Επιτρέπεται η Παύση Φωνής στην Κλήση", "Allowed Endpoints": "", "Already have an account?": "Έχετε ήδη λογαριασμό;", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. 
The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "Εναλλακτικό στο top_p, και στοχεύει στη διασφάλιση μιας ισορροπίας μεταξύ ποιότητας και ποικιλίας. Η παράμετρος p αντιπροσωπεύει την ελάχιστη πιθανότητα για ένα token να θεωρηθεί, σε σχέση με την πιθανότητα του πιο πιθανού token. Για παράδειγμα, με p=0.05 και το πιο πιθανό token να έχει πιθανότητα 0.9, τα logits με τιμή μικρότερη από 0.045 φιλτράρονται. (Προεπιλογή: 0.0)", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "Καταπληκτικό", "an assistant": "ένας βοηθός", @@ -208,7 +208,7 @@ "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "Συνδέσεις", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Επικοινωνήστε με τον Διαχειριστή για Πρόσβαση στο WebUI", "Content": "Περιεχόμενο", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "Συνέχεια με Email", "Continue with LDAP": "Συνέχεια με LDAP", "Control how message text is split for TTS requests. 
'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Έλεγχος πώς διαχωρίζεται το κείμενο του μηνύματος για αιτήματα TTS. Το 'Στίξη' διαχωρίζει σε προτάσεις, οι 'παραγράφοι' σε παραγράφους, και το 'κανένα' κρατά το μήνυμα ως μια αλυσίδα.", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "Έλεγχοι", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "Διαχειρίζεται την ισορροπία μεταξύ συνεκτικότητας και ποικιλίας της εξόδου. Μια χαμηλότερη τιμή θα έχει ως αποτέλεσμα πιο εστιασμένο και συνεκτικό κείμενο. (Προεπιλογή: 5.0)", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "Αντιγράφηκε", "Copied shared chat URL to clipboard!": "Αντιγράφηκε το URL της κοινόχρηστης συνομιλίας στο πρόχειρο!", "Copied to clipboard": "Αντιγράφηκε στο πρόχειρο", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Ενεργοποίηση Κλείδωσης Μνήμης (mlock) για την αποτροπή της ανταλλαγής δεδομένων του μοντέλου από τη μνήμη RAM. Αυτή η επιλογή κλειδώνει το σύνολο εργασίας των σελίδων του μοντέλου στη μνήμη RAM, διασφαλίζοντας ότι δεν θα ανταλλαχθούν στο δίσκο. 
Αυτό μπορεί να βοηθήσει στη διατήρηση της απόδοσης αποφεύγοντας σφάλματα σελίδων και διασφαλίζοντας γρήγορη πρόσβαση στα δεδομένα.", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Ενεργοποίηση Χαρτογράφησης Μνήμης (mmap) για φόρτωση δεδομένων μοντέλου. Αυτή η επιλογή επιτρέπει στο σύστημα να χρησιμοποιεί αποθήκευση δίσκου ως επέκταση της μνήμης RAM, αντιμετωπίζοντας αρχεία δίσκου σαν να ήταν στη μνήμη RAM. Αυτό μπορεί να βελτιώσει την απόδοση του μοντέλου επιτρέποντας γρηγορότερη πρόσβαση στα δεδομένα. Ωστόσο, μπορεί να μην λειτουργεί σωστά με όλα τα συστήματα και να καταναλώνει σημαντικό χώρο στο δίσκο.", "Enable Message Rating": "Ενεργοποίηση Αξιολόγησης Μηνυμάτων", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Ενεργοποίηση δειγματοληψίας Mirostat για έλεγχο της περιπλοκότητας. (Προεπιλογή: 0, 0 = Απενεργοποιημένο, 1 = Mirostat, 2 = Mirostat 2.0)", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Ενεργοποίηση Νέων Εγγραφών", "Enabled": "Ενεργοποιημένο", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Βεβαιωθείτε ότι το αρχείο CSV σας περιλαμβάνει 4 στήλες με αυτή τη σειρά: Όνομα, Email, Κωδικός, Ρόλος.", @@ -566,7 +566,7 @@ "Include": "Συμπερίληψη", "Include `--api-auth` flag when running stable-diffusion-webui": "Συμπεριλάβετε το flag `--api-auth` όταν τρέχετε το stable-diffusion-webui", "Include `--api` flag when running stable-diffusion-webui": "Συμπεριλάβετε το flag `--api` όταν τρέχετε το stable-diffusion-webui", - "Influences how quickly the algorithm responds to feedback from the generated text. 
A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "Επηρεάζει πόσο γρήγορα ανταποκρίνεται ο αλγόριθμος στην ανατροφοδότηση από το παραγόμενο κείμενο. Μια χαμηλότερη ταχύτητα μάθησης θα έχει ως αποτέλεσμα πιο αργές προσαρμογές, ενώ μια υψηλότερη ταχύτητα μάθησης θα κάνει τον αλγόριθμο πιο ανταποκρινόμενο. (Προεπιλογή: 0.1)", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "Πληροφορίες", "Input commands": "Εισαγωγή εντολών", "Install from Github URL": "Εγκατάσταση από URL Github", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "Εγγραφή φωνής", "Redirecting you to Open WebUI Community": "Μετακατεύθυνση στην Κοινότητα OpenWebUI", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "Μειώνει την πιθανότητα δημιουργίας ανοησιών. Μια υψηλότερη τιμή (π.χ. 100) θα δώσει πιο ποικίλες απαντήσεις, ενώ μια χαμηλότερη τιμή (π.χ. 10) θα δημιουργήσει πιο συντηρητικές απαντήσεις. (Προεπιλογή: 40)", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Αναφέρεστε στον εαυτό σας ως \"User\" (π.χ., \"User μαθαίνει Ισπανικά\")", "References from": "Αναφορές από", "Refused when it shouldn't have": "Αρνήθηκε όταν δεν έπρεπε", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. 
Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Ορισμός του αριθμού των νημάτων εργασίας που χρησιμοποιούνται για υπολογισμούς. Αυτή η επιλογή ελέγχει πόσα νήματα χρησιμοποιούνται για την επεξεργασία των εισερχόμενων αιτημάτων ταυτόχρονα. Η αύξηση αυτής της τιμής μπορεί να βελτιώσει την απόδοση σε εργασίες υψηλής συγχρονισμένης φόρτωσης αλλά μπορεί επίσης να καταναλώσει περισσότερους πόρους CPU.", "Set Voice": "Ορισμός Φωνής", "Set whisper model": "Ορισμός μοντέλου whisper", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "Ορίζει πόσο πίσω θα κοιτάξει το μοντέλο για να αποτρέψει την επανάληψη. (Προεπιλογή: 64, 0 = απενεργοποιημένο, -1 = num_ctx)", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "Ορίζει τον τυχαίο σπόρο αριθμού που θα χρησιμοποιηθεί για τη δημιουργία. Ορισμός αυτού σε έναν συγκεκριμένο αριθμό θα κάνει το μοντέλο να δημιουργεί το ίδιο κείμενο για την ίδια προτροπή. (Προεπιλογή: τυχαίο)", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "Ορίζει το μέγεθος του παραθύρου πλαισίου που χρησιμοποιείται για τη δημιουργία του επόμενου token. (Προεπιλογή: 2048)", + "Sets a flat bias against tokens that have appeared at least once. 
A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Ορίζει τις σειρές παύσης που θα χρησιμοποιηθούν. Όταν εντοπιστεί αυτό το μοτίβο, το LLM θα σταματήσει να δημιουργεί κείμενο και θα επιστρέψει. Πολλαπλά μοτίβα παύσης μπορούν να οριστούν καθορίζοντας πολλαπλές ξεχωριστές παραμέτρους παύσης σε ένα αρχείο μοντέλου.", "Settings": "Ρυθμίσεις", "Settings saved successfully!": "Οι Ρυθμίσεις αποθηκεύτηκαν με επιτυχία!", @@ -964,7 +964,7 @@ "System Prompt": "Προτροπή Συστήματος", "Tags Generation": "", "Tags Generation Prompt": "Προτροπή Γενιάς Ετικετών", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Η δειγματοληψία Tail free χρησιμοποιείται για να μειώσει την επίδραση των λιγότερο πιθανών tokens από την έξοδο. Μια υψηλότερη τιμή (π.χ., 2.0) θα μειώσει την επίδραση περισσότερο, ενώ μια τιμή 1.0 απενεργοποιεί αυτή τη ρύθμιση. (προεπιλογή: 1)", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "Πατήστε για παύση", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "Ευχαριστούμε για την ανατροφοδότησή σας!", "The Application Account DN you bind with for search": "Το DN του Λογαριασμού Εφαρμογής που συνδέετε για αναζήτηση", "The base to search for users": "Η βάση για αναζήτηση χρηστών", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "Το μέγεθος παρτίδας καθορίζει πόσες αιτήσεις κειμένου επεξεργάζονται μαζί ταυτόχρονα. Ένα μεγαλύτερο μέγεθος παρτίδας μπορεί να αυξήσει την απόδοση και την ταχύτητα του μοντέλου, αλλά απαιτεί επίσης περισσότερη μνήμη. (Προεπιλογή: 512)", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Οι προγραμματιστές πίσω από αυτό το plugin είναι παθιασμένοι εθελοντές από την κοινότητα. Αν βρείτε αυτό το plugin χρήσιμο, παρακαλώ σκεφτείτε να συνεισφέρετε στην ανάπτυξή του.", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Η κατάταξη αξιολόγησης βασίζεται στο σύστημα βαθμολόγησης Elo και ενημερώνεται σε πραγματικό χρόνο.", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Το μέγιστο μέγεθος αρχείου σε MB. 
Αν το μέγεθος του αρχείου υπερβαίνει αυτό το όριο, το αρχείο δεν θα ανεβεί.", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Ο μέγιστος αριθμός αρχείων που μπορούν να χρησιμοποιηθούν ταυτόχρονα στη συνομιλία. Αν ο αριθμός των αρχείων υπερβαίνει αυτό το όριο, τα αρχεία δεν θα ανεβούν.", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Η βαθμολογία θα πρέπει να είναι μια τιμή μεταξύ 0.0 (0%) και 1.0 (100%).", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "Η θερμοκρασία του μοντέλου. Η αύξηση της θερμοκρασίας θα κάνει το μοντέλο να απαντά πιο δημιουργικά. (Προεπιλογή: 0.8)", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "Θέμα", "Thinking...": "Σκέφτομαι...", "This action cannot be undone. Do you wish to continue?": "Αυτή η ενέργεια δεν μπορεί να αναιρεθεί. Θέλετε να συνεχίσετε;", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Αυτό διασφαλίζει ότι οι πολύτιμες συνομιλίες σας αποθηκεύονται με ασφάλεια στη βάση δεδομένων backend σας. Ευχαριστούμε!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Αυτή είναι μια πειραματική λειτουργία, μπορεί να μην λειτουργεί όπως αναμένεται και υπόκειται σε αλλαγές οποιαδήποτε στιγμή.", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "Αυτή η επιλογή ελέγχει πόσα tokens διατηρούνται κατά την ανανέωση του πλαισίου. 
Για παράδειγμα, αν οριστεί σε 2, τα τελευταία 2 tokens του πλαισίου συνομιλίας θα διατηρηθούν. Η διατήρηση του πλαισίου μπορεί να βοηθήσει στη διατήρηση της συνέχειας μιας συνομιλίας, αλλά μπορεί να μειώσει την ικανότητα ανταπόκρισης σε νέα θέματα. (Προεπιλογή: 24)", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "Αυτή η επιλογή ορίζει τον μέγιστο αριθμό tokens που μπορεί να δημιουργήσει το μοντέλο στην απάντησή του. Η αύξηση αυτού του ορίου επιτρέπει στο μοντέλο να παρέχει μεγαλύτερες απαντήσεις, αλλά μπορεί επίσης να αυξήσει την πιθανότητα δημιουργίας αχρήσιμου ή άσχετου περιεχομένου. (Προεπιλογή: 128)", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "Αυτή η επιλογή θα διαγράψει όλα τα υπάρχοντα αρχεία στη συλλογή και θα τα αντικαταστήσει με νέα ανεβασμένα αρχεία.", "This response was generated by \"{{model}}\"": "Αυτή η απάντηση δημιουργήθηκε από \"{{model}}\"", "This will delete": "Αυτό θα διαγράψει", @@ -1132,7 +1132,7 @@ "Why?": "Γιατί?", "Widescreen Mode": "Λειτουργία Οθόνης Ευρείας", "Won": "Κέρδισε", - "Works together with top-k. 
A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "Συνεργάζεται μαζί με top-k. Μια υψηλότερη τιμή (π.χ., 0.95) θα οδηγήσει σε πιο ποικίλο κείμενο, ενώ μια χαμηλότερη τιμή (π.χ., 0.5) θα δημιουργήσει πιο εστιασμένο και συντηρητικό κείμενο. (Προεπιλογή: 0.9)", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "Χώρος Εργασίας", "Workspace Permissions": "Δικαιώματα Χώρου Εργασίας", "Write": "", diff --git a/src/lib/i18n/locales/en-GB/translation.json b/src/lib/i18n/locales/en-GB/translation.json index 9bebdd923b8..d0930120ca0 100644 --- a/src/lib/i18n/locales/en-GB/translation.json +++ b/src/lib/i18n/locales/en-GB/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "", "Allowed Endpoints": "", "Already have an account?": "", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "", "an assistant": "", @@ -208,7 +208,7 @@ "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "", - "Constrains effort on reasoning for reasoning models. 
Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "", "Content": "", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "", "Continue with LDAP": "", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "", "Copied shared chat URL to clipboard!": "", "Copied to clipboard": "", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. 
This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "", "Enable Message Rating": "", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "", "Enabled": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "", @@ -566,7 +566,7 @@ "Include": "", "Include `--api-auth` flag when running stable-diffusion-webui": "", "Include `--api` flag when running stable-diffusion-webui": "", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "", "Input commands": "", "Install from Github URL": "", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "", "Redirecting you to Open WebUI Community": "", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "", "References from": "", "Refused when it shouldn't have": "", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. 
Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "", "Set whisper model": "", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. 
Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "", "Settings": "", "Settings saved successfully!": "", @@ -964,7 +964,7 @@ "System Prompt": "", "Tags Generation": "", "Tags Generation Prompt": "", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "", "The Application Account DN you bind with for search": "", "The base to search for users": "", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "", - "The temperature of the model. 
Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "", "Thinking...": "", "This action cannot be undone. Do you wish to continue?": "", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "", "This response was generated by \"{{model}}\"": "", "This will delete": "", @@ -1132,7 +1132,7 @@ "Why?": "", "Widescreen Mode": "", "Won": "", - "Works together with top-k. 
A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "", "Workspace Permissions": "", "Write": "", diff --git a/src/lib/i18n/locales/en-US/translation.json b/src/lib/i18n/locales/en-US/translation.json index 9bebdd923b8..d0930120ca0 100644 --- a/src/lib/i18n/locales/en-US/translation.json +++ b/src/lib/i18n/locales/en-US/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "", "Allowed Endpoints": "", "Already have an account?": "", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "", "an assistant": "", @@ -208,7 +208,7 @@ "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. 
Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "", "Content": "", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "", "Continue with LDAP": "", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "", "Copied shared chat URL to clipboard!": "", "Copied to clipboard": "", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. 
However, it may not work correctly with all systems and can consume a significant amount of disk space.": "", "Enable Message Rating": "", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "", "Enabled": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "", @@ -566,7 +566,7 @@ "Include": "", "Include `--api-auth` flag when running stable-diffusion-webui": "", "Include `--api` flag when running stable-diffusion-webui": "", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "", "Input commands": "", "Install from Github URL": "", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "", "Redirecting you to Open WebUI Community": "", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "", "References from": "", "Refused when it shouldn't have": "", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. 
Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "", "Set whisper model": "", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. 
Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "", "Settings": "", "Settings saved successfully!": "", @@ -964,7 +964,7 @@ "System Prompt": "", "Tags Generation": "", "Tags Generation Prompt": "", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "", "The Application Account DN you bind with for search": "", "The base to search for users": "", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "", - "The temperature of the model. 
Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "", "Thinking...": "", "This action cannot be undone. Do you wish to continue?": "", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "", "This response was generated by \"{{model}}\"": "", "This will delete": "", @@ -1132,7 +1132,7 @@ "Why?": "", "Widescreen Mode": "", "Won": "", - "Works together with top-k. 
A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "", "Workspace Permissions": "", "Write": "", diff --git a/src/lib/i18n/locales/es-ES/translation.json b/src/lib/i18n/locales/es-ES/translation.json index 79b03eeb630..56b805639fa 100644 --- a/src/lib/i18n/locales/es-ES/translation.json +++ b/src/lib/i18n/locales/es-ES/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "Permitir interrupción de voz en llamada", "Allowed Endpoints": "Endpoints permitidos", "Already have an account?": "¿Ya tienes una cuenta?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "Alternativa a top_p, y busca asegurar un equilibrio entre calidad y variedad. El parámetro p representa la probabilidad mínima para que un token sea considerado, en relación con la probabilidad del token más probable. Por ejemplo, con p=0.05 y el token más probable con una probabilidad de 0.9, los logits con un valor menor a 0.045 son filtrados. (Predeterminado: 0.0)", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "Siempre", "Amazing": "Sorprendente", "an assistant": "un asistente", @@ -208,7 +208,7 @@ "Confirm your new password": "Confirmar tu nueva contraseña", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "Conexiones", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": " Restringe el esfuerzo en la razonamiento para los modelos de razonamiento. Solo aplicable a los modelos de razonamiento de proveedores específicos que admiten el esfuerzo de razonamiento. (Por defecto: medio)", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Contacta el administrador para obtener acceso al WebUI", "Content": "Contenido", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "Continuar con email", "Continue with LDAP": "Continuar con LDAP", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Controlar como el texto del mensaje se divide para las solicitudes de TTS. 'Punctuation' divide en oraciones, 'paragraphs' divide en párrafos y 'none' mantiene el mensaje como una sola cadena.", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. 
At 1, it is disabled.": "", "Controls": "Controles", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": " Controlar el equilibrio entre la coherencia y la diversidad de la salida. Un valor más bajo resultará en un texto más enfocado y coherente. (Por defecto: 5.0)", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "Copiado", "Copied shared chat URL to clipboard!": "¡URL de chat compartido copiado al portapapeles!", "Copied to clipboard": "Copiado al portapapeles", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Habilitar bloqueo de memoria (mlock) para evitar que los datos del modelo se intercambien fuera de la RAM. Esta opción bloquea el conjunto de páginas de trabajo del modelo en la RAM, asegurando que no se intercambiarán fuera del disco. Esto puede ayudar a mantener el rendimiento evitando fallos de página y asegurando un acceso rápido a los datos.", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Habilitar asignación de memoria (mmap) para cargar datos del modelo. Esta opción permite al sistema usar el almacenamiento en disco como una extensión de la RAM al tratar los archivos en disco como si estuvieran en la RAM. Esto puede mejorar el rendimiento del modelo permitiendo un acceso más rápido a los datos. 
Sin embargo, puede no funcionar correctamente con todos los sistemas y puede consumir una cantidad significativa de espacio en disco.", "Enable Message Rating": "Habilitar la calificación de los mensajes", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Habilitar muestreo Mirostat para controlar la perplejidad. (Predeterminado: 0, 0 = Deshabilitado, 1 = Mirostat, 2 = Mirostat 2.0)", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Habilitar Nuevos Registros", "Enabled": "Activado", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Asegúrese de que su archivo CSV incluya 4 columnas en este orden: Nombre, Correo Electrónico, Contraseña, Rol.", @@ -566,7 +566,7 @@ "Include": "Incluir", "Include `--api-auth` flag when running stable-diffusion-webui": "Incluir el indicador `--api-auth` al ejecutar stable-diffusion-webui", "Include `--api` flag when running stable-diffusion-webui": "Incluir el indicador `--api` al ejecutar stable-diffusion-webui", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "Influencia en qué medida el algoritmo responde rápidamente a la retroalimentación del texto generado. Una tasa de aprendizaje más baja resultará en ajustes más lentos, mientras que una tasa de aprendizaje más alta hará que el algoritmo sea más receptivo. (Predeterminado: 0.1)", + "Influences how quickly the algorithm responds to feedback from the generated text. 
A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "Información", "Input commands": "Ingresar comandos", "Install from Github URL": "Instalar desde la URL de Github", @@ -809,7 +809,7 @@ "Reasoning Effort": "Esfuerzo de razonamiento", "Record voice": "Grabar voz", "Redirecting you to Open WebUI Community": "Redireccionándote a la comunidad OpenWebUI", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "Reduce la probabilidad de generar tonterías. Un valor más alto (p.ej. 100) dará respuestas más diversas, mientras que un valor más bajo (p.ej. 10) será más conservador. (Predeterminado: 40)", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Referirse a usted mismo como \"Usuario\" (por ejemplo, \"El usuario está aprendiendo Español\")", "References from": "Referencias de", "Refused when it shouldn't have": "Rechazado cuando no debería", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Establece el número de hilos de trabajo utilizados para el cálculo. Esta opción controla cuántos hilos se utilizan para procesar las solicitudes entrantes simultáneamente. 
Aumentar este valor puede mejorar el rendimiento bajo cargas de trabajo de alta concurrencia, pero también puede consumir más recursos de CPU.", "Set Voice": "Establecer la voz", "Set whisper model": "Establecer modelo de whisper", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "Establece cuán lejos atrás debe mirar el modelo para evitar la repetición. (Predeterminado: 64, 0 = deshabilitado, -1 = num_ctx)", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "Establece la semilla de número aleatorio a usar para la generación. Establecer esto en un número específico hará que el modelo genere el mismo texto para el mismo prompt. (Predeterminado: aleatorio)", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "Establece el tamaño de la ventana de contexto utilizada para generar el siguiente token. (Predeterminado: 2048)", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. 
A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Establece las secuencias de parada a usar. Cuando se encuentre este patrón, el LLM dejará de generar texto y devolverá. Se pueden establecer varios patrones de parada especificando múltiples parámetros de parada separados en un archivo de modelo.", "Settings": "Configuración", "Settings saved successfully!": "¡Configuración guardada con éxito!", @@ -964,7 +964,7 @@ "System Prompt": "Prompt del sistema", "Tags Generation": "Generación de etiquetas", "Tags Generation Prompt": "Prompt de generación de etiquetas", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "El muestreo libre de cola se utiliza para reducir el impacto de los tokens menos probables en la salida. Un valor más alto (p.ej., 2.0) reducirá el impacto más, mientras que un valor de 1.0 deshabilitará esta configuración. (predeterminado: 1)", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "Toca para interrumpir", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "¡Gracias por tu retroalimentación!", "The Application Account DN you bind with for search": "La cuenta de aplicación DN que vincula para la búsqueda", "The base to search for users": "La base para buscar usuarios", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Los desarrolladores de este plugin son apasionados voluntarios de la comunidad. Si encuentras este plugin útil, por favor considere contribuir a su desarrollo.", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "El tablero de líderes de evaluación se basa en el sistema de clasificación Elo y se actualiza en tiempo real.", "The LDAP attribute that maps to the mail that users use to sign in.": "El atributo LDAP que se asigna al correo que los usuarios utilizan para iniciar sesión.", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "El tamaño máximo del archivo en MB. Si el tamaño del archivo supera este límite, el archivo no se subirá.", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "El número máximo de archivos que se pueden utilizar a la vez en chat. 
Si este límite es superado, los archivos no se subirán.", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "La puntuación debe ser un valor entre 0.0 (0%) y 1.0 (100%).", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "La temperatura del modelo. Aumentar la temperatura hará que el modelo responda de manera más creativa. (Predeterminado: 0.8)", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "Tema", "Thinking...": "Pensando...", "This action cannot be undone. Do you wish to continue?": "Esta acción no se puede deshacer. ¿Desea continuar?", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Esto garantiza que sus valiosas conversaciones se guarden de forma segura en su base de datos en el backend. ¡Gracias!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Esta es una característica experimental que puede no funcionar como se esperaba y está sujeto a cambios en cualquier momento.", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "Esta opción controla cuántos tokens se conservan al actualizar el contexto. Por ejemplo, si se establece en 2, se conservarán los últimos 2 tokens del contexto de la conversación. Conservar el contexto puede ayudar a mantener la continuidad de una conversación, pero puede reducir la capacidad de responder a nuevos temas. (Predeterminado: 24)", - "This option sets the maximum number of tokens the model can generate in its response. 
Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "Esta opción eliminará todos los archivos existentes en la colección y los reemplazará con nuevos archivos subidos.", "This response was generated by \"{{model}}\"": "Esta respuesta fue generada por \"{{model}}\"", "This will delete": "Esto eliminará", @@ -1132,7 +1132,7 @@ "Why?": "¿Por qué?", "Widescreen Mode": "Modo de pantalla ancha", "Won": "Ganado", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "Funciona junto con top-k. Un valor más alto (p.ej., 0.95) dará como resultado un texto más diverso, mientras que un valor más bajo (p.ej., 0.5) generará un texto más enfocado y conservador. (Predeterminado: 0.9)", + "Works together with top-k. 
A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "Espacio de trabajo", "Workspace Permissions": "Permisos del espacio de trabajo", "Write": "Escribir", diff --git a/src/lib/i18n/locales/eu-ES/translation.json b/src/lib/i18n/locales/eu-ES/translation.json index ee5ed5e0c87..e1ef7f7ced5 100644 --- a/src/lib/i18n/locales/eu-ES/translation.json +++ b/src/lib/i18n/locales/eu-ES/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "Baimendu Ahots Etena Deietan", "Allowed Endpoints": "", "Already have an account?": "Baduzu kontu bat?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "top_p-ren alternatiba, kalitate eta aniztasunaren arteko oreka bermatzea du helburu. p parametroak token bat kontuan hartzeko gutxieneko probabilitatea adierazten du, token probableenaren probabilitatearen arabera. Adibidez, p=0.05 balioarekin eta token probableenaren probabilitatea 0.9 denean, 0.045 baino balio txikiagoko logit-ak baztertzen dira. (Lehenetsia: 0.0)", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "Harrigarria", "an assistant": "laguntzaile bat", @@ -208,7 +208,7 @@ "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "Konexioak", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Jarri harremanetan Administratzailearekin WebUI Sarbiderako", "Content": "Edukia", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "Jarraitu Posta Elektronikoarekin", "Continue with LDAP": "Jarraitu LDAP-rekin", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Kontrolatu nola banatzen den mezuaren testua TTS eskaeretarako. 'Puntuazioa'-k esaldietan banatzen du, 'paragrafoak'-k paragrafoetan, eta 'bat ere ez'-ek mezua kate bakar gisa mantentzen du.", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "Kontrolak", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. 
(Default: 5.0)": "Irteeraren koherentzia eta aniztasunaren arteko oreka kontrolatzen du. Balio txikiagoak testu zentratuagoa eta koherenteagoa emango du. (Lehenetsia: 5.0)", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "Kopiatuta", "Copied shared chat URL to clipboard!": "Partekatutako txataren URLa arbelera kopiatu da!", "Copied to clipboard": "Arbelera kopiatuta", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Gaitu Memoria Blokeatzea (mlock) ereduaren datuak RAM memoriatik kanpo ez trukatzeko. Aukera honek ereduaren lan-orri multzoa RAMean blokatzen du, diskora ez direla trukatuko ziurtatuz. Honek errendimendua mantentzen lagun dezake, orri-hutsegiteak saihestuz eta datuen sarbide azkarra bermatuz.", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Gaitu Memoria Mapaketa (mmap) ereduaren datuak kargatzeko. Aukera honek sistemari disko-biltegiratzea RAM memoriaren luzapen gisa erabiltzea ahalbidetzen dio, diskoko fitxategiak RAMean baleude bezala tratatuz. Honek ereduaren errendimendua hobe dezake, datuen sarbide azkarragoa ahalbidetuz. Hala ere, baliteke sistema guztietan behar bezala ez funtzionatzea eta disko-espazio handia kontsumitu dezake.", "Enable Message Rating": "Gaitu Mezuen Balorazioa", - "Enable Mirostat sampling for controlling perplexity. 
(Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Gaitu Mirostat laginketa nahasmena kontrolatzeko. (Lehenetsia: 0, 0 = Desgaituta, 1 = Mirostat, 2 = Mirostat 2.0)", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Gaitu Izena Emate Berriak", "Enabled": "Gaituta", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Ziurtatu zure CSV fitxategiak 4 zutabe dituela ordena honetan: Izena, Posta elektronikoa, Pasahitza, Rola.", @@ -566,7 +566,7 @@ "Include": "Sartu", "Include `--api-auth` flag when running stable-diffusion-webui": "Sartu `--api-auth` bandera stable-diffusion-webui exekutatzean", "Include `--api` flag when running stable-diffusion-webui": "Sartu `--api` bandera stable-diffusion-webui exekutatzean", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "Algoritmoak sortutako testutik jasotako feedbackari erantzuteko abiadura zehazten du. Ikasketa-tasa baxuago batek doikuntza motelagoak eragingo ditu, eta ikasketa-tasa altuago batek algoritmoaren erantzuna bizkorragoa egingo du. (Lehenetsia: 0.1)", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "Informazioa", "Input commands": "Sartu komandoak", "Install from Github URL": "Instalatu Github URLtik", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "Grabatu ahotsa", "Redirecting you to Open WebUI Community": "OpenWebUI Komunitatera berbideratzen", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. 
(Default: 40)": "Zentzugabekeriak sortzeko probabilitatea murrizten du. Balio altuago batek (adib. 100) erantzun anitzagoak emango ditu, balio baxuago batek (adib. 10) kontserbadoreagoa izango den bitartean. (Lehenetsia: 40)", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Egin erreferentzia zure buruari \"Erabiltzaile\" gisa (adib., \"Erabiltzailea gaztelania ikasten ari da\")", "References from": "Erreferentziak hemendik", "Refused when it shouldn't have": "Ukatu duenean ukatu behar ez zuenean", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Ezarri kalkulurako erabilitako langile harien kopurua. Aukera honek kontrolatzen du zenbat hari erabiltzen diren sarrerako eskaerak aldi berean prozesatzeko. Balio hau handitzeak errendimendua hobetu dezake konkurrentzia altuko lan-kargetan, baina CPU baliabide gehiago kontsumitu ditzake.", "Set Voice": "Ezarri ahotsa", "Set whisper model": "Ezarri whisper modeloa", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. 
(Default: 64, 0 = disabled, -1 = num_ctx)": "Ezartzen du modeloak zenbat atzera begiratu behar duen errepikapenak saihesteko. (Lehenetsia: 64, 0 = desgaituta, -1 = num_ctx)", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "Ezartzen du sorkuntzarako erabiliko den ausazko zenbakien hazia. Hau zenbaki zehatz batera ezartzeak modeloak testu bera sortzea eragingo du prompt bererako. (Lehenetsia: ausazkoa)", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "Ezartzen du hurrengo tokena sortzeko erabilitako testuinguru leihoaren tamaina. (Lehenetsia: 2048)", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Ezartzen ditu erabiliko diren gelditzeko sekuentziak. Patroi hau aurkitzen denean, LLMak testua sortzeari utziko dio eta itzuli egingo da. 
Gelditzeko patroi anitz ezar daitezke modelfile batean gelditzeko parametro anitz zehaztuz.", "Settings": "Ezarpenak", "Settings saved successfully!": "Ezarpenak ongi gorde dira!", @@ -964,7 +964,7 @@ "System Prompt": "Sistema prompta", "Tags Generation": "", "Tags Generation Prompt": "Etiketa sortzeko prompta", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Isats-libre laginketa erabiltzen da irteran probabilitate txikiagoko tokenen eragina murrizteko. Balio altuago batek (adib., 2.0) eragina gehiago murriztuko du, 1.0 balioak ezarpen hau desgaitzen duen bitartean. (lehenetsia: 1)", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "Ukitu eteteko", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "Eskerrik asko zure iritzia emateagatik!", "The Application Account DN you bind with for search": "Bilaketarako lotzen duzun aplikazio kontuaren DN-a", "The base to search for users": "Erabiltzaileak bilatzeko oinarria", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "Sorta tamainak zehazten du zenbat testu eskaera prozesatzen diren batera aldi berean. Sorta tamaina handiago batek modeloaren errendimendua eta abiadura handitu ditzake, baina memoria gehiago behar du. (Lehenetsia: 512)", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. 
If you find this plugin helpful, please consider contributing to its development.": "Plugin honen atzean dauden garatzaileak komunitateko boluntario sutsuak dira. Plugin hau baliagarria iruditzen bazaizu, mesedez kontuan hartu bere garapenean laguntzea.", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Ebaluazio sailkapena Elo sailkapen sisteman oinarritzen da eta denbora errealean eguneratzen da.", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Fitxategiaren gehienezko tamaina MB-tan. Fitxategiaren tamainak muga hau gainditzen badu, fitxategia ez da kargatuko.", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Txatean aldi berean erabili daitezkeen fitxategien gehienezko kopurua. Fitxategi kopuruak muga hau gainditzen badu, fitxategiak ez dira kargatuko.", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Puntuazioa 0.0 (0%) eta 1.0 (100%) arteko balio bat izan behar da.", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "Modeloaren tenperatura. Tenperatura handitzeak modeloaren erantzunak sortzaileagoak izatea eragingo du. (Lehenetsia: 0.8)", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "Gaia", "Thinking...": "Pentsatzen...", "This action cannot be undone. Do you wish to continue?": "Ekintza hau ezin da desegin. Jarraitu nahi duzu?", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Honek zure elkarrizketa baliotsuak modu seguruan zure backend datu-basean gordeko direla ziurtatzen du. 
Eskerrik asko!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Hau funtzionalitate esperimental bat da, baliteke espero bezala ez funtzionatzea eta edozein unetan aldaketak izatea.", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "Aukera honek kontrolatzen du zenbat token mantentzen diren testuingurua freskatzean. Adibidez, 2-ra ezarrita badago, elkarrizketaren testuinguruko azken 2 tokenak mantenduko dira. Testuingurua mantentzeak elkarrizketaren jarraitutasuna mantentzen lagun dezake, baina gai berriei erantzuteko gaitasuna murriztu dezake. (Lehenetsia: 24)", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "Aukera honek ereduak bere erantzunean sor dezakeen token kopuru maximoa ezartzen du. Muga hau handitzeak ereduari erantzun luzeagoak emateko aukera ematen dio, baina eduki ez-erabilgarri edo ez-egokia sortzeko probabilitatea ere handitu dezake. (Lehenetsia: 128)", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. 
Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "Aukera honek bilduman dauden fitxategi guztiak ezabatuko ditu eta berriki kargatutako fitxategiekin ordezkatuko ditu.", "This response was generated by \"{{model}}\"": "Erantzun hau \"{{model}}\" modeloak sortu du", "This will delete": "Honek ezabatuko du", @@ -1132,7 +1132,7 @@ "Why?": "Zergatik?", "Widescreen Mode": "Pantaila zabaleko modua", "Won": "Irabazi du", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "Top-k-rekin batera lan egiten du. Balio altuago batek (adib., 0.95) testu anitzagoa sortuko du, balio baxuago batek (adib., 0.5) testu fokatu eta kontserbadoreagoa sortuko duen bitartean. (Lehenetsia: 0.9)", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "Lan-eremua", "Workspace Permissions": "Lan-eremuaren baimenak", "Write": "", diff --git a/src/lib/i18n/locales/fa-IR/translation.json b/src/lib/i18n/locales/fa-IR/translation.json index 038d3ec74df..f887c671dc2 100644 --- a/src/lib/i18n/locales/fa-IR/translation.json +++ b/src/lib/i18n/locales/fa-IR/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "", "Allowed Endpoints": "", "Already have an account?": "از قبل حساب کاربری دارید؟", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "", "an assistant": "یک دستیار", @@ -208,7 +208,7 @@ "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "ارتباطات", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "برای دسترسی به WebUI با مدیر تماس بگیرید", "Content": "محتوا", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "", "Continue with LDAP": "", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "کنترل\u200cها", - "Controls the balance between coherence and diversity of the output. 
A lower value will result in more focused and coherent text. (Default: 5.0)": "", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "کپی شد", "Copied shared chat URL to clipboard!": "URL چت به کلیپ بورد کپی شد!", "Copied to clipboard": "به بریده\u200cدان کپی\u200cشد", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "", "Enable Message Rating": "", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "فعال کردن ثبت نام\u200cهای جدید", "Enabled": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "اطمینان حاصل کنید که فایل CSV شما شامل چهار ستون در این ترتیب است: نام، ایمیل، رمز عبور، نقش.", @@ -566,7 +566,7 @@ "Include": "شامل", "Include `--api-auth` flag when running stable-diffusion-webui": "", "Include `--api` flag when running stable-diffusion-webui": "فلگ `--api` را هنکام اجرای stable-diffusion-webui استفاده کنید.", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. 
(Default: 0.1)": "", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "اطلاعات", "Input commands": "ورودی دستورات", "Install from Github URL": "نصب از ادرس Github", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "ضبط صدا", "Redirecting you to Open WebUI Community": "در حال هدایت به OpenWebUI Community", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "", "References from": "", "Refused when it shouldn't have": "رد شده زمانی که باید نباشد", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "تنظیم صدا", "Set whisper model": "", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. 
(Default: 64, 0 = disabled, -1 = num_ctx)": "", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "", "Settings": "تنظیمات", "Settings saved successfully!": "تنظیمات با موفقیت ذخیره شد!", @@ -964,7 +964,7 @@ "System Prompt": "پرامپت سیستم", "Tags Generation": "", "Tags Generation Prompt": "", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "با تشکر از بازخورد شما!", "The Application Account DN you bind with for search": "", "The base to search for users": "", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "امتیاز باید یک مقدار بین 0.0 (0%) و 1.0 (100%) باشد.", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "پوسته", "Thinking...": "در حال فکر...", "This action cannot be undone. Do you wish to continue?": "این اقدام قابل بازگردانی نیست. برای ادامه اطمینان دارید؟", "This ensures that your valuable conversations are securely saved to your backend database. 
Thank you!": "این تضمین می کند که مکالمات ارزشمند شما به طور ایمن در پایگاه داده بکند ذخیره می شود. تشکر!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "", "This response was generated by \"{{model}}\"": "", "This will delete": "", @@ -1132,7 +1132,7 @@ "Why?": "", "Widescreen Mode": "حالت صفحهٔ عریض", "Won": "", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "", + "Works together with top-k. 
A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "محیط کار", "Workspace Permissions": "", "Write": "", diff --git a/src/lib/i18n/locales/fi-FI/translation.json b/src/lib/i18n/locales/fi-FI/translation.json index b5e09bb8b20..b9b5f966331 100644 --- a/src/lib/i18n/locales/fi-FI/translation.json +++ b/src/lib/i18n/locales/fi-FI/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "Salli äänen keskeytys puhelussa", "Allowed Endpoints": "Hyväksytyt päätepisteet", "Already have an account?": "Onko sinulla jo tili?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "Vaihtoehto top_p:lle, jolla pyritään varmistamaan laadun ja monipuolisuuden tasapaino. Parametri p edustaa pienintä todennäköisyyttä, jolla token otetaan huomioon suhteessa todennäköisimpään tokeniin. Esimerkiksi p=0.05 ja todennäköisin token todennäköisyydellä 0.9, arvoltaan alle 0.045 olevat logit suodatetaan pois. (Oletus: 0.0)", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "Aina", "Amazing": "Hämmästyttävä", "an assistant": "avustaja", @@ -208,7 +208,7 @@ "Confirm your new password": "Vahvista uusi salasanasi", "Connect to your own OpenAI compatible API endpoints.": "Yhdistä oma OpenAI yhteensopiva API päätepiste.", "Connections": "Yhteydet", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Ota yhteyttä ylläpitäjään WebUI-käyttöä varten", "Content": "Sisältö", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "Jatka sähköpostilla", "Continue with LDAP": "Jatka LDAP:illa", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Säädä, miten viestin teksti jaetaan puhesynteesipyyntöjä varten. 'Välimerkit' jakaa lauseisiin, 'kappaleet' jakaa kappaleisiin ja 'ei mitään' pitää viestin yhtenä merkkijonona.", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "Ohjaimet", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. 
(Default: 5.0)": "Säätelee tulosteen yhtenäisyyden ja monimuotoisuuden välistä tasapainoa. Alhaisempi arvo tuottaa keskittyneempää ja yhtenäisempää tekstiä. (Oletus: 5.0)", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "Kopioitu", "Copied shared chat URL to clipboard!": "Jaettu keskustelulinkki kopioitu leikepöydälle!", "Copied to clipboard": "Kopioitu leikepöydälle", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Ota Memory Locking (mlock) käyttöön estääksesi mallidatan vaihtamisen pois RAM-muistista. Tämä lukitsee mallin työsivut RAM-muistiin, varmistaen että niitä ei vaihdeta levylle. Tämä voi parantaa suorituskykyä välttämällä sivuvikoja ja varmistamalla nopean tietojen käytön.", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Ota Memory Mapping (mmap) käyttöön ladataksesi mallidataa. Tämä vaihtoehto sallii järjestelmän käyttää levytilaa RAM-laajennuksena käsittelemällä levytiedostoja kuin ne olisivat RAM-muistissa. Tämä voi parantaa mallin suorituskykyä sallimalla nopeamman tietojen käytön. Kuitenkin se ei välttämättä toimi oikein kaikissa järjestelmissä ja voi kuluttaa huomattavasti levytilaa.", "Enable Message Rating": "Ota viestiarviointi käyttöön", - "Enable Mirostat sampling for controlling perplexity. 
(Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Ota Mirostat-näytteenotto käyttöön hallinnan monimerkityksellisyydelle. (Oletus: 0, 0 = Ei käytössä, 1 = Mirostat, 2 = Mirostat 2.0)", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Salli uudet rekisteröitymiset", "Enabled": "Käytössä", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Varmista, että CSV-tiedostossasi on 4 saraketta tässä järjestyksessä: Nimi, Sähköposti, Salasana, Rooli.", @@ -566,7 +566,7 @@ "Include": "Sisällytä", "Include `--api-auth` flag when running stable-diffusion-webui": "Sisällytä `--api-auth`-lippu ajettaessa stable-diffusion-webui", "Include `--api` flag when running stable-diffusion-webui": "Sisällytä `--api`-lippu ajettaessa stable-diffusion-webui", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "Vaikuttaa siihen, kuinka nopeasti algoritmi reagoi tuotetusta tekstistä saatuun palautteeseen. Alhaisempi oppimisaste johtaa hitaampiin säätöihin, kun taas korkeampi oppimisaste tekee algoritmista reaktiivisemman. (Oletus: 0.1)", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "Tiedot", "Input commands": "Syötekäskyt", "Install from Github URL": "Asenna Github-URL:stä", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "Nauhoita ääntä", "Redirecting you to Open WebUI Community": "Ohjataan sinut OpenWebUI-yhteisöön", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. 
(Default: 40)": "Vähentää merkityksetöntä sisältöä tuottavan todennäköisyyttä. Korkeampi arvo (esim. 100) antaa monipuolisempia vastauksia, kun taas alhaisempi arvo (esim. 10) on konservatiivisempi. (Oletus: 40)", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Viittaa itseen \"Käyttäjänä\" (esim. \"Käyttäjä opiskelee espanjaa\")", "References from": "Viitteet lähteistä", "Refused when it shouldn't have": "Kieltäytyi, vaikka ei olisi pitänyt", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Aseta työntekijäsäikeiden määrä laskentaa varten. Tämä asetus kontrolloi, kuinka monta säiettä käytetään saapuvien pyyntöjen rinnakkaiseen käsittelyyn. Arvon kasvattaminen voi parantaa suorituskykyä suurissa samanaikaisissa työkuormissa, mutta voi myös kuluttaa enemmän keskussuorittimen resursseja.", "Set Voice": "Aseta puheääni", "Set whisper model": "Aseta whisper-malli", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. 
(Default: 64, 0 = disabled, -1 = num_ctx)": "Määrittää, kuinka kauas taaksepäin malli katsoo välttääkseen toistoa. (Oletus: 64, 0 = pois käytöstä, -1 = num_ctx)", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "Määrittää satunnaislukujen siemenen käytettäväksi generoinnissa. Tämän asettaminen tiettyyn numeroon saa mallin tuottamaan saman tekstin samalle kehoteelle. (Oletus: satunnainen)", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "Määrittää kontekstiikkunan koon, jota käytetään seuraavan tokenin tuottamiseen. (Oletus: 2048)", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Määrittää käytettävät lopetussekvenssit. Kun tämä kuvio havaitaan, LLM lopettaa tekstin tuottamisen ja palauttaa. 
Useita lopetuskuvioita voidaan asettaa määrittämällä useita erillisiä lopetusparametreja mallitiedostoon.", "Settings": "Asetukset", "Settings saved successfully!": "Asetukset tallennettu onnistuneesti!", @@ -964,7 +964,7 @@ "System Prompt": "Järjestelmäkehote", "Tags Generation": "Tagien luonti", "Tags Generation Prompt": "Tagien luontikehote", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Tail-free-otanta käytetään vähentämään vähemmän todennäköisten tokenien vaikutusta tulokseen. Korkeampi arvo (esim. 2,0) vähentää vaikutusta enemmän, kun taas arvo 1,0 poistaa tämän asetuksen käytöstä. (oletus: 1)", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "Napauta keskeyttääksesi", "Tasks": "Tehtävät", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "Kiitos palautteestasi!", "The Application Account DN you bind with for search": "Hakua varten sidottu sovelluksen käyttäjätilin DN", "The base to search for users": "Käyttäjien haun perusta", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "Erän koko määrittää, kuinka monta tekstipyyntöä käsitellään yhdessä kerralla. Suurempi erän koko voi parantaa mallin suorituskykyä ja nopeutta, mutta se vaatii myös enemmän muistia. (Oletus: 512)", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. 
If you find this plugin helpful, please consider contributing to its development.": "Tämän lisäosan takana olevat kehittäjät ovat intohimoisia vapaaehtoisyhteisöstä. Jos koet tämän lisäosan hyödylliseksi, harkitse sen kehittämisen tukemista.", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Arviointitulosluettelo perustuu Elo-luokitusjärjestelmään ja päivittyy reaaliajassa.", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Enimmäistiedostokoko megatavuissa. Jos tiedoston koko ylittää tämän rajan, tiedostoa ei ladata.", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Suurin sallittu tiedostojen määrä käytettäväksi kerralla chatissa. Jos tiedostojen määrä ylittää tämän rajan, niitä ei ladata.", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Pisteytyksen tulee olla arvo välillä 0,0 (0 %) ja 1,0 (100 %).", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "Mallin lämpötila. Lämpötilan nostaminen saa mallin vastaamaan luovemmin. (Oletus: 0,8)", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "Teema", "Thinking...": "Ajattelee...", "This action cannot be undone. Do you wish to continue?": "Tätä toimintoa ei voi peruuttaa. Haluatko jatkaa?", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Tämä varmistaa, että arvokkaat keskustelusi tallennetaan turvallisesti backend-tietokantaasi. 
Kiitos!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Tämä on kokeellinen ominaisuus, se ei välttämättä toimi odotetulla tavalla ja se voi muuttua milloin tahansa.", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "Tämä asetus kontrolloi, kuinka monta tokenia säilytetään päivittäessä kontekstia. Esimerkiksi, jos asetetaan arvoksi 2, säilytetään viimeiset 2 keskustelukon-tekstin tokenia. Kontekstin säilyttäminen voi auttaa ylläpitämään keskustelun jatkuvuutta, mutta se voi vähentää kykyä vastata uusiin aiheisiin. (Oletus: 24)", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "Tämä asetus määrittää mallin vastauksen enimmäistokenmäärän. Tämän rajan nostaminen mahdollistaa mallin antavan pidempiä vastauksia, mutta se voi myös lisätä epähyödyllisen tai epärelevantin sisällön todennäköisyyttä. (Oletus: 128)", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. 
Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "Tämä vaihtoehto poistaa kaikki kokoelman nykyiset tiedostot ja korvaa ne uusilla ladatuilla tiedostoilla.", "This response was generated by \"{{model}}\"": "Tämän vastauksen tuotti \"{{model}}\"", "This will delete": "Tämä poistaa", @@ -1132,7 +1132,7 @@ "Why?": "Miksi?", "Widescreen Mode": "Laajakuvatila", "Won": "Voitti", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "Toimii yhdessä top-k:n kanssa. Korkeampi arvo (esim. 0,95) tuottaa monipuolisempaa tekstiä, kun taas alhaisempi arvo (esim. 0,5) tuottaa keskittyneempää ja konservatiivisempaa tekstiä. (Oletus: 0,9)", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "Työtila", "Workspace Permissions": "Työtilan käyttöoikeudet", "Write": "Kirjoita", diff --git a/src/lib/i18n/locales/fr-CA/translation.json b/src/lib/i18n/locales/fr-CA/translation.json index 7e98dd7e4cf..c6bf7c1efae 100644 --- a/src/lib/i18n/locales/fr-CA/translation.json +++ b/src/lib/i18n/locales/fr-CA/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "Autoriser l'interruption vocale pendant un appel", "Allowed Endpoints": "", "Already have an account?": "Avez-vous déjà un compte ?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "", "an assistant": "un assistant", @@ -208,7 +208,7 @@ "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "Connexions", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Contacter l'administrateur pour l'accès à l'interface Web", "Content": "Contenu", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "", "Continue with LDAP": "", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Contrôle comment le texte des messages est divisé pour les demandes de TTS. 'Ponctuation' divise en phrases, 'paragraphes' divise en paragraphes et 'aucun' garde le message comme une seule chaîne.", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. 
A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "Contrôles", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "", "Copied shared chat URL to clipboard!": "URL du chat copiée dans le presse-papiers\u00a0!", "Copied to clipboard": "", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "", "Enable Message Rating": "", - "Enable Mirostat sampling for controlling perplexity. 
(Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Activer les nouvelles inscriptions", "Enabled": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Vérifiez que votre fichier CSV comprenne les 4 colonnes dans cet ordre : Name, Email, Password, Role.", @@ -566,7 +566,7 @@ "Include": "", "Include `--api-auth` flag when running stable-diffusion-webui": "Inclure le drapeau `--api-auth` lors de l'exécution de stable-diffusion-webui", "Include `--api` flag when running stable-diffusion-webui": "Inclure le drapeau `--api` lorsque vous exécutez stable-diffusion-webui", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "Info", "Input commands": "Entrez les commandes", "Install from Github URL": "Installer depuis l'URL GitHub", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "Enregistrer la voix", "Redirecting you to Open WebUI Community": "Redirection vers la communauté OpenWebUI", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Désignez-vous comme « Utilisateur » (par ex. 
« L'utilisateur apprend l'espagnol »)", "References from": "", "Refused when it shouldn't have": "Refusé alors qu'il n'aurait pas dû l'être", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "Définir la voix", "Set whisper model": "", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. 
Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "", "Settings": "Paramètres", "Settings saved successfully!": "Paramètres enregistrés avec succès !", @@ -964,7 +964,7 @@ "System Prompt": "Prompt du système", "Tags Generation": "", "Tags Generation Prompt": "", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "Appuyez pour interrompre", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "Merci pour vos commentaires !", "The Application Account DN you bind with for search": "", "The base to search for users": "", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. 
If you find this plugin helpful, please consider contributing to its development.": "", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Le score doit être une valeur comprise entre 0,0 (0\u00a0%) et 1,0 (100\u00a0%).", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "Thème", "Thinking...": "En train de réfléchir...", "This action cannot be undone. Do you wish to continue?": "Cette action ne peut pas être annulée. Souhaitez-vous continuer ?", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Cela garantit que vos conversations précieuses soient sauvegardées en toute sécurité dans votre base de données backend. Merci !", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Il s'agit d'une fonctionnalité expérimentale, elle peut ne pas fonctionner comme prévu et est sujette à modification à tout moment.", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "", - "This option sets the maximum number of tokens the model can generate in its response. 
Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "", "This response was generated by \"{{model}}\"": "", "This will delete": "Cela supprimera", @@ -1132,7 +1132,7 @@ "Why?": "", "Widescreen Mode": "Mode Grand Écran", "Won": "", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "", + "Works together with top-k. 
A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "Espace de travail", "Workspace Permissions": "", "Write": "", diff --git a/src/lib/i18n/locales/fr-FR/translation.json b/src/lib/i18n/locales/fr-FR/translation.json index 56ffd65cf4f..d3c8a235623 100644 --- a/src/lib/i18n/locales/fr-FR/translation.json +++ b/src/lib/i18n/locales/fr-FR/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "Autoriser l'interruption vocale pendant un appel", "Allowed Endpoints": "Points de terminaison autorisés", "Already have an account?": "Avez-vous déjà un compte ?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "Alternative au top_p, visant à assurer un équilibre entre qualité et variété. Le paramètre p représente la probabilité minimale pour qu'un token soit pris en compte, par rapport à la probabilité du token le plus probable. Par exemple, avec p=0.05 et le token le plus probable ayant une probabilité de 0.9, les logits ayant une valeur inférieure à 0.045 sont filtrés. (Par défaut : 0.0)", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "Incroyable", "an assistant": "un assistant", @@ -208,7 +208,7 @@ "Confirm your new password": "Confirmer votre nouveau mot de passe", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "Connexions", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "Contraint l'effort de raisonnement pour les modèles de raisonnement. Applicable uniquement aux modèles de raisonnement de fournisseurs spécifiques qui prennent en charge l'effort de raisonnement. (Par défaut : medium)", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Contacter l'administrateur pour obtenir l'accès à WebUI", "Content": "Contenu", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "Continuer avec l'email", "Continue with LDAP": "Continuer avec LDAP", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Contrôle la façon dont le texte des messages est divisé pour les demandes de Text-to-Speech. « ponctuation » divise en phrases, « paragraphes » divise en paragraphes et « aucun » garde le message en tant que chaîne de texte unique.", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. 
A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "Contrôles", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "Contrôle l'équilibre entre la cohérence et la diversité de la sortie. Une valeur plus basse produira un texte plus focalisé et cohérent. (Par défaut : 5.0)", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "Copié", "Copied shared chat URL to clipboard!": "URL du chat copié dans le presse-papiers !", "Copied to clipboard": "Copié dans le presse-papiers", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Activer le verrouillage de la mémoire (mlock) pour empêcher les données du modèle d'être échangées de la RAM. Cette option verrouille l'ensemble de pages de travail du modèle en RAM, garantissant qu'elles ne seront pas échangées vers le disque. Cela peut aider à maintenir les performances en évitant les défauts de page et en assurant un accès rapide aux données.", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Activer le mappage de la mémoire (mmap) pour charger les données du modèle. 
Cette option permet au système d'utiliser le stockage disque comme une extension de la RAM en traitant les fichiers disque comme s'ils étaient en RAM. Cela peut améliorer les performances du modèle en permettant un accès plus rapide aux données. Cependant, cela peut ne pas fonctionner correctement avec tous les systèmes et peut consommer une quantité significative d'espace disque.", "Enable Message Rating": "Activer l'évaluation des messages", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Activer l'échantillonnage Mirostat pour contrôler la perplexité. (Par défaut : 0, 0 = Désactivé, 1 = Mirostat, 2 = Mirostat 2.0)", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Activer les nouvelles inscriptions", "Enabled": "Activé", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Vérifiez que votre fichier CSV comprenne les 4 colonnes dans cet ordre : Name, Email, Password, Role.", @@ -566,7 +566,7 @@ "Include": "Inclure", "Include `--api-auth` flag when running stable-diffusion-webui": "Inclure le drapeau `--api-auth` lors de l'exécution de stable-diffusion-webui", "Include `--api` flag when running stable-diffusion-webui": "Inclure le drapeau `--api` lorsque vous exécutez stable-diffusion-webui", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "Influence la rapidité avec laquelle l'algorithme répond aux retours du texte généré. Un taux d'apprentissage plus bas entraînera des ajustements plus lents, tandis qu'un taux d'apprentissage plus élevé rendra l'algorithme plus réactif. (Par défaut : 0.1)", + "Influences how quickly the algorithm responds to feedback from the generated text. 
A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "Info", "Input commands": "Commandes d'entrée", "Install from Github URL": "Installer depuis une URL GitHub", @@ -809,7 +809,7 @@ "Reasoning Effort": "Effort de raisonnement", "Record voice": "Enregistrer la voix", "Redirecting you to Open WebUI Community": "Redirection vers la communauté OpenWebUI", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "Réduit la probabilité de générer des non-sens. Une valeur plus élevée (par exemple 100) donnera des réponses plus diversifiées, tandis qu'une valeur plus basse (par exemple 10) sera plus conservatrice. (Par défaut : 40)", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Désignez-vous comme « Utilisateur » (par ex. « L'utilisateur apprend l'espagnol »)", "References from": "Références de", "Refused when it shouldn't have": "Refusé alors qu'il n'aurait pas dû l'être", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Définir le nombre de threads de travail utilisés pour le calcul. Cette option contrôle combien de threads sont utilisés pour traiter les demandes entrantes simultanément. 
L'augmentation de cette valeur peut améliorer les performances sous de fortes charges de travail concurrentes mais peut également consommer plus de ressources CPU.", "Set Voice": "Choisir la voix", "Set whisper model": "Choisir le modèle Whisper", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "Définit la profondeur de recherche du modèle pour prévenir les répétitions. (Par défaut : 64, 0 = désactivé, -1 = num_ctx)", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "Définit la graine de nombre aléatoire à utiliser pour la génération. La définition de cette valeur à un nombre spécifique fera que le modèle générera le même texte pour le même prompt. (Par défaut : aléatoire)", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "Définit la taille de la fenêtre contextuelle utilisée pour générer le prochain token. (Par défaut : 2048)", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. 
A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Définit les séquences d'arrêt à utiliser. Lorsque ce motif est rencontré, le LLM cessera de générer du texte et retournera. Plusieurs motifs d'arrêt peuvent être définis en spécifiant plusieurs paramètres d'arrêt distincts dans un fichier modèle.", "Settings": "Paramètres", "Settings saved successfully!": "Paramètres enregistrés avec succès !", @@ -964,7 +964,7 @@ "System Prompt": "Prompt système", "Tags Generation": "Génération de tags", "Tags Generation Prompt": "Prompt de génération de tags", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "L'échantillonnage sans queue est utilisé pour réduire l'impact des tokens moins probables dans la sortie. Une valeur plus élevée (par exemple 2.0) réduira davantage l'impact, tandis qu'une valeur de 1.0 désactive ce paramètre. (par défaut : 1)", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "Parler au modèle", "Tap to interrupt": "Appuyez pour interrompre", "Tasks": "Tâches", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "Merci pour vos commentaires !", "The Application Account DN you bind with for search": "Le DN du compte de l'application avec lequel vous vous liez pour la recherche", "The base to search for users": "La base pour rechercher des utilisateurs", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "La taille de lot détermine combien de demandes de texte sont traitées ensemble en une fois. Une taille de lot plus grande peut augmenter les performances et la vitesse du modèle, mais elle nécessite également plus de mémoire. (Par défaut : 512)", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Les développeurs de ce plugin sont des bénévoles passionnés issus de la communauté. Si vous trouvez ce plugin utile, merci de contribuer à son développement.", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Le classement d'évaluation est basé sur le système de notation Elo et est mis à jour en temps réel.", "The LDAP attribute that maps to the mail that users use to sign in.": "L'attribut LDAP qui correspond à l'adresse e-mail que les utilisateurs utilisent pour se connecter.", @@ -988,14 +988,14 @@ "The maximum file size in MB. 
If the file size exceeds this limit, the file will not be uploaded.": "La taille maximale du fichier en Mo. Si la taille du fichier dépasse cette limite, le fichier ne sera pas téléchargé.", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Le nombre maximal de fichiers pouvant être utilisés en même temps dans la conversation. Si le nombre de fichiers dépasse cette limite, les fichiers ne seront pas téléchargés.", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Le score doit être une valeur comprise entre 0,0 (0%) et 1,0 (100%).", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "La température du modèle. Augmenter la température rendra le modèle plus créatif dans ses réponses. (Par défaut : 0.8)", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "Thème", "Thinking...": "En train de réfléchir...", "This action cannot be undone. Do you wish to continue?": "Cette action ne peut pas être annulée. Souhaitez-vous continuer ?", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Cela garantit que vos conversations précieuses soient sauvegardées en toute sécurité dans votre base de données backend. Merci !", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Il s'agit d'une fonctionnalité expérimentale, elle peut ne pas fonctionner comme prévu et est sujette à modification à tout moment.", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. 
(Default: 24)": "Cette option contrôle combien de tokens sont conservés lors du rafraîchissement du contexte. Par exemple, si ce paramètre est défini à 2, les 2 derniers tokens du contexte de conversation seront conservés. Préserver le contexte peut aider à maintenir la continuité d'une conversation, mais cela peut réduire la capacité à répondre à de nouveaux sujets. (Par défaut : 24)", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "Cette option définit le nombre maximum de tokens que le modèle peut générer dans sa réponse. Augmenter cette limite permet au modèle de fournir des réponses plus longues, mais cela peut également augmenter la probabilité de générer du contenu inutile ou non pertinent. (Par défaut : 128)", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. 
Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "Cette option supprimera tous les fichiers existants dans la collection et les remplacera par les fichiers nouvellement téléchargés.", "This response was generated by \"{{model}}\"": "Cette réponse a été générée par \"{{model}}\"", "This will delete": "Cela supprimera", @@ -1132,7 +1132,7 @@ "Why?": "Pourquoi ?", "Widescreen Mode": "Mode grand écran", "Won": "Victoires", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "Fonctionne avec le top-k. Une valeur plus élevée (par ex. 0.95) donnera un texte plus diversifié, tandis qu'une valeur plus basse (par ex. 0.5) générera un texte plus concentré et conservateur. (Par défaut : 0.9)", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "Espace de travail", "Workspace Permissions": "Autorisations de l'espace de travail", "Write": "Écrire", diff --git a/src/lib/i18n/locales/he-IL/translation.json b/src/lib/i18n/locales/he-IL/translation.json index 8eef95dd45c..3c4be75569a 100644 --- a/src/lib/i18n/locales/he-IL/translation.json +++ b/src/lib/i18n/locales/he-IL/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "", "Allowed Endpoints": "", "Already have an account?": "כבר יש לך חשבון?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "", "an assistant": "עוזר", @@ -208,7 +208,7 @@ "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "חיבורים", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "", "Content": "תוכן", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "", "Continue with LDAP": "", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. 
(Default: 5.0)": "", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "", "Copied shared chat URL to clipboard!": "העתקת כתובת URL של צ'אט משותף ללוח!", "Copied to clipboard": "", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "", "Enable Message Rating": "", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "אפשר הרשמות חדשות", "Enabled": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "ודא שקובץ ה-CSV שלך כולל 4 עמודות בסדר הבא: שם, דוא\"ל, סיסמה, תפקיד.", @@ -566,7 +566,7 @@ "Include": "", "Include `--api-auth` flag when running stable-diffusion-webui": "", "Include `--api` flag when running stable-diffusion-webui": "כלול את הדגל `--api` בעת הרצת stable-diffusion-webui", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "", + "Influences how quickly the algorithm responds to feedback from the generated text. 
A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "מידע", "Input commands": "פקודות קלט", "Install from Github URL": "התקן מכתובת URL של Github", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "הקלט קול", "Redirecting you to Open WebUI Community": "מפנה אותך לקהילת OpenWebUI", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "", "References from": "", "Refused when it shouldn't have": "נדחה כאשר לא היה צריך", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "הגדר קול", "Set whisper model": "", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "", - "Sets the random number seed to use for generation. 
Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "", "Settings": "הגדרות", "Settings saved successfully!": "ההגדרות נשמרו בהצלחה!", @@ -964,7 +964,7 @@ "System Prompt": "תגובת מערכת", "Tags Generation": "", "Tags Generation Prompt": "", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "תודה על המשוב שלך!", "The Application Account DN you bind with for search": "", "The base to search for users": "", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "ציון צריך להיות ערך בין 0.0 (0%) ל-1.0 (100%)", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "נושא", "Thinking...": "", "This action cannot be undone. Do you wish to continue?": "", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "פעולה זו מבטיחה שהשיחות בעלות הערך שלך יישמרו באופן מאובטח במסד הנתונים העורפי שלך. 
תודה!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "", "This response was generated by \"{{model}}\"": "", "This will delete": "", @@ -1132,7 +1132,7 @@ "Why?": "", "Widescreen Mode": "", "Won": "", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "", + "Works together with top-k. 
A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "סביבה", "Workspace Permissions": "", "Write": "", diff --git a/src/lib/i18n/locales/hi-IN/translation.json b/src/lib/i18n/locales/hi-IN/translation.json index 67058123e3d..a0f08fcd86b 100644 --- a/src/lib/i18n/locales/hi-IN/translation.json +++ b/src/lib/i18n/locales/hi-IN/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "", "Allowed Endpoints": "", "Already have an account?": "क्या आपके पास पहले से एक खाता मौजूद है?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "", "an assistant": "एक सहायक", @@ -208,7 +208,7 @@ "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "सम्बन्ध", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. 
Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "", "Content": "सामग्री", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "", "Continue with LDAP": "", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "", "Copied shared chat URL to clipboard!": "साझा चैट URL को क्लिपबोर्ड पर कॉपी किया गया!", "Copied to clipboard": "", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. 
However, it may not work correctly with all systems and can consume a significant amount of disk space.": "", "Enable Message Rating": "", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "नए साइन अप सक्रिय करें", "Enabled": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "सुनिश्चित करें कि आपकी CSV फ़ाइल में इस क्रम में 4 कॉलम शामिल हैं: नाम, ईमेल, पासवर्ड, भूमिका।", @@ -566,7 +566,7 @@ "Include": "", "Include `--api-auth` flag when running stable-diffusion-webui": "", "Include `--api` flag when running stable-diffusion-webui": "stable-diffusion-webui चलाते समय `--api` ध्वज शामिल करें", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "सूचना-विषयक", "Input commands": "इनपुट क命", "Install from Github URL": "Github URL से इंस्टॉल करें", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "आवाज रिकॉर्ड करना", "Redirecting you to Open WebUI Community": "आपको OpenWebUI समुदाय पर पुनर्निर्देशित किया जा रहा है", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 
10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "", "References from": "", "Refused when it shouldn't have": "जब ऐसा नहीं होना चाहिए था तो मना कर दिया", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "आवाज सेट करें", "Set whisper model": "", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. 
At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "", "Settings": "सेटिंग्स", "Settings saved successfully!": "सेटिंग्स सफलतापूर्वक सहेजी गईं!", @@ -964,7 +964,7 @@ "System Prompt": "सिस्टम प्रॉम्प्ट", "Tags Generation": "", "Tags Generation Prompt": "", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "आपकी प्रतिक्रिया के लिए धन्यवाद!", "The Application Account DN you bind with for search": "", "The base to search for users": "", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. 
If you find this plugin helpful, please consider contributing to its development.": "", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "स्कोर का मान 0.0 (0%) और 1.0 (100%) के बीच होना चाहिए।", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "थीम", "Thinking...": "", "This action cannot be undone. Do you wish to continue?": "", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "यह सुनिश्चित करता है कि आपकी मूल्यवान बातचीत आपके बैकएंड डेटाबेस में सुरक्षित रूप से सहेजी गई है। धन्यवाद!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "", + "This option controls how many tokens are preserved when refreshing the context. 
For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "", "This response was generated by \"{{model}}\"": "", "This will delete": "", @@ -1132,7 +1132,7 @@ "Why?": "", "Widescreen Mode": "", "Won": "", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "वर्कस्पेस", "Workspace Permissions": "", "Write": "", diff --git a/src/lib/i18n/locales/hr-HR/translation.json b/src/lib/i18n/locales/hr-HR/translation.json index de7aa166bdb..6ce082d7fd0 100644 --- a/src/lib/i18n/locales/hr-HR/translation.json +++ b/src/lib/i18n/locales/hr-HR/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "", "Allowed Endpoints": "", "Already have an account?": "Već imate račun?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. 
The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "", "an assistant": "asistent", @@ -208,7 +208,7 @@ "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "Povezivanja", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Kontaktirajte admina za WebUI pristup", "Content": "Sadržaj", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "", "Continue with LDAP": "", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "", + "Controls the balance between coherence and diversity of the output. 
A lower value will result in more focused and coherent text.": "", "Copied": "", "Copied shared chat URL to clipboard!": "URL dijeljenog razgovora kopiran u međuspremnik!", "Copied to clipboard": "", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "", "Enable Message Rating": "", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Omogući nove prijave", "Enabled": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Provjerite da vaša CSV datoteka uključuje 4 stupca u ovom redoslijedu: Name, Email, Password, Role.", @@ -566,7 +566,7 @@ "Include": "", "Include `--api-auth` flag when running stable-diffusion-webui": "", "Include `--api` flag when running stable-diffusion-webui": "Uključite zastavicu `--api` prilikom pokretanja stable-diffusion-webui", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "", + "Influences how quickly the algorithm responds to feedback from the generated text. 
A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "Informacije", "Input commands": "Unos naredbi", "Install from Github URL": "Instaliraj s Github URL-a", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "Snimanje glasa", "Redirecting you to Open WebUI Community": "Preusmjeravanje na OpenWebUI zajednicu", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Nazivajte se \"Korisnik\" (npr. \"Korisnik uči španjolski\")", "References from": "", "Refused when it shouldn't have": "Odbijen kada nije trebao biti", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "Postavi glas", "Set whisper model": "", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. 
(Default: 64, 0 = disabled, -1 = num_ctx)": "", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "", "Settings": "Postavke", "Settings saved successfully!": "Postavke su uspješno spremljene!", @@ -964,7 +964,7 @@ "System Prompt": "Sistemski prompt", "Tags Generation": "", "Tags Generation Prompt": "", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "Hvala na povratnim informacijama!", "The Application Account DN you bind with for search": "", "The base to search for users": "", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Ocjena treba biti vrijednost između 0,0 (0%) i 1,0 (100%).", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "Tema", "Thinking...": "Razmišljam", "This action cannot be undone. Do you wish to continue?": "", "This ensures that your valuable conversations are securely saved to your backend database. 
Thank you!": "Ovo osigurava da su vaši vrijedni razgovori sigurno spremljeni u bazu podataka. Hvala vam!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Ovo je eksperimentalna značajka, možda neće funkcionirati prema očekivanjima i podložna je promjenama u bilo kojem trenutku.", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "", "This response was generated by \"{{model}}\"": "", "This will delete": "", @@ -1132,7 +1132,7 @@ "Why?": "", "Widescreen Mode": "Mod širokog zaslona", "Won": "", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "", + "Works together with top-k. 
A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "Radna ploča", "Workspace Permissions": "", "Write": "", diff --git a/src/lib/i18n/locales/hu-HU/translation.json b/src/lib/i18n/locales/hu-HU/translation.json index 4f59a6684b5..f4141b2b9ca 100644 --- a/src/lib/i18n/locales/hu-HU/translation.json +++ b/src/lib/i18n/locales/hu-HU/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "Hang megszakítás engedélyezése hívás közben", "Allowed Endpoints": "", "Already have an account?": "Már van fiókod?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "", "an assistant": "egy asszisztens", @@ -208,7 +208,7 @@ "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "Kapcsolatok", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. 
Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Lépj kapcsolatba az adminnal a WebUI hozzáférésért", "Content": "Tartalom", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "", "Continue with LDAP": "", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Szabályozd, hogyan legyen felosztva az üzenet szövege a TTS kérésekhez. A 'Központozás' mondatokra bontja, a 'Bekezdések' bekezdésekre bontja, a 'Nincs' pedig egyetlen szövegként kezeli az üzenetet.", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "Vezérlők", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "Másolva", "Copied shared chat URL to clipboard!": "Megosztott beszélgetés URL másolva a vágólapra!", "Copied to clipboard": "Vágólapra másolva", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", "Enable Memory Mapping (mmap) to load model data. 
This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "", "Enable Message Rating": "Üzenet értékelés engedélyezése", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Új regisztrációk engedélyezése", "Enabled": "Engedélyezve", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Győződj meg róla, hogy a CSV fájl tartalmazza ezt a 4 oszlopot ebben a sorrendben: Név, Email, Jelszó, Szerep.", @@ -566,7 +566,7 @@ "Include": "Tartalmaz", "Include `--api-auth` flag when running stable-diffusion-webui": "Add hozzá a `--api-auth` kapcsolót a stable-diffusion-webui futtatásakor", "Include `--api` flag when running stable-diffusion-webui": "Add hozzá a `--api` kapcsolót a stable-diffusion-webui futtatásakor", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "Információ", "Input commands": "Beviteli parancsok", "Install from Github URL": "Telepítés Github URL-ről", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "Hang rögzítése", "Redirecting you to Open WebUI Community": "Átirányítás az OpenWebUI közösséghez", - "Reduces the probability of generating nonsense. A higher value (e.g. 
100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Hivatkozzon magára \"Felhasználó\"-ként (pl. \"A Felhasználó spanyolul tanul\")", "References from": "Hivatkozások innen", "Refused when it shouldn't have": "Elutasítva, amikor nem kellett volna", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "Hang beállítása", "Set whisper model": "Whisper modell beállítása", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "", + "Sets a flat bias against tokens that have appeared at least once. 
A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "", "Settings": "Beállítások", "Settings saved successfully!": "Beállítások sikeresen mentve!", @@ -964,7 +964,7 @@ "System Prompt": "Rendszer prompt", "Tags Generation": "", "Tags Generation Prompt": "Címke generálási prompt", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "Koppintson a megszakításhoz", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "Köszönjük a visszajelzést!", "The Application Account DN you bind with for search": "", "The base to search for users": "", - "The batch size determines how many text requests are processed together at once. 
A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "A bővítmény fejlesztői lelkes önkéntesek a közösségből. Ha hasznosnak találja ezt a bővítményt, kérjük, fontolja meg a fejlesztéséhez való hozzájárulást.", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Az értékelési ranglista az Elo értékelési rendszeren alapul és valós időben frissül.", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "A maximális fájlméret MB-ban. Ha a fájlméret meghaladja ezt a limitet, a fájl nem lesz feltöltve.", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "A chatben egyszerre használható fájlok maximális száma. Ha a fájlok száma meghaladja ezt a limitet, a fájlok nem lesznek feltöltve.", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "A pontszámnak 0,0 (0%) és 1,0 (100%) közötti értéknek kell lennie.", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "Téma", "Thinking...": "Gondolkodik...", "This action cannot be undone. Do you wish to continue?": "Ez a művelet nem vonható vissza. 
Szeretné folytatni?", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Ez biztosítja, hogy értékes beszélgetései biztonságosan mentésre kerüljenek a backend adatbázisban. Köszönjük!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Ez egy kísérleti funkció, lehet, hogy nem a várt módon működik és bármikor változhat.", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. 
Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "Ez az opció törli az összes meglévő fájlt a gyűjteményben és lecseréli őket az újonnan feltöltött fájlokkal.", "This response was generated by \"{{model}}\"": "Ezt a választ a \"{{model}}\" generálta", "This will delete": "Ez törölni fogja", @@ -1132,7 +1132,7 @@ "Why?": "", "Widescreen Mode": "Szélesvásznú mód", "Won": "Nyert", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "Munkaterület", "Workspace Permissions": "", "Write": "", diff --git a/src/lib/i18n/locales/id-ID/translation.json b/src/lib/i18n/locales/id-ID/translation.json index acb2fa882a0..3ffe26722ea 100644 --- a/src/lib/i18n/locales/id-ID/translation.json +++ b/src/lib/i18n/locales/id-ID/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "Izinkan Gangguan Suara dalam Panggilan", "Allowed Endpoints": "", "Already have an account?": "Sudah memiliki akun?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. 
The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "", "an assistant": "asisten", @@ -208,7 +208,7 @@ "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "Koneksi", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Hubungi Admin untuk Akses WebUI", "Content": "Konten", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "", "Continue with LDAP": "", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "", + "Controls the balance between coherence and diversity of the output. 
A lower value will result in more focused and coherent text.": "", "Copied": "", "Copied shared chat URL to clipboard!": "Menyalin URL obrolan bersama ke papan klip!", "Copied to clipboard": "", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "", "Enable Message Rating": "", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Aktifkan Pendaftaran Baru", "Enabled": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Pastikan file CSV Anda menyertakan 4 kolom dengan urutan sebagai berikut: Nama, Email, Kata Sandi, Peran.", @@ -566,7 +566,7 @@ "Include": "", "Include `--api-auth` flag when running stable-diffusion-webui": "Sertakan bendera `--api-auth` saat menjalankan stable-diffusion-webui", "Include `--api` flag when running stable-diffusion-webui": "Sertakan bendera `--api` saat menjalankan stable-diffusion-webui", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "", + "Influences how quickly the algorithm responds to feedback from the generated text. 
A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "Info", "Input commands": "Perintah masukan", "Install from Github URL": "Instal dari URL Github", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "Rekam suara", "Redirecting you to Open WebUI Community": "Mengarahkan Anda ke Komunitas OpenWebUI", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Merujuk diri Anda sebagai \"Pengguna\" (misalnya, \"Pengguna sedang belajar bahasa Spanyol\")", "References from": "", "Refused when it shouldn't have": "Menolak ketika seharusnya tidak", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "Mengatur Suara", "Set whisper model": "", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. 
(Default: 64, 0 = disabled, -1 = num_ctx)": "", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "", "Settings": "Pengaturan", "Settings saved successfully!": "Pengaturan berhasil disimpan!", @@ -964,7 +964,7 @@ "System Prompt": "Permintaan Sistem", "Tags Generation": "", "Tags Generation Prompt": "", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "Ketuk untuk menyela", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "Terima kasih atas umpan balik Anda!", "The Application Account DN you bind with for search": "", "The base to search for users": "", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Nilai yang diberikan haruslah nilai antara 0,0 (0%) dan 1,0 (100%).", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "Tema", "Thinking...": "Berpikir", "This action cannot be undone. Do you wish to continue?": "Tindakan ini tidak dapat dibatalkan. 
Apakah Anda ingin melanjutkan?", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Ini akan memastikan bahwa percakapan Anda yang berharga disimpan dengan aman ke basis data backend. Terima kasih!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Ini adalah fitur eksperimental, mungkin tidak berfungsi seperti yang diharapkan dan dapat berubah sewaktu-waktu.", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "", "This response was generated by \"{{model}}\"": "", "This will delete": "Ini akan menghapus", @@ -1132,7 +1132,7 @@ "Why?": "", "Widescreen Mode": "Mode Layar Lebar", "Won": "", - "Works together with top-k. 
A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "Ruang Kerja", "Workspace Permissions": "", "Write": "", diff --git a/src/lib/i18n/locales/ie-GA/translation.json b/src/lib/i18n/locales/ie-GA/translation.json index 29bf681544c..025e21f9e81 100644 --- a/src/lib/i18n/locales/ie-GA/translation.json +++ b/src/lib/i18n/locales/ie-GA/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "Ceadaigh Briseadh Guth i nGlao", "Allowed Endpoints": "Críochphointí Ceadaithe", "Already have an account?": "Tá cuntas agat cheana féin?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "Rogha eile seachas an top_p, agus tá sé mar aidhm aige cothromaíocht cáilíochta agus éagsúlachta a chinntiú. Léiríonn an paraiméadar p an dóchúlacht íosta go mbreithneofar comhartha, i gcoibhneas le dóchúlacht an chomhartha is dóichí. Mar shampla, le p=0.05 agus dóchúlacht 0.9 ag an comhartha is dóichí, déantar logits le luach níos lú ná 0.045 a scagadh amach. (Réamhshocrú: 0.0)", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "I gcónaí", "Amazing": "Iontach", "an assistant": "cúntóir", @@ -208,7 +208,7 @@ "Confirm your new password": "Deimhnigh do phasfhocal nua", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "Naisc", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "Srianann iarracht ar réasúnaíocht a dhéanamh ar shamhlacha réasúnaíochta. Ní bhaineann ach le samhlacha réasúnaíochta ó sholáthraithe sonracha a thacaíonn le hiarracht réasúnaíochta. (Réamhshocrú: meánach)", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Déan teagmháil le Riarachán le haghaidh Rochtana WebUI", "Content": "Ábhar", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "Lean ar aghaidh le Ríomhphost", "Continue with LDAP": "Lean ar aghaidh le LDAP", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Rialú conas a roinntear téacs teachtaireachta d'iarratais TTS. Roinneann 'poncaíocht' ina abairtí, scoilteann 'míreanna' i míreanna, agus coinníonn 'aon' an teachtaireacht mar shreang amháin.", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. 
At 1, it is disabled.": "", "Controls": "Rialuithe", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "Rialaíonn sé an chothromaíocht idir comhleanúnachas agus éagsúlacht an aschuir. Beidh téacs níos dírithe agus níos soiléire mar thoradh ar luach níos ísle. (Réamhshocrú: 5.0)", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "Cóipeáladh", "Copied shared chat URL to clipboard!": "Cóipeáladh URL an chomhrá roinnte chuig an ngearrthaisce!", "Copied to clipboard": "Cóipeáilte go gear", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Cumasaigh Glasáil Cuimhne (mlock) chun sonraí samhaltaithe a chosc ó RAM. Glasálann an rogha seo sraith oibre leathanaigh an mhúnla isteach i RAM, ag cinntiú nach ndéanfar iad a mhalartú go diosca. Is féidir leis seo cabhrú le feidhmíocht a choinneáil trí lochtanna leathanaigh a sheachaint agus rochtain tapa ar shonraí a chinntiú.", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Cumasaigh Mapáil Cuimhne (mmap) chun sonraí samhla a lódáil. Ligeann an rogha seo don chóras stóráil diosca a úsáid mar leathnú ar RAM trí chomhaid diosca a chóireáil amhail is dá mba i RAM iad. Is féidir leis seo feidhmíocht na samhla a fheabhsú trí rochtain níos tapúla ar shonraí a cheadú. 
Mar sin féin, d'fhéadfadh sé nach n-oibreoidh sé i gceart le gach córas agus féadfaidh sé méid suntasach spáis diosca a ithe.", "Enable Message Rating": "Cumasaigh Rátáil Teachtai", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Cumasaigh sampláil Mirostat chun seachrán a rialú. (Réamhshocrú: 0, 0 = Díchumasaithe, 1 = Mirostat, 2 = Mirostat 2.0)", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Cumasaigh Clárúcháin Nua", "Enabled": "Cumasaithe", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Déan cinnte go bhfuil 4 cholún san ord seo i do chomhad CSV: Ainm, Ríomhphost, Pasfhocal, Ról.", @@ -566,7 +566,7 @@ "Include": "Cuir san áireamh", "Include `--api-auth` flag when running stable-diffusion-webui": "Cuir bratach `--api-auth` san áireamh agus webui stable-diffusion-reatha á rith", "Include `--api` flag when running stable-diffusion-webui": "Cuir bratach `--api` san áireamh agus webui cobhsaí-scaipthe á rith", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "Bíonn tionchar aige ar chomh tapa agus a fhreagraíonn an t-algartam d’aiseolas ón téacs ginte. Beidh coigeartuithe níos moille mar thoradh ar ráta foghlama níos ísle, agus déanfaidh ráta foghlama níos airde an t-algartam níos freagraí. (Réamhshocrú: 0.1)", + "Influences how quickly the algorithm responds to feedback from the generated text. 
A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "Eolas", "Input commands": "Orduithe ionchuir", "Install from Github URL": "Suiteáil ó Github URL", @@ -809,7 +809,7 @@ "Reasoning Effort": "Iarracht Réasúnúcháin", "Record voice": "Taifead guth", "Redirecting you to Open WebUI Community": "Tú a atreorú chuig OpenWebUI Community", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "Laghdaíonn sé an dóchúlacht go giniúint nonsense. Tabharfaidh luach níos airde (m.sh. 100) freagraí níos éagsúla, agus beidh luach níos ísle (m.sh. 10) níos coimeádaí. (Réamhshocrú: 40)", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Tagairt duit féin mar \"Úsáideoir\" (m.sh., \"Tá an úsáideoir ag foghlaim Spáinnis\")", "References from": "Tagairtí ó", "Refused when it shouldn't have": "Diúltaíodh nuair nár chóir dó", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Socraigh líon na snáitheanna oibrithe a úsáidtear le haghaidh ríomh. Rialaíonn an rogha seo cé mhéad snáithe a úsáidtear chun iarratais a thagann isteach a phróiseáil i gcomhthráth. D'fhéadfadh méadú ar an luach seo feidhmíocht a fheabhsú faoi ualaí oibre comhairgeadra ard ach féadfaidh sé níos mó acmhainní LAP a úsáid freisin.", "Set Voice": "Socraigh Guth", "Set whisper model": "Socraigh múnla cogar", - "Sets a flat bias against tokens that have appeared at least once. 
A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "Socraíonn sé cé chomh fada siar is atá an tsamhail le breathnú siar chun athrá a chosc. (Réamhshocrú: 64, 0 = díchumasaithe, -1 = num_ctx)", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "Socraíonn sé an síol uimhir randamach a úsáid le haghaidh giniúna. Má shocraítear é seo ar uimhir shainiúil, ginfidh an tsamhail an téacs céanna don leid céanna. (Réamhshocrú: randamach)", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "Socraíonn sé méid na fuinneoige comhthéacs a úsáidtear chun an chéad chomhartha eile a ghiniúint. (Réamhshocrú: 2048)", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. 
Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Socraíonn sé na stadanna le húsáid. Nuair a thagtar ar an bpatrún seo, stopfaidh an LLM ag giniúint téacs agus ag filleadh. Is féidir patrúin stad iolracha a shocrú trí pharaiméadair stadanna iolracha a shonrú i gcomhad samhail.", "Settings": "Socruithe", "Settings saved successfully!": "Socruithe sábhálta go rathúil!", @@ -964,7 +964,7 @@ "System Prompt": "Córas Leid", "Tags Generation": "Giniúint Clibeanna", "Tags Generation Prompt": "Clibeanna Giniúint Leid", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Úsáidtear sampláil saor ó eireabaill chun tionchar na n-chomharthaí ón aschur nach bhfuil chomh dóchúil céanna a laghdú. Laghdóidh luach níos airde (m.sh., 2.0) an tionchar níos mó, agus díchumasaíonn luach 1.0 an socrú seo. (réamhshocraithe: 1)", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "Tapáil chun cur isteach", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "Go raibh maith agat as do chuid aiseolas!", "The Application Account DN you bind with for search": "An Cuntas Feidhmchláir DN a nascann tú leis le haghaidh cuardaigh", "The base to search for users": "An bonn chun cuardach a dhéanamh ar úsáideoirí", - "The batch size determines how many text requests are processed together at once. 
A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "Cinneann méid an bhaisc cé mhéad iarratas téacs a phróiseáiltear le chéile ag an am céanna. Is féidir le méid baisc níos airde feidhmíocht agus luas an mhúnla a mhéadú, ach éilíonn sé níos mó cuimhne freisin. (Réamhshocrú: 512)", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Is deonacha paiseanta ón bpobal iad na forbróirí taobh thiar den bhreiseán seo. Má aimsíonn an breiseán seo cabhrach leat, smaoinigh ar rannchuidiú lena fhorbairt.", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Tá an clár ceannairí meastóireachta bunaithe ar chóras rátála Elo agus déantar é a nuashonrú i bhfíor-am.", "The LDAP attribute that maps to the mail that users use to sign in.": "An tréith LDAP a mhapálann don ríomhphost a úsáideann úsáideoirí chun síniú isteach.", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Uasmhéid an chomhaid i MB. Má sháraíonn méid an chomhaid an teorainn seo, ní uaslódófar an comhad.", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "An líon uasta na gcomhaid is féidir a úsáid ag an am céanna i gcomhrá. Má sháraíonn líon na gcomhaid an teorainn seo, ní uaslódófar na comhaid.", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Ba chóir go mbeadh an scór ina luach idir 0.0 (0%) agus 1.0 (100%).", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. 
(Default: 0.8)": "Teocht an mhúnla. Déanfaidh méadú ar an teocht an freagra múnla níos cruthaithí. (Réamhshocrú: 0.8)", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "Téama", "Thinking...": "Ag smaoineamh...", "This action cannot be undone. Do you wish to continue?": "Ní féidir an gníomh seo a chur ar ais. Ar mhaith leat leanúint ar aghaidh?", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Cinntíonn sé seo go sábhálfar do chomhráite luachmhara go daingean i do bhunachar sonraí cúltaca Go raibh maith agat!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Is gné turgnamhach í seo, b'fhéidir nach bhfeidhmeoidh sé mar a bhíothas ag súil leis agus tá sé faoi réir athraithe ag am ar bith.", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "Rialaíonn an rogha seo cé mhéad comhartha a chaomhnaítear agus an comhthéacs á athnuachan. Mar shampla, má shocraítear go 2 é, coinneofar an 2 chomhartha dheireanacha de chomhthéacs an chomhrá. Is féidir le comhthéacs a chaomhnú cabhrú le leanúnachas comhrá a choinneáil, ach d’fhéadfadh sé laghdú a dhéanamh ar an gcumas freagairt do thopaicí nua. (Réamhshocrú: 24)", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "Socraíonn an rogha seo an t-uaslíon comharthaí is féidir leis an tsamhail a ghiniúint ina fhreagra. 
Tríd an teorainn seo a mhéadú is féidir leis an tsamhail freagraí níos faide a sholáthar, ach d’fhéadfadh go méadódh sé an dóchúlacht go nginfear ábhar neamhchabhrach nó nach mbaineann le hábhar. (Réamhshocrú: 128)", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "Scriosfaidh an rogha seo gach comhad atá sa bhailiúchán agus cuirfear comhaid nua-uaslódála ina n-ionad.", "This response was generated by \"{{model}}\"": "Gin an freagra seo ag \"{{model}}\"", "This will delete": "Scriosfaidh sé seo", @@ -1132,7 +1132,7 @@ "Why?": "Cén fáth?", "Widescreen Mode": "Mód Leathanscáileán", "Won": "Bhuaigh", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "Oibríonn sé le barr-k. Beidh téacs níos éagsúla mar thoradh ar luach níos airde (m.sh., 0.95), agus ginfidh luach níos ísle (m.sh., 0.5) téacs níos dírithe agus níos coimeádaí. (Réamhshocrú: 0.9)", + "Works together with top-k. 
A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "Spás oibre", "Workspace Permissions": "Ceadanna Spás Oibre", "Write": "Scríobh", diff --git a/src/lib/i18n/locales/it-IT/translation.json b/src/lib/i18n/locales/it-IT/translation.json index b8eb02eebbb..795f026d98d 100644 --- a/src/lib/i18n/locales/it-IT/translation.json +++ b/src/lib/i18n/locales/it-IT/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "", "Allowed Endpoints": "", "Already have an account?": "Hai già un account?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "", "an assistant": "un assistente", @@ -208,7 +208,7 @@ "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "Connessioni", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. 
Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "", "Content": "Contenuto", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "", "Continue with LDAP": "", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "", "Copied shared chat URL to clipboard!": "URL della chat condivisa copiato negli appunti!", "Copied to clipboard": "", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. 
However, it may not work correctly with all systems and can consume a significant amount of disk space.": "", "Enable Message Rating": "", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Abilita nuove iscrizioni", "Enabled": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Assicurati che il tuo file CSV includa 4 colonne in questo ordine: Nome, Email, Password, Ruolo.", @@ -566,7 +566,7 @@ "Include": "", "Include `--api-auth` flag when running stable-diffusion-webui": "", "Include `--api` flag when running stable-diffusion-webui": "Includi il flag `--api` quando esegui stable-diffusion-webui", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "Informazioni", "Input commands": "Comandi di input", "Install from Github URL": "Eseguire l'installazione dall'URL di Github", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "Registra voce", "Redirecting you to Open WebUI Community": "Reindirizzamento alla comunità OpenWebUI", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 
10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "", "References from": "", "Refused when it shouldn't have": "Rifiutato quando non avrebbe dovuto", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "Imposta voce", "Set whisper model": "", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. 
At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "", "Settings": "Impostazioni", "Settings saved successfully!": "Impostazioni salvate con successo!", @@ -964,7 +964,7 @@ "System Prompt": "Prompt di sistema", "Tags Generation": "", "Tags Generation Prompt": "", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "Grazie per il tuo feedback!", "The Application Account DN you bind with for search": "", "The base to search for users": "", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. 
If you find this plugin helpful, please consider contributing to its development.": "", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Il punteggio dovrebbe essere un valore compreso tra 0.0 (0%) e 1.0 (100%).", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "Tema", "Thinking...": "", "This action cannot be undone. Do you wish to continue?": "", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Ciò garantisce che le tue preziose conversazioni siano salvate in modo sicuro nel tuo database backend. Grazie!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. 
(Default: 128)": "", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "", "This response was generated by \"{{model}}\"": "", "This will delete": "", @@ -1132,7 +1132,7 @@ "Why?": "", "Widescreen Mode": "", "Won": "", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "Area di lavoro", "Workspace Permissions": "", "Write": "", diff --git a/src/lib/i18n/locales/ja-JP/translation.json b/src/lib/i18n/locales/ja-JP/translation.json index ebe4d2fc6ca..252d30029e3 100644 --- a/src/lib/i18n/locales/ja-JP/translation.json +++ b/src/lib/i18n/locales/ja-JP/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "通話中に音声の割り込みを許可", "Allowed Endpoints": "", "Already have an account?": "すでにアカウントをお持ちですか?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "", "an assistant": "アシスタント", @@ -208,7 +208,7 @@ "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "接続", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "WEBUIへの接続について管理者に問い合わせ下さい。", "Content": "コンテンツ", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "", "Continue with LDAP": "", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "コントロール", - "Controls the balance between coherence and diversity of the output. 
A lower value will result in more focused and coherent text. (Default: 5.0)": "", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "コピー", "Copied shared chat URL to clipboard!": "共有チャットURLをクリップボードにコピーしました!", "Copied to clipboard": "クリップボードにコピーしました。", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "", "Enable Message Rating": "メッセージ評価を有効にする", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "新規登録を有効にする", "Enabled": "有効", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "CSVファイルに4つの列が含まれていることを確認してください: Name, Email, Password, Role.", @@ -566,7 +566,7 @@ "Include": "", "Include `--api-auth` flag when running stable-diffusion-webui": "", "Include `--api` flag when running stable-diffusion-webui": "stable-diffusion-webuiを実行する際に`--api`フラグを含める", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "", + "Influences how quickly the algorithm responds to feedback from the generated text. 
A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "情報", "Input commands": "入力コマンド", "Install from Github URL": "Github URLからインストール", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "音声を録音", "Redirecting you to Open WebUI Community": "OpenWebUI コミュニティにリダイレクトしています", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "", "References from": "", "Refused when it shouldn't have": "拒否すべきでないのに拒否した", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "音声を設定", "Set whisper model": "", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. 
(Default: random)": "", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "", "Settings": "設定", "Settings saved successfully!": "設定が正常に保存されました!", @@ -964,7 +964,7 @@ "System Prompt": "システムプロンプト", "Tags Generation": "", "Tags Generation Prompt": "", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "ご意見ありがとうございます!", "The Application Account DN you bind with for search": "", "The base to search for users": "", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "スコアは0.0(0%)から1.0(100%)の間の値にしてください。", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "テーマ", "Thinking...": "思考中...", "This action cannot be undone. Do you wish to continue?": "このアクションは取り消し不可です。続けますか?", "This ensures that your valuable conversations are securely saved to your backend database. 
Thank you!": "これは、貴重な会話がバックエンドデータベースに安全に保存されることを保証します。ありがとうございます!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "実験的機能であり正常動作しない場合があります。", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "", "This response was generated by \"{{model}}\"": "", "This will delete": "", @@ -1132,7 +1132,7 @@ "Why?": "", "Widescreen Mode": "ワイドスクリーンモード", "Won": "", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "", + "Works together with top-k. 
A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "ワークスペース", "Workspace Permissions": "", "Write": "", diff --git a/src/lib/i18n/locales/ka-GE/translation.json b/src/lib/i18n/locales/ka-GE/translation.json index 952788840bf..aba923399e4 100644 --- a/src/lib/i18n/locales/ka-GE/translation.json +++ b/src/lib/i18n/locales/ka-GE/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "", "Allowed Endpoints": "დაშვებული ბოლოწერტილები", "Already have an account?": "უკვე გაქვთ ანგარიში?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "ყოველთვის", "Amazing": "გადასარევია", "an assistant": "დამხმარე", @@ -208,7 +208,7 @@ "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "კავშირები", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. 
Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "", "Content": "შემცველობა", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "", "Continue with LDAP": "", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "მმართველები", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "დაკოპირდა", "Copied shared chat URL to clipboard!": "გაზიარებული ჩატის ბმული დაკოპირდა ბუფერში!", "Copied to clipboard": "დაკოპირდა გაცვლის ბაფერში", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. 
However, it may not work correctly with all systems and can consume a significant amount of disk space.": "", "Enable Message Rating": "", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "ახალი რეგისტრაციების ჩართვა", "Enabled": "ჩართულია", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "დარწმუნდით, რომ თქვენი CSV-ფაილი შეიცავს 4 ველს ამ მიმდევრობით: სახელი, ელფოსტა, პაროლი, როლი.", @@ -566,7 +566,7 @@ "Include": "ჩართვა", "Include `--api-auth` flag when running stable-diffusion-webui": "", "Include `--api` flag when running stable-diffusion-webui": "`--api` ალმის ჩასმა stable-diffusion-webui-ის გამოყენებისას", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "ინფორმაცია", "Input commands": "შეიყვანეთ ბრძანებები", "Install from Github URL": "დაყენება Github-ის ბმულიდან", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "ხმის ჩაწერა", "Redirecting you to Open WebUI Community": "მიმდინარეობს გადამისამართება OpenWebUI-ის საზოგადოების საიტზე", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 
10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "", "References from": "", "Refused when it shouldn't have": "უარა, როგორც უნდა იყოს", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "ხმის დაყენება", "Set whisper model": "", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. 
At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "", "Settings": "მორგება", "Settings saved successfully!": "პარამეტრები შენახვა წარმატებულია!", @@ -964,7 +964,7 @@ "System Prompt": "სისტემური მოთხოვნა", "Tags Generation": "", "Tags Generation Prompt": "", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "", "Tasks": "ამოცანები", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "მადლობა გამოხმაურებისთვის!", "The Application Account DN you bind with for search": "", "The base to search for users": "", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. 
If you find this plugin helpful, please consider contributing to its development.": "", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "რეიტინგი უნდა იყოს მნიშვნელობ შუალედიდან 0.0 (0%) - 1.0 (100%).", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "თემა", "Thinking...": "", "This action cannot be undone. Do you wish to continue?": "", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "ეს უზრუნველყოფს, რომ თქვენი ღირებული საუბრები უსაფრთხოდ შეინახება თქვენს უკანაბოლო მონაცემთა ბაზაში. მადლობა!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "", + "This option controls how many tokens are preserved when refreshing the context. 
For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "", "This response was generated by \"{{model}}\"": "", "This will delete": "", @@ -1132,7 +1132,7 @@ "Why?": "რატომ?", "Widescreen Mode": "", "Won": "ვონი", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "სამუშაო სივრცე", "Workspace Permissions": "", "Write": "ჩაწერა", diff --git a/src/lib/i18n/locales/ko-KR/translation.json b/src/lib/i18n/locales/ko-KR/translation.json index f35469cb4e9..a94bc49e59e 100644 --- a/src/lib/i18n/locales/ko-KR/translation.json +++ b/src/lib/i18n/locales/ko-KR/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "음성 기능에서 음성 방해 허용", "Allowed Endpoints": "", "Already have an account?": "이미 계정이 있으신가요?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. 
(Default: 0.0)": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "놀라움", "an assistant": "어시스턴트", @@ -208,7 +208,7 @@ "Confirm your new password": "새로운 비밀번호를 한 번 더 입력해 주세요", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "연결", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "WebUI 접속을 위해서는 관리자에게 연락에 연락하십시오", "Content": "내용", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "", "Continue with LDAP": "", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "TTS 요청에 메시지가 어떻게 나뉘어지는지 제어하십시오. '문장 부호'는 문장으로 나뉘고, '문단'은 문단으로 나뉘고, '없음'은 메세지를 하나의 문자열로 인식합니다.", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "제어", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. 
(Default: 5.0)": "", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "복사됨", "Copied shared chat URL to clipboard!": "채팅 공유 URL이 클립보드에 복사되었습니다!", "Copied to clipboard": "클립보드에 복사되었습니다", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "", "Enable Message Rating": "메시지 평가 활성화", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "새 회원가입 활성화", "Enabled": "활성화됨", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "CSV 파일에 이름, 이메일, 비밀번호, 역할 4개의 열이 순서대로 포함되어 있는지 확인하세요.", @@ -566,7 +566,7 @@ "Include": "포함", "Include `--api-auth` flag when running stable-diffusion-webui": "stable-diffusion-webui를 실행 시 `--api-auth` 플래그를 포함하세요", "Include `--api` flag when running stable-diffusion-webui": "stable-diffusion-webui를 실행 시 `--api` 플래그를 포함하세요", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "", + "Influences how quickly the algorithm responds to feedback from the generated text. 
A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "정보", "Input commands": "명령어 입력", "Install from Github URL": "Github URL에서 설치", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "음성 녹음", "Redirecting you to Open WebUI Community": "OpenWebUI 커뮤니티로 리디렉션 중", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "스스로를 \"사용자\" 라고 지칭하세요. (예: \"사용자는 영어를 배우고 있습니다\")", "References from": "출처", "Refused when it shouldn't have": "허용되지 않았지만 허용되어야 합니다.", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "음성 설정", "Set whisper model": "자막 생성기 모델 설정", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "", - "Sets the random number seed to use for generation. 
Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "생성을 위한 무작위 숫자 시드를 설정합니다. 이 값을 특정 숫자로 설정하면 동일한 프롬프트에 대해 동일한 텍스트를 생성합니다. (기본값: 무작위)",
-    "Sets the size of the context window used to generate the next token. (Default: 2048)": "",
+    "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+    "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+    "Sets how far back for the model to look back to prevent repetition.": "",
+    "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "생성을 위한 무작위 숫자 시드를 설정합니다. 이 값을 특정 숫자로 설정하면 동일한 프롬프트에 대해 동일한 텍스트를 생성합니다.",
+    "Sets the size of the context window used to generate the next token.": "",
     "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "중단 시퀀스를 설정합니다. 이 패턴이 발생하면 LLM은 텍스트 생성을 중단하고 반환합니다. 여러 중단 패턴은 모델 파일에서 여러 개의 별도 중단 매개변수를 지정하여 설정할 수 있습니다.",
     "Settings": "설정",
     "Settings saved successfully!": "설정이 성공적으로 저장되었습니다!",
@@ -964,7 +964,7 @@
     "System Prompt": "시스템 프롬프트",
     "Tags Generation": "태그 생성",
     "Tags Generation Prompt": "태그 생성 프롬프트",
-    "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+    "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "탭하여 중단", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "피드백 감사합니다!", "The Application Account DN you bind with for search": "", "The base to search for users": "", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "이 플러그인 뒤에 있는 개발자는 커뮤니티에서 활동하는 단순한 열정적인 일반인들입니다. 만약 플러그인이 도움 되었다면, 플러그인 개발에 기여를 고려해주세요!", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "평가 리더보드는 Elo 평가 시스템을 기반으로 하고 실시간으로 업데이트됩니다", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "최대 파일 크기(MB). 만약 파일 크기가 한도를 초과할 시, 파일은 업로드되지 않습니다", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "하나의 채팅에서는 사용가능한 최대 파일 수가 있습니다. 만약 파일 수가 한도를 초과할 시, 파일은 업로드되지 않습니다.", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "점수는 0.0(0%)에서 1.0(100%) 사이의 값이어야 합니다.", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "모델의 온도. 온도를 높이면 모델이 더 창의적으로 답변합니다. (기본값: 0.8)", + "The temperature of the model. 
Increasing the temperature will make the model answer more creatively.": "모델의 온도. 온도를 높이면 모델이 더 창의적으로 답변합니다.",
     "Theme": "테마",
     "Thinking...": "생각 중...",
     "This action cannot be undone. Do you wish to continue?": "이 액션은 되돌릴 수 없습니다. 계속 하시겠습니까?",
     "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "이렇게 하면 소중한 대화 내용이 백엔드 데이터베이스에 안전하게 저장됩니다. 감사합니다!",
     "This is an experimental feature, it may not function as expected and is subject to change at any time.": "이것은 실험적 기능으로, 예상대로 작동하지 않을 수 있으며 언제든지 변경될 수 있습니다.",
-    "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "",
-    "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "",
+    "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+    "This option sets the maximum number of tokens the model can generate in its response. 
Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "이 행동은 컬렉션에 존재하는 모든 파일을 삭제하고 새로 업로드된 파일들로 대체됩니다", "This response was generated by \"{{model}}\"": "\"{{model}}\"이 생성한 응답입니다", "This will delete": "이것은 다음을 삭제합니다.", @@ -1132,7 +1132,7 @@ "Why?": "이유는?", "Widescreen Mode": "와이드스크린 모드", "Won": "승리", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "워크스페이스", "Workspace Permissions": "워크스페이스 권한", "Write": "", diff --git a/src/lib/i18n/locales/lt-LT/translation.json b/src/lib/i18n/locales/lt-LT/translation.json index 999c19ca505..10449ddfa9c 100644 --- a/src/lib/i18n/locales/lt-LT/translation.json +++ b/src/lib/i18n/locales/lt-LT/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "Leisti pertraukimą skambučio metu", "Allowed Endpoints": "", "Already have an account?": "Ar jau turite paskyrą?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "", "an assistant": "assistentas", @@ -208,7 +208,7 @@ "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "Ryšiai", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Susisiekite su administratoriumi dėl prieigos", "Content": "Turinys", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "", "Continue with LDAP": "", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "Valdymas", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "", + "Controls the balance between coherence and diversity of the output. 
A lower value will result in more focused and coherent text.": "", "Copied": "Nukopijuota", "Copied shared chat URL to clipboard!": "Nukopijavote pokalbio nuorodą", "Copied to clipboard": "", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "", "Enable Message Rating": "", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Aktyvuoti naujas registracijas", "Enabled": "Leisti", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Įsitikinkite, kad CSV failas turi 4 kolonas šiuo eiliškumu: Name, Email, Password, Role.", @@ -566,7 +566,7 @@ "Include": "", "Include `--api-auth` flag when running stable-diffusion-webui": "Įtraukti `--api-auth` flag when running stable-diffusion-webui", "Include `--api` flag when running stable-diffusion-webui": "Pridėti `--api` kai vykdomas stable-diffusion-webui", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "", + "Influences how quickly the algorithm responds to feedback from the generated text. 
A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "Informacija", "Input commands": "Įvesties komandos", "Install from Github URL": "Instaliuoti Github nuorodą", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "Įrašyti balsą", "Redirecting you to Open WebUI Community": "Perkeliam Jus į OpenWebUI bendruomenę", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Vadinkite save Naudotoju (pvz. Naudotojas mokosi prancūzų kalbos)", "References from": "", "Refused when it shouldn't have": "Atmesta kai neturėtų būti atmesta", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "Numatyti balsą", "Set whisper model": "", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. 
(Default: 64, 0 = disabled, -1 = num_ctx)": "", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "", "Settings": "Nustatymai", "Settings saved successfully!": "Parametrai sėkmingai išsaugoti!", @@ -964,7 +964,7 @@ "System Prompt": "Sistemos užklausa", "Tags Generation": "", "Tags Generation Prompt": "", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "Paspauskite norėdami pertraukti", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "Ačiū už atsiliepimus", "The Application Account DN you bind with for search": "", "The base to search for users": "", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Šis modulis kuriamas savanorių. Palaikykite jų darbus finansiškai arba prisidėdami kodu.", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Rezultatas turėtų būti tarp 0.0 (0%) ir 1.0 (100%)", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "Tema", "Thinking...": "Mąsto...", "This action cannot be undone. Do you wish to continue?": "Šis veiksmas negali būti atšauktas. 
Ar norite tęsti?", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Tai užtikrina, kad Jūsų pokalbiai saugiai saugojami duomenų bazėje. Ačiū!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Tai eksperimentinė funkcija ir gali veikti nevisada.", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "", "This response was generated by \"{{model}}\"": "", "This will delete": "Tai ištrins", @@ -1132,7 +1132,7 @@ "Why?": "", "Widescreen Mode": "Plataus ekrano rėžimas", "Won": "", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. 
(Default: 0.9)": "", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "Nuostatos", "Workspace Permissions": "", "Write": "", diff --git a/src/lib/i18n/locales/ms-MY/translation.json b/src/lib/i18n/locales/ms-MY/translation.json index 9758e4f7bc0..da2a20efda8 100644 --- a/src/lib/i18n/locales/ms-MY/translation.json +++ b/src/lib/i18n/locales/ms-MY/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "Benarkan gangguan suara dalam panggilan", "Allowed Endpoints": "", "Already have an account?": "Telah mempunyai akaun?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "", "an assistant": "seorang pembantu", @@ -208,7 +208,7 @@ "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "Sambungan", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. 
Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Hubungi admin untuk akses WebUI", "Content": "Kandungan", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "", "Continue with LDAP": "", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "Kawalan", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "Disalin", "Copied shared chat URL to clipboard!": "Menyalin URL sembang kongsi ke papan klip", "Copied to clipboard": "", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. 
However, it may not work correctly with all systems and can consume a significant amount of disk space.": "", "Enable Message Rating": "", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Benarkan Pendaftaran Baharu", "Enabled": "Dibenarkan", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "astikan fail CSV anda mengandungi 4 lajur dalam susunan ini: Nama, E-mel, Kata Laluan, Peranan.", @@ -566,7 +566,7 @@ "Include": "", "Include `--api-auth` flag when running stable-diffusion-webui": "Sertakan bendera `-- api -auth` semasa menjalankan stable-diffusion-webui ", "Include `--api` flag when running stable-diffusion-webui": "Sertakan bendera `-- api ` semasa menjalankan stable-diffusion-webui", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "Maklumat", "Input commands": "Masukkan Arahan", "Install from Github URL": "Pasang daripada URL Github", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "Rakam suara", "Redirecting you to Open WebUI Community": "Membawa anda ke Komuniti OpenWebUI", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 
10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Rujuk diri anda sebagai \"User\" (cth, \"Pengguna sedang belajar bahasa Sepanyol\")", "References from": "", "Refused when it shouldn't have": "Menolak dimana ia tidak sepatutnya", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "Tetapan Suara", "Set whisper model": "", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. 
At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "", "Settings": "Tetapan", "Settings saved successfully!": "Tetapan berjaya disimpan!", @@ -964,7 +964,7 @@ "System Prompt": "Gesaan Sistem", "Tags Generation": "", "Tags Generation Prompt": "", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "Sentuh untuk mengganggu", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "Terima kasih atas maklum balas anda!", "The Application Account DN you bind with for search": "", "The base to search for users": "", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. 
If you find this plugin helpful, please consider contributing to its development.": "Pembangun di sebalik 'plugin' ini adalah sukarelawan yang bersemangat daripada komuniti. Jika anda mendapati 'plugin' ini membantu, sila pertimbangkan untuk menyumbang kepada pembangunannya.", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Skor hendaklah berada diantara 0.0 (0%) dan 1.0 (100%).", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "Tema", "Thinking...": "Berfikir...", "This action cannot be undone. Do you wish to continue?": "Tindakan ini tidak boleh diubah semula kepada asal. Adakah anda ingin teruskan", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Ini akan memastikan bahawa perbualan berharga anda disimpan dengan selamat ke pangkalan data 'backend' anda. Terima kasih!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "ni adalah ciri percubaan, ia mungkin tidak berfungsi seperti yang diharapkan dan tertakluk kepada perubahan pada bila-bila masa.", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. 
Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "", "This response was generated by \"{{model}}\"": "", "This will delete": "Ini akan memadam", @@ -1132,7 +1132,7 @@ "Why?": "", "Widescreen Mode": "Mod Skrin Lebar", "Won": "", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "", + "Works together with top-k. 
A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "Ruangan Kerja", "Workspace Permissions": "", "Write": "", diff --git a/src/lib/i18n/locales/nb-NO/translation.json b/src/lib/i18n/locales/nb-NO/translation.json index 27eca6694de..67e7b843c58 100644 --- a/src/lib/i18n/locales/nb-NO/translation.json +++ b/src/lib/i18n/locales/nb-NO/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "Muliggjør taleavbrytelse i samtaler", "Allowed Endpoints": "Tillatte endepunkter", "Already have an account?": "Har du allerede en konto?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "Alternativ til top_p, og har som mål å sikre en balanse mellom kvalitet og variasjon. Parameteren p representerer minimumssannsynligheten for at et token skal vurderes, i forhold til sannsynligheten for det mest sannsynlige tokenet. Hvis p for eksempel er 0,05 og det mest sannsynlige tokenet har en sannsynlighet på 0,9, filtreres logits med en verdi på mindre enn 0,045 bort. (Standard: 0,0)", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "Alltid", "Amazing": "Flott", "an assistant": "en assistent", @@ -208,7 +208,7 @@ "Confirm your new password": "Bekreft det nye passordet ditt", "Connect to your own OpenAI compatible API endpoints.": "Koble til egne OpenAI-kompatible API-endepunkter", "Connections": "Tilkoblinger", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "Begrenser resonneringsinnsatsen for resonneringsmodeller. Gjelder bare for resonneringsmodeller fra bestemte leverandører som har støtte for resonneringsinnsats. (Standard: middels)", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Kontakt administrator for å få tilgang til WebUI", "Content": "Innhold", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "Fortsett med e-post", "Continue with LDAP": "Fortsett med LDAP", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Kontrollerer hvordan meldingsteksten deles opp for TTS-forespørsler. 'Punctuation' deler opp i setninger, 'paragraphs' deler opp i avsnitt, og 'none' beholder meldingen som én enkelt streng.", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "Kontrollerer repetisjonen av tokensekvenser i den genererte teksten. En høyere verdi (f.eks. 1,5) vil straffe gjentakelser hardere, mens en lavere verdi (f.eks. 1,1) vil være mildere. Ved 1 er den deaktivert. 
(Standard: 1,1)", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "Kontroller", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "Styrer balansen mellom sammenheng og mangfold i utdataene. En lavere verdi gir en mer fokusert og sammenhengende tekst. (Standard: 5.0)", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "Kopiert", "Copied shared chat URL to clipboard!": "Kopierte delt chat-URL til utklippstavlen!", "Copied to clipboard": "Kopier til utklippstaveln", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Aktiver Memory Locking (mlock) for å forhindre at modelldata byttes ut av RAM. Dette alternativet låser modellens arbeidssett med sider i RAM-minnet, slik at de ikke byttes ut til disk. Dette kan bidra til å opprettholde ytelsen ved å unngå sidefeil og sikre rask datatilgang.", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Aktiver Memory Mapping (mmap) for å laste inn modelldata. Med dette alternativet kan systemet bruke disklagring som en utvidelse av RAM ved å behandle diskfiler som om de befant seg i RAM. 
Dette kan forbedre modellens ytelse ved å gi raskere datatilgang. Det er imidlertid ikke sikkert at det fungerer som det skal på alle systemer, og det kan kreve mye diskplass.", "Enable Message Rating": "Aktivert vurdering av meldinger", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Aktiver Mirostat-sampling for kontroll av perpleksitet. (Standard: 0, 0 = deaktivert, 1 = Mirostat, 2 = Mirostat 2.0)", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Aktiver nye registreringer", "Enabled": "Aktivert", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Sørg for at CSV-filen din inkluderer fire kolonner i denne rekkefølgen: Navn, E-post, Passord, Rolle.", @@ -566,7 +566,7 @@ "Include": "Inkluder", "Include `--api-auth` flag when running stable-diffusion-webui": "Inkluder flagget --api-auth når du kjører stable-diffusion-webui", "Include `--api` flag when running stable-diffusion-webui": "Inkluder flagget --api når du kjører stable-diffusion-webui", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "Påvirker hvor raskt algoritmen reagerer på tilbakemeldinger fra den genererte teksten. En lavere læringshastighet vil føre til langsommere justeringer, mens en høyere læringshastighet vil gjøre algoritmen mer responsiv. (Standard: 0,1)", + "Influences how quickly the algorithm responds to feedback from the generated text. 
A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "Info", "Input commands": "Inntast kommandoer", "Install from Github URL": "Installer fra GitHub-URL", @@ -809,7 +809,7 @@ "Reasoning Effort": "Resonneringsinnsats", "Record voice": "Ta opp tale", "Redirecting you to Open WebUI Community": "Omdirigerer deg til OpenWebUI-fellesskapet", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "Reduserer sannsynligheten for å generere meningsløse svar. En høyere verdi (f.eks. 100) vil gi mer varierte svar, mens en lavere verdi (f.eks. 10) vil være mer konservativ. (Standard: 40)", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Omtal deg selv som \"Bruker\" (f.eks. \"Bruker lærer spansk\")", "References from": "Henviser fra", "Refused when it shouldn't have": "Avvist når det ikke burde ha blitt det", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Angi antall arbeidstråder som skal brukes til beregning. Dette alternativet kontrollerer hvor mange tråder som brukes til å behandle innkommende forespørsler samtidig. Hvis du øker denne verdien, kan det forbedre ytelsen under arbeidsbelastninger med høy samtidighet, men det kan også føre til økt forbruk av CPU-ressurser.", "Set Voice": "Angi stemme", "Set whisper model": "Angi whisper-modell", - "Sets a flat bias against tokens that have appeared at least once. 
A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "Angir en flat bias mot tokener som har blitt vist minst én gang. En høyere verdi (f.eks. 1,5) vil straffe gjentakelser hardere, mens en lavere verdi (f.eks. 0,9) vil være mildere. Ved 0 er den deaktivert. (Standard: 0)", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "Angir en skaleringsbias mot tokener for å straffe gjentakelser, basert på hvor mange ganger de har dukket opp. En høyere verdi (f.eks. 1,5) vil straffe gjentakelser hardere, mens en lavere verdi (f.eks. 0,9) vil være mildere. Ved 0 er den deaktivert. (Standard: 1,1)", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "Angir hvor langt tilbake modellen skal se for å forhindre repetisjon. (Standard: 64, 0 = deaktivert, -1 = num_ctx)", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "Angir det tilfeldige tallfrøet som skal brukes til generering. Hvis du setter dette til et bestemt tall, vil modellen generere den samme teksten for den samme ledeteksten (standard: tilfeldig).", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "Angir størrelsen på kontekstvinduet som brukes til å generere neste token. (Standard: 2048)", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. 
At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Angir hvilke stoppsekvenser som skal brukes. Når dette mønsteret forekommer, stopper LLM genereringen av tekst og returnerer. Du kan angi flere stoppmønstre ved å spesifisere flere separate stoppparametere i en modellfil.", "Settings": "Innstillinger", "Settings saved successfully!": "Innstillinger lagret!", @@ -964,7 +964,7 @@ "System Prompt": "Systemledetekst", "Tags Generation": "Genering av etiketter", "Tags Generation Prompt": "Ledetekst for genering av etikett", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Tail free sampling brukes til å redusere innvirkningen av mindre sannsynlige tokens fra utdataene. En høyere verdi (f.eks. 2,0) vil redusere effekten mer, mens en verdi på 1,0 deaktiverer denne innstillingen. (standard: 1)", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "Snakk med modellen", "Tap to interrupt": "Trykk for å avbryte", "Tasks": "Oppgaver", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "Takk for tilbakemeldingen!", "The Application Account DN you bind with for search": "Applikasjonskontoens DN du binder deg med for søking", "The base to search for users": "Basen for å søke etter brukere", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "Batchstørrelsen avgjør hvor mange tekstforespørsler som behandles samtidig. En høyere batchstørrelse kan øke ytelsen og hastigheten til modellen, men det krever også mer minne. (Standard: 512)", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Utviklerne bak denne utvidelsen er lidenskapelige frivillige fra fellesskapet. Hvis du finner denne utvidelsen nyttig, vennligst vurder å bidra til utviklingen.", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Ledertavlens over evalueringer er basert på Elo-rangeringssystemet, og oppdateres i sanntid.", "The LDAP attribute that maps to the mail that users use to sign in.": "LDAP-attributtet som tilsvarer e-posten som brukerne bruker for å logge på.", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Den maksimale filstørrelsen i MB. 
Hvis en filstørrelse overskrider denne grensen, blir ikke filen lastet opp.", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Maksimalt antall filer som kan brukes samtidig i chatten. Hvis antallet filer overskrider denne grensen, blir de ikke lastet opp.", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Poengsummen skal være en verdi mellom 0,0 (0 %) og 1,0 (100 %).", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "Temperaturen på modellen. Hvis du øker temperaturen, vil modellen svare mer kreativt. (Standard: 0,8)", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "Tema", "Thinking...": "Tenker ...", "This action cannot be undone. Do you wish to continue?": "Denne handlingen kan ikke angres. Vil du fortsette?", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Dette sikrer at de verdifulle samtalene dine lagres sikkert i backend-databasen din. Takk!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Dette er en eksperimentell funksjon. Det er mulig den ikke fungerer som forventet, og den kan endres når som helst.", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "Dette alternativet styrer hvor mange tokens som bevares når konteksten oppdateres. Hvis det for eksempel er angitt til 2, beholdes de to siste symbolene i samtalekonteksten. 
Bevaring av konteksten kan bidra til å opprettholde kontinuiteten i en samtale, men det kan redusere muligheten til å svare på nye emner. (Standard: 24)", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "Dette alternativet angir det maksimale antallet tokens modellen kan generere i svaret sitt. Hvis du øker denne grensen, kan modellen gi lengre svar, men det kan også øke sannsynligheten for at det genereres uhensiktsmessig eller irrelevant innhold. (Standard: 128)", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "Dette alternativet sletter alle eksisterende filer i samlingen og erstatter dem med nyopplastede filer.", "This response was generated by \"{{model}}\"": "Dette svaret er generert av \"{{modell}}\"", "This will delete": "Dette sletter", @@ -1132,7 +1132,7 @@ "Why?": "Hvorfor?", "Widescreen Mode": "Bredskjermmodus", "Won": "Vant", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "Fungerer sammen med top-k. En høyere verdi (f.eks. 0,95) vil føre til mer mangfoldig tekst, mens en lavere verdi (f.eks. 
0,5) vil generere mer fokusert og konservativ tekst. (Standard: 0,9)", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "Arbeidsområde", "Workspace Permissions": "Tillatelser for arbeidsområde", "Write": "Skriv", diff --git a/src/lib/i18n/locales/nl-NL/translation.json b/src/lib/i18n/locales/nl-NL/translation.json index 5faaa477365..dee748ae146 100644 --- a/src/lib/i18n/locales/nl-NL/translation.json +++ b/src/lib/i18n/locales/nl-NL/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "Stemonderbreking tijdens gesprek toestaan", "Allowed Endpoints": "", "Already have an account?": "Heb je al een account?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "Alternatief voor de top_p, en streeft naar een evenwicht tussen kwaliteit en variatie. De parameter p vertegenwoordigt de minimumwaarschijnlijkheid dat een token in aanmerking wordt genomen, in verhouding tot de waarschijnlijkheid van het meest waarschijnlijke token. Bijvoorbeeld, met p=0.05 en de meest waarschijnlijke token met een waarschijnlijkheid van 0.9, worden logits met een waarde kleiner dan 0.045 uitgefilterd. (Standaard: 0,0)", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "Geweldig", "an assistant": "een assistent", @@ -208,7 +208,7 @@ "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "Verbindingen", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Neem contact op met de beheerder voor WebUI-toegang", "Content": "Inhoud", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "Ga door met E-mail", "Continue with LDAP": "Ga door met LDAP", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Bepaal hoe berichttekst wordt opgesplitst voor TTS-verzoeken. 'Leestekens' splitst op in zinnen, 'alinea's' splitst op in paragrafen en 'geen' houdt het bericht als een enkele string.", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "Besturingselementen", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "Regelt de balans tussen coherentie en diversiteit van de uitvoer. 
Een lagere waarde resulteert in meer gerichte en coherente tekst. (Standaard: 5.0)", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "Gekopieerd", "Copied shared chat URL to clipboard!": "URL van gedeelde gesprekspagina gekopieerd naar klembord!", "Copied to clipboard": "Gekopieerd naar klembord", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Schakel Memory Locking (mlock) in om te voorkomen dat modelgegevens uit het RAM worden verwisseld. Deze optie vergrendelt de werkset pagina's van het model in het RAM, zodat ze niet naar de schijf worden uitgewisseld. Dit kan helpen om de prestaties op peil te houden door paginafouten te voorkomen en snelle gegevenstoegang te garanderen.", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Schakel Memory Mapping (mmap) in om modelgegevens te laden. Deze optie laat het systeem schijfopslag gebruiken als een uitbreiding van RAM door schijfbestanden te behandelen alsof ze in RAM zitten. Dit kan de prestaties van het model verbeteren door snellere gegevenstoegang mogelijk te maken. Het is echter mogelijk dat deze optie niet op alle systemen correct werkt en een aanzienlijke hoeveelheid schijfruimte in beslag kan nemen.", "Enable Message Rating": "Schakel berichtbeoordeling in", - "Enable Mirostat sampling for controlling perplexity. 
(Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Mirostat-sampling inschakelen voor het regelen van de perplexiteit. (Standaard: 0, 0 = uitgeschakeld, 1 = Mirostat, 2 = Mirostat 2.0)", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Schakel nieuwe registraties in", "Enabled": "Ingeschakeld", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Zorg ervoor dat uw CSV-bestand de volgende vier kolommen in deze volgorde bevat: Naam, E-mail, Wachtwoord, Rol.", @@ -566,7 +566,7 @@ "Include": "Voeg toe", "Include `--api-auth` flag when running stable-diffusion-webui": "Voeg '--api-auth` toe bij het uitvoeren van stable-diffusion-webui", "Include `--api` flag when running stable-diffusion-webui": "Voeg `--api` vlag toe bij het uitvoeren van stable-diffusion-webui", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "Beïnvloedt hoe snel het algoritme reageert op feedback van de gegenereerde tekst. Een lagere leersnelheid resulteert in langzamere aanpassingen, terwijl een hogere leersnelheid het algoritme gevoeliger maakt. (Standaard: 0,1)", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "Info", "Input commands": "Voer commando's in", "Install from Github URL": "Installeren vanaf Github-URL", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "Neem stem op", "Redirecting you to Open WebUI Community": "Je wordt doorgestuurd naar OpenWebUI Community", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. 
(Default: 40)": "Vermindert de kans op het genereren van onzin. Een hogere waarde (bijv. 100) zal meer diverse antwoorden geven, terwijl een lagere waarde (bijv. 10) conservatiever zal zijn. (Standaard: 40)", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Refereer naar jezelf als \"user\" (bv. \"User is Spaans aan het leren\"", "References from": "Referenties van", "Refused when it shouldn't have": "Geweigerd terwijl het niet had moeten", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Stel het aantal threads in dat wordt gebruikt voor berekeningen. Deze optie bepaalt hoeveel threads worden gebruikt om gelijktijdig binnenkomende verzoeken te verwerken. Het verhogen van deze waarde kan de prestaties verbeteren onder hoge concurrency werklasten, maar kan ook meer CPU-bronnen verbruiken.", "Set Voice": "Stel stem in", "Set whisper model": "Stel Whisper-model in", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. 
(Default: 64, 0 = disabled, -1 = num_ctx)": "Stelt in hoe ver het model terug moet kijken om herhaling te voorkomen. (Standaard: 64, 0 = uitgeschakeld, -1 = num_ctx)", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "Stelt de willekeurigheid in om te gebruiken voor het genereren. Als je dit op een specifiek getal instelt, genereert het model dezelfde tekst voor dezelfde prompt. (Standaard: willekeurig)", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "Stelt de grootte in van het contextvenster dat wordt gebruikt om het volgende token te genereren. (Standaard: 2048)", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Stelt de te gebruiken stopsequentie in. Als dit patroon wordt gevonden, stopt de LLM met het genereren van tekst en keert terug. 
Er kunnen meerdere stoppatronen worden ingesteld door meerdere afzonderlijke stopparameters op te geven in een modelbestand.", "Settings": "Instellingen", "Settings saved successfully!": "Instellingen succesvol opgeslagen!", @@ -964,7 +964,7 @@ "System Prompt": "Systeem prompt", "Tags Generation": "", "Tags Generation Prompt": "Prompt voor taggeneratie", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Tail free sampling wordt gebruikt om de impact van minder waarschijnlijke tokens uit de uitvoer te verminderen. Een hogere waarde (bv. 2,0) zal de impact meer verminderen, terwijl een waarde van 1,0 deze instelling uitschakelt. (standaard: 1)", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "Tik om te onderbreken", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "Bedankt voor je feedback!", "The Application Account DN you bind with for search": "Het applicatieaccount DN waarmee je zoekt", "The base to search for users": "De basis om gebruikers te zoeken", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "De batchgrootte bepaalt hoeveel tekstverzoeken tegelijk worden verwerkt. Een hogere batchgrootte kan de prestaties en snelheid van het model verhogen, maar vereist ook meer geheugen. (Standaard: 512)", + "The batch size determines how many text requests are processed together at once. 
A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "De ontwikkelaars achter deze plugin zijn gepassioneerde vrijwilligers uit de gemeenschap. Als je deze plugin nuttig vindt, overweeg dan om bij te dragen aan de ontwikkeling ervan.", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Het beoordelingsklassement is gebaseerd op het Elo-classificatiesysteem en wordt in realtime bijgewerkt.", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "De maximale bestandsgrootte in MB. Als het bestand groter is dan deze limiet, wordt het bestand niet geüpload.", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Het maximum aantal bestanden dat in één keer kan worden gebruikt in de chat. Als het aantal bestanden deze limiet overschrijdt, worden de bestanden niet geüpload.", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Het score moet een waarde zijn tussen 0.0 (0%) en 1.0 (100%).", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "De temperatuur van het model. Als je de temperatuur verhoogt, zal het model creatiever antwoorden. (Standaard: 0,8)", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "Thema", "Thinking...": "Aan het denken...", "This action cannot be undone. Do you wish to continue?": "Deze actie kan niet ongedaan worden gemaakt. 
Wilt u doorgaan?", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Dit zorgt ervoor dat je waardevolle gesprekken veilig worden opgeslagen in je backend database. Dank je wel!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Dit is een experimentele functie, het kan functioneren zoals verwacht en kan op elk moment veranderen.", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "Deze optie bepaalt hoeveel tokens bewaard blijven bij het verversen van de context. Als deze bijvoorbeeld op 2 staat, worden de laatste 2 tekens van de context van het gesprek bewaard. Het behouden van de context kan helpen om de continuïteit van een gesprek te behouden, maar het kan de mogelijkheid om te reageren op nieuwe onderwerpen verminderen. (Standaard: 24)", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "Deze optie stelt het maximum aantal tokens in dat het model kan genereren in zijn antwoord. Door deze limiet te verhogen, kan het model langere antwoorden geven, maar het kan ook de kans vergroten dat er onbehulpzame of irrelevante inhoud wordt gegenereerd. (Standaard: 128)", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. 
Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "Deze optie verwijdert alle bestaande bestanden in de collectie en vervangt ze door nieuw geüploade bestanden.", "This response was generated by \"{{model}}\"": "Dit antwoord is gegenereerd door \"{{model}}\"", "This will delete": "Dit zal verwijderen", @@ -1132,7 +1132,7 @@ "Why?": "Waarom?", "Widescreen Mode": "Breedschermmodus", "Won": "Gewonnen", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "Werkt samen met top-k. Een hogere waarde (bijv. 0,95) zal leiden tot meer diverse tekst, terwijl een lagere waarde (bijv. 0,5) meer gerichte en conservatieve tekst zal genereren. (Standaard: 0,9)", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "Werkruimte", "Workspace Permissions": "Werkruimtemachtigingen", "Write": "", diff --git a/src/lib/i18n/locales/pa-IN/translation.json b/src/lib/i18n/locales/pa-IN/translation.json index 1fff2933fee..23a5a0d1993 100644 --- a/src/lib/i18n/locales/pa-IN/translation.json +++ b/src/lib/i18n/locales/pa-IN/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "", "Allowed Endpoints": "", "Already have an account?": "ਪਹਿਲਾਂ ਹੀ ਖਾਤਾ ਹੈ?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. 
The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "", "an assistant": "ਇੱਕ ਸਹਾਇਕ", @@ -208,7 +208,7 @@ "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "ਕਨੈਕਸ਼ਨ", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "", "Content": "ਸਮੱਗਰੀ", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "", "Continue with LDAP": "", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. 
At 1, it is disabled.": "", "Controls": "", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "", "Copied shared chat URL to clipboard!": "ਸਾਂਝੇ ਕੀਤੇ ਗੱਲਬਾਤ URL ਨੂੰ ਕਲਿੱਪਬੋਰਡ 'ਤੇ ਕਾਪੀ ਕਰ ਦਿੱਤਾ!", "Copied to clipboard": "", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "", "Enable Message Rating": "", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "ਨਵੇਂ ਸਾਈਨ ਅਪ ਯੋਗ ਕਰੋ", "Enabled": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "ਸੁਨਿਸ਼ਚਿਤ ਕਰੋ ਕਿ ਤੁਹਾਡੀ CSV ਫਾਈਲ ਵਿੱਚ ਇਸ ਕ੍ਰਮ ਵਿੱਚ 4 ਕਾਲਮ ਹਨ: ਨਾਮ, ਈਮੇਲ, ਪਾਸਵਰਡ, ਭੂਮਿਕਾ।", @@ -566,7 +566,7 @@ "Include": "", "Include `--api-auth` flag when running stable-diffusion-webui": "", "Include `--api` flag when running stable-diffusion-webui": "ਸਟੇਬਲ-ਡਿਫਿਊਸ਼ਨ-ਵੈਬਯੂਆਈ ਚਲਾਉਣ ਸਮੇਂ `--api` ਝੰਡਾ ਸ਼ਾਮਲ ਕਰੋ", - "Influences how quickly the algorithm responds to feedback from the generated text. 
A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "ਜਾਣਕਾਰੀ", "Input commands": "ਇਨਪੁਟ ਕਮਾਂਡਾਂ", "Install from Github URL": "Github URL ਤੋਂ ਇੰਸਟਾਲ ਕਰੋ", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "ਆਵਾਜ਼ ਰਿਕਾਰਡ ਕਰੋ", "Redirecting you to Open WebUI Community": "ਤੁਹਾਨੂੰ ਓਪਨਵੈਬਯੂਆਈ ਕਮਿਊਨਿਟੀ ਵੱਲ ਰੀਡਾਇਰੈਕਟ ਕੀਤਾ ਜਾ ਰਿਹਾ ਹੈ", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "", "References from": "", "Refused when it shouldn't have": "ਜਦੋਂ ਇਹ ਨਹੀਂ ਹੋਣਾ ਚਾਹੀਦਾ ਸੀ ਤਾਂ ਇਨਕਾਰ ਕੀਤਾ", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "ਆਵਾਜ਼ ਸੈੱਟ ਕਰੋ", "Set whisper model": "", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. 
A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "", "Settings": "ਸੈਟਿੰਗਾਂ", "Settings saved successfully!": "ਸੈਟਿੰਗਾਂ ਸਫਲਤਾਪੂਰਵਕ ਸੰਭਾਲੀਆਂ ਗਈਆਂ!", @@ -964,7 +964,7 @@ "System Prompt": "ਸਿਸਟਮ ਪ੍ਰੰਪਟ", "Tags Generation": "", "Tags Generation Prompt": "", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. 
(default: 1)": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "ਤੁਹਾਡੇ ਫੀਡਬੈਕ ਲਈ ਧੰਨਵਾਦ!", "The Application Account DN you bind with for search": "", "The base to search for users": "", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "ਸਕੋਰ 0.0 (0%) ਅਤੇ 1.0 (100%) ਦੇ ਵਿਚਕਾਰ ਹੋਣਾ ਚਾਹੀਦਾ ਹੈ।", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "ਥੀਮ", "Thinking...": "", "This action cannot be undone. 
Do you wish to continue?": "", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "ਇਹ ਯਕੀਨੀ ਬਣਾਉਂਦਾ ਹੈ ਕਿ ਤੁਹਾਡੀਆਂ ਕੀਮਤੀ ਗੱਲਾਂ ਤੁਹਾਡੇ ਬੈਕਐਂਡ ਡਾਟਾਬੇਸ ਵਿੱਚ ਸੁਰੱਖਿਅਤ ਤੌਰ 'ਤੇ ਸੰਭਾਲੀਆਂ ਗਈਆਂ ਹਨ। ਧੰਨਵਾਦ!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "", "This response was generated by \"{{model}}\"": "", "This will delete": "", @@ -1132,7 +1132,7 @@ "Why?": "", "Widescreen Mode": "", "Won": "", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "", + "Works together with top-k. 
A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "ਕਾਰਜਸਥਲ", "Workspace Permissions": "", "Write": "", diff --git a/src/lib/i18n/locales/pl-PL/translation.json b/src/lib/i18n/locales/pl-PL/translation.json index 42c8d19dc58..4d20109f9c1 100644 --- a/src/lib/i18n/locales/pl-PL/translation.json +++ b/src/lib/i18n/locales/pl-PL/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "Zezwól na przerwanie połączenia głosowego", "Allowed Endpoints": "Dozwolone punkty końcowe", "Already have an account?": "Czy masz już konto?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "Alternatywa dla top_p, mająca na celu zapewnienie równowagi między jakością a różnorodnością. Parametr p reprezentuje minimalne prawdopodobieństwo, aby token był brany pod uwagę, względem prawdopodobieństwa najbardziej prawdopodobnego tokena. Na przykład, dla p=0,05 i najbardziej prawdopodobnym tokenem o prawdopodobieństwie 0,9, logity o wartości mniejszej niż 0,045 są wykluczane. (Domyślnie: 0,0)", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "Zawsze", "Amazing": "Niesamowite", "an assistant": "asystent", @@ -208,7 +208,7 @@ "Confirm your new password": "Potwierdź nowe hasło", "Connect to your own OpenAI compatible API endpoints.": "Połącz się ze swoimi własnymi punktami końcowymi API kompatybilnego z OpenAI.", "Connections": "Połączenia", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "Ogranicza wysiłek rozumowania dla modeli rozumowania. Stosuje się tylko do modeli rozumowania od określonych dostawców, którzy obsługują wysiłek rozumowania. (Domyślnie: średni)", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Skontaktuj się z administratorem, aby uzyskać dostęp do WebUI.", "Content": "Treść", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "Kontynuuj z e-mailem", "Continue with LDAP": "Kontynuuj z LDAP", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Kontroluj sposób dzielenia tekstu wiadomości dla żądań TTS. 'Punctuation' dzieli na zdania, 'paragraphs' dzieli na akapity, a 'none' pozostawia wiadomość jako pojedynczy ciąg znaków.", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "Kontroluj powtarzanie się sekwencji tokenów w wygenerowanym tekście. Wyższa wartość (np. 1,5) będzie silniej penalizować powtórzenia, podczas gdy niższa wartość (np. 
1,1) będzie bardziej pobłażliwa. Na poziomie 1 jest wyłączona. (Domyślnie: 1,1)", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "Ustawienia", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "Kontroluje równowagę między spójnością a zróżnicowaniem wyników. Niższa wartość zaowocuje bardziej skoncentrowanym i spójnym tekstem. (Domyślnie: 5,0)", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "Skopiowane", "Copied shared chat URL to clipboard!": "Skopiowano udostępniony URL czatu do schowka!", "Copied to clipboard": "Skopiowane do schowka", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Włącz blokowanie pamięci (mlock), aby zapobiec swappingowi danych modelu z RAM. Ta opcja blokuje zbiór stron roboczych modelu w RAM, co gwarantuje, że nie będą one wymieniane na dysk. Może to pomóc w utrzymaniu wydajności poprzez unikanie błędów strony i zapewnienie szybkiego dostępu do danych.", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Włącz mapowanie pamięci (mmap), aby załadować dane modelu. 
Ta opcja pozwala systemowi traktować pliki dysku jako rozszerzenie RAM, co może poprawić wydajność modelu przez umożliwienie szybszego dostępu do danych. Należy jednak pamiętać, że ta funkcja może nie działać poprawnie ze wszystkimi systemami i zużywać znaczną ilość przestrzeni dyskowej.", "Enable Message Rating": "Włącz ocenianie wiadomości", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Włącz próbkowanie Mirostat w celu kontrolowania perplexity. (Domyślnie: 0, 0 = Wyłączone, 1 = Mirostat, 2 = Mirostat 2.0)", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Włącz nowe rejestracje", "Enabled": "Włączone", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Upewnij się, że twój plik CSV zawiera dokładnie 4 kolumny w następującej kolejności: Nazwa, Email, Hasło, Rola.", @@ -566,7 +566,7 @@ "Include": "Włączyć", "Include `--api-auth` flag when running stable-diffusion-webui": "Użyj flagi `--api-auth` podczas uruchamiania stable-diffusion-webui", "Include `--api` flag when running stable-diffusion-webui": "Użyj flagi `--api` podczas uruchamiania stable-diffusion-webui.", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "Wpływa na to, jak szybko algorytm reaguje na informacje zwrotne z wygenerowanego tekstu. Niższa stopa uczenia się spowoduje wolniejsze dostosowania, podczas gdy wyższa stopa uczenia się sprawi, że algorytm będzie bardziej reaktywny. (Domyślna: 0.1)", + "Influences how quickly the algorithm responds to feedback from the generated text. 
A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "Informacje", "Input commands": "Wprowadź polecenia", "Install from Github URL": "Instalacja z adresu URL serwisu Github", @@ -809,7 +809,7 @@ "Reasoning Effort": "Wysiłek rozumowania", "Record voice": "Nagraj swój głos", "Redirecting you to Open WebUI Community": "Przekierowujemy Cię do społeczności Open WebUI", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "Zmniejsza prawdopodobieństwo generowania bezsensownych odpowiedzi. Wyższa wartość (np. 100) daje bardziej zróżnicowane odpowiedzi, podczas gdy niższa wartość (np. 10) jest bardziej konserwatywna. (Domyślnie: 40)", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Odnosić się do siebie jako \"Użytkownik\" (np. \"Użytkownik uczy się hiszpańskiego\")", "References from": "Odniesienia do", "Refused when it shouldn't have": "Odmówił, gdy nie powinien", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Ustaw liczbę wątków pracowników używanych do obliczeń. Ta opcja kontroluje, ile wątków jest używanych do jednoczesnego przetwarzania przychodzących żądań. Zwiększenie tej wartości może poprawić wydajność pod wysokim obciążeniem, ale może również zużywać więcej zasobów CPU.", "Set Voice": "Ustaw głos", "Set whisper model": "Ustaw model szeptu", - "Sets a flat bias against tokens that have appeared at least once. 
A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "Ustawia płaską karę za tokeny, które pojawiły się przynajmniej raz. Wyższa wartość (np. 1,5) bardziej surowo karze powtórzenia, podczas gdy niższa wartość (np. 0,9) jest bardziej pobłażliwa. Przy 0 jest wyłączona. (Domyślnie: 0)", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "Ustawia skalowanie przeciwko tokenom, aby penalizować powtórzenia na podstawie liczby ich wystąpień. Wyższa wartość (np. 1,5) bardziej penalizuje powtórzenia, natomiast niższa wartość (np. 0,9) jest bardziej pobłażliwa. Przy 0 jest wyłączona. (Domyślnie: 1,1)", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "Określa, jak daleko wstecz model powinien sięgać, aby zapobiec powtarzaniu się. (Domyślnie: 64, 0 = wyłączone, -1 = num_ctx)", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "Ustawia ziarno liczby losowej do użycia podczas generowania. Ustawienie tego na konkretną liczbę spowoduje, że model będzie generował ten sam tekst dla tego samego zapytania. (Domyślnie: losowe)", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "Ustawia rozmiar okna kontekstowego używanego do generowania następnego tokenu. (Domyślnie: 2048)", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. 
At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Ustawia sekwencje stopu do użycia. Gdy ten wzorzec zostanie napotkany, LLM przestanie generować tekst i zwróci wynik. Można skonfigurować wiele sekwencji stopu, określając kilka oddzielnych parametrów stopu w pliku modelu.", "Settings": "Ustawienia", "Settings saved successfully!": "Ustawienia zostały zapisane pomyślnie!", @@ -964,7 +964,7 @@ "System Prompt": "Podpowiedź systemowa", "Tags Generation": "Generowanie tagów", "Tags Generation Prompt": "Podpowiedź do generowania tagów", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Próbkowanie bez ogona jest używane do zmniejszenia wpływu mniej prawdopodobnych tokenów na wyjście. Wyższa wartość (np. 2,0) zmniejszy ten wpływ bardziej, podczas gdy wartość 1,0 wyłącza to ustawienie. (domyślnie: 1)", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "Kliknij, aby przerwać", "Tasks": "Zadania", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "Dziękuję za twoją opinię!", "The Application Account DN you bind with for search": "Konto techniczne w formacie DN, z którym się wiążesz w celu przeszukiwania", "The base to search for users": "Podstawa do wyszukiwania użytkowników", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "Rozmiar partii określa, ile żądań tekstu jest przetwarzanych razem w jednym czasie. Większy rozmiar partii może zwiększyć wydajność i szybkość modelu, ale wymaga również więcej pamięci. (Domyślnie: 512)", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Twórcy tego wtyczki to entuzjaści, którzy działają jako wolontariusze ze społeczności. Jeśli uważasz, że ta wtyczka jest pomocna, rozważ wsparcie jej rozwoju.", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Tablica wyników oceny opiera się na systemie rankingu Elo i jest aktualizowana w czasie rzeczywistym.", "The LDAP attribute that maps to the mail that users use to sign in.": "Atrybut LDAP, który mapuje się na adres e-mail używany przez użytkowników do logowania.", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Maksymalny rozmiar pliku w MB. 
Jeśli rozmiar pliku przekroczy ten limit, plik nie zostanie przesłany.", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Maksymalna liczba plików, które można użyć jednocześnie w czacie. Jeśli liczba plików przekroczy ten limit, pliki nie zostaną przesłane.", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Wynik powinien być wartością pomiędzy 0,0 (0%) a 1,0 (100%).", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "Temperatura modelu. Zwiększenie temperatury sprawi, że model będzie odpowiadał w sposób bardziej kreatywny. (Domyślnie: 0.8)", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "Motyw", "Thinking...": "Myślę...", "This action cannot be undone. Do you wish to continue?": "Czy na pewno chcesz kontynuować? Ta akcja nie może zostać cofnięta.", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "To gwarantuje, że Twoje wartościowe rozmowy są bezpiecznie zapisywane w bazie danych backendowej. Dziękujemy!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "To jest funkcja eksperymentalna, może nie działać zgodnie z oczekiwaniami i jest podatna na zmiany w dowolnym momencie.", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "Ta opcja kontroluje, ile tokenów jest zachowanych podczas odświeżania kontekstu. Na przykład, jeśli ustawiona na 2, ostatnie 2 tokeny kontekstu rozmowy zostaną zachowane. 
Zachowywanie kontekstu może pomóc w utrzymaniu ciągłości rozmowy, ale może zmniejszyć zdolność do odpowiadania na nowe tematy. (Domyślnie: 24)", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "Ta opcja ustawia maksymalną liczbę tokenów, które model może wygenerować w swojej odpowiedzi. Zwiększenie tego limitu pozwala modelowi na dostarczanie dłuższych odpowiedzi, ale może również zwiększyć prawdopodobieństwo generowania nieprzydatnych lub nieistotnych treści. (Domyślnie: 128)", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "Ta opcja usunie wszystkie istniejące pliki w kolekcji i zastąpi je nowo przesłanymi plikami.", "This response was generated by \"{{model}}\"": "Ta odpowiedź została wygenerowana przez \"{{model}}\".", "This will delete": "To usunie wszystkie pliki z katalogu.", @@ -1132,7 +1132,7 @@ "Why?": "Dlaczego?", "Widescreen Mode": "Tryb panoramiczny", "Won": "Wygrał", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "Działa razem z top-k. Wyższa wartość (np. 
0,95) prowadzi do bardziej zróżnicowanego tekstu, podczas gdy niższa wartość (np. 0,5) generuje bardziej skoncentrowany i konserwatywny tekst. (Domyślnie: 0,9)", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "Obszar roboczy", "Workspace Permissions": "Uprawnienia do przestrzeni roboczej", "Write": "Napisz", diff --git a/src/lib/i18n/locales/pt-BR/translation.json b/src/lib/i18n/locales/pt-BR/translation.json index 86dc3377f90..e7dbc89dd49 100644 --- a/src/lib/i18n/locales/pt-BR/translation.json +++ b/src/lib/i18n/locales/pt-BR/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "Permitir Interrupção de Voz na Chamada", "Allowed Endpoints": "", "Already have an account?": "Já tem uma conta?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "Alternativa ao 'top_p', e visa garantir um equilíbrio entre qualidade e variedade. O parâmetro 'p' representa a probabilidade mínima para que um token seja considerado, em relação à probabilidade do token mais provável. Por exemplo, com 'p=0.05' e o token mais provável com probabilidade de '0.9', as predições com valor inferior a '0.045' são filtrados. (Default: 0.0)", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "Incrível", "an assistant": "um assistente", @@ -208,7 +208,7 @@ "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "Conexões", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Contate o Admin para Acesso ao WebUI", "Content": "Conteúdo", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "Continuar com Email", "Continue with LDAP": "Continuar com LDAP", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Controlar como o texto do mensagem é dividido para solicitações TTS. 'Pontuação' dividida em frases, 'parágrafos' divide em parágrafos e 'não' mantém a mensagem como uma cadeia de caracteres.", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "Controles", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "Controlar o equilibrio entre a coerencia e a diversidade da saída. 
Um valor mais baixo fará com que o texto seja mais focado e coerente. (Padrão: 5.0)", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "Copiado", "Copied shared chat URL to clipboard!": "URL de chat compartilhado copiado para a área de transferência!", "Copied to clipboard": "Copiado para a área de transferência", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Habilite o bloqueio de memória (mlock) para evitar que os dados do modelo sejam transferidos da RAM para a área de troca (swap). Essa opção bloqueia o conjunto de páginas em uso pelo modelo na RAM, garantindo que elas não sejam transferidas para o disco. Isso pode ajudar a manter o desempenho, evitando falhas de página e garantindo acesso rápido aos dados.", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Habilite o mapeamento de memória (mmap) para carregar dados do modelo. Esta opção permite que o sistema use o armazenamento em disco como uma extensão da RAM, tratando os arquivos do disco como se estivessem na RAM. Isso pode melhorar o desempenho do modelo, permitindo acesso mais rápido aos dados. No entanto, pode não funcionar corretamente com todos os sistemas e consumir uma quantidade significativa de espaço em disco.", "Enable Message Rating": "Ativar Avaliação de Mensagens", - "Enable Mirostat sampling for controlling perplexity. 
(Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Habilite a amostragem Mirostat para controlar a perplexidade. (Padrão: 0, 0 = Desativado, 1 = Mirostat, 2 = Mirostat 2.0)", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Ativar Novos Cadastros", "Enabled": "Ativado", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Certifique-se de que seu arquivo CSV inclua 4 colunas nesta ordem: Nome, Email, Senha, Função.", @@ -566,7 +566,7 @@ "Include": "Incluir", "Include `--api-auth` flag when running stable-diffusion-webui": "Incluir a flag `--api-auth` ao executar stable-diffusion-webui", "Include `--api` flag when running stable-diffusion-webui": "Incluir a flag `--api` ao executar stable-diffusion-webui", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "Define a rapidez com que o algoritmo responde ao feedback do texto gerado. Uma taxa de aprendizado menor resultará em ajustes mais lentos, enquanto uma taxa maior tornará o algoritmo mais responsivo. (Padrão: 0,1)", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "Informação", "Input commands": "Comandos de entrada", "Install from Github URL": "Instalar da URL do Github", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "Gravar voz", "Redirecting you to Open WebUI Community": "Redirecionando você para a Comunidade OpenWebUI", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "Reduz a probabilidade de gerar absurdos. 
Um valor mais alto (por exemplo, 100) dará respostas mais diversas, enquanto um valor mais baixo (por exemplo, 10) será mais conservador. (Padrão: 40)", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Refira-se como \"Usuário\" (por exemplo, \"Usuário está aprendendo espanhol\")", "References from": "Referências de", "Refused when it shouldn't have": "Recusado quando não deveria", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Defina o número de threads de trabalho usadas para computação. Esta opção controla quantos threads são usados para processar as solicitações recebidas de forma simultânea. Aumentar esse valor pode melhorar o desempenho em cargas de trabalho de alta concorrência, mas também pode consumir mais recursos da CPU.", "Set Voice": "Definir Voz", "Set whisper model": "Definir modelo Whisper", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "Define a distância de retrocesso que o modelo deve olhar para evitar repetições. 
(Padrão: 64, 0 = desativado, -1 = num_ctx)", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "Define a semente do número aleatório a ser usada para a geração. Definir isso como um número específico fará com que o modelo gere o mesmo texto para o mesmo prompt. (Padrão: aleatório)", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "Define o tamanho da janela de contexto usada para gerar o próximo token. (Padrão: 2048)", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Define as sequências de parada a serem usadas. Quando esse padrão for encontrado, o modelo de linguagem (LLM) parará de gerar texto e retornará. 
Vários padrões de parada podem ser definidos especificando parâmetros de parada separados em um arquivo de modelo.", "Settings": "Configurações", "Settings saved successfully!": "Configurações salvas com sucesso!", @@ -964,7 +964,7 @@ "System Prompt": "Prompt do Sistema", "Tags Generation": "", "Tags Generation Prompt": "Prompt para geração de Tags", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "A amostragem *tail free* é usada para reduzir o impacto de tokens menos prováveis na saída. Um valor mais alto (por exemplo, 2,0) reduzirá mais o impacto, enquanto um valor de 1,0 desativa essa configuração. (Padrão: 1)", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "Toque para interromper", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "Obrigado pelo seu comentário!", "The Application Account DN you bind with for search": "O DN (Distinguished Name) da Conta de Aplicação com a qual você se conecta para pesquisa.", "The base to search for users": "Base para pesquisar usuários.", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "O tamanho do lote (batch size) determina quantas solicitações de texto são processadas juntas de uma vez. Um tamanho de lote maior pode aumentar o desempenho e a velocidade do modelo, mas também requer mais memória. (Padrão: 512)", + "The batch size determines how many text requests are processed together at once. 
A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Os desenvolvedores por trás deste plugin são voluntários apaixonados da comunidade. Se você achar este plugin útil, considere contribuir para o seu desenvolvimento.", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "A evolução do ranking de avaliação é baseada no sistema Elo e será atualizada em tempo real.", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Máximo tamanho de arquivo em MB. Se o tamanho do arquivo exceder este limite, o arquivo não será enviado.", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "O número máximo de arquivos que podem ser utilizados a cada vez em chat. Se o número de arquivos exceder este limite, os arquivos não serão enviados.", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "A pontuação deve ser um valor entre 0.0 (0%) e 1.0 (100%).", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "Temperatura do modelo. Aumentar a temperatura fará com que o modelo responda de forma mais criativa. (Padrão: 0,8)", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "Tema", "Thinking...": "Pensando...", "This action cannot be undone. Do you wish to continue?": "Esta ação não pode ser desfeita. Você deseja continuar?", "This ensures that your valuable conversations are securely saved to your backend database. 
Thank you!": "Isso garante que suas conversas valiosas sejam salvas com segurança no banco de dados do backend. Obrigado!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Esta é uma funcionalidade experimental, pode não funcionar como esperado e está sujeita a alterações a qualquer momento.", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "Esta opção controla quantos tokens são preservados ao atualizar o contexto. Por exemplo, se definido como 2, os últimos 2 tokens do contexto da conversa serão mantidos. Preservar o contexto pode ajudar a manter a continuidade da conversa, mas pode reduzir a capacidade de responder a novos tópicos. (Padrão: 24)", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "Esta opção define o número máximo de tokens que o modelo pode gerar em sua resposta. Aumentar esse limite permite que o modelo forneça respostas mais longas, mas também pode aumentar a probabilidade de gerar conteúdo irrelevante ou não útil. (Padrão: 128)", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. 
Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "Essa opção deletará todos os arquivos existentes na coleção e todos eles serão substituídos.", "This response was generated by \"{{model}}\"": "Esta resposta foi gerada por \"{{model}}\"", "This will delete": "Isso vai excluir", @@ -1132,7 +1132,7 @@ "Why?": "Por que", "Widescreen Mode": "Modo Tela Cheia", "Won": "Ganhou", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "Funciona em conjunto com o top-k. Um valor mais alto (por exemplo, 0,95) levará a um texto mais diversificado, enquanto um valor mais baixo (por exemplo, 0,5) gerará um texto mais focado e conservador. (Padrão: 0,9)", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "Espaço de Trabalho", "Workspace Permissions": "Permissões do espaço de trabalho", "Write": "", diff --git a/src/lib/i18n/locales/pt-PT/translation.json b/src/lib/i18n/locales/pt-PT/translation.json index e8ef705a8ea..57b5c3d48fc 100644 --- a/src/lib/i18n/locales/pt-PT/translation.json +++ b/src/lib/i18n/locales/pt-PT/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "", "Allowed Endpoints": "", "Already have an account?": "Já tem uma conta?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "", "an assistant": "um assistente", @@ -208,7 +208,7 @@ "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "Conexões", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Contatar Admin para acesso ao WebUI", "Content": "Conteúdo", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "", "Continue with LDAP": "", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "", - "Controls the balance between coherence and diversity of the output. 
A lower value will result in more focused and coherent text. (Default: 5.0)": "", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "", "Copied shared chat URL to clipboard!": "URL de Conversa partilhado copiada com sucesso!", "Copied to clipboard": "", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "", "Enable Message Rating": "", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Ativar Novas Inscrições", "Enabled": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Confirme que o seu ficheiro CSV inclui 4 colunas nesta ordem: Nome, E-mail, Senha, Função.", @@ -566,7 +566,7 @@ "Include": "", "Include `--api-auth` flag when running stable-diffusion-webui": "", "Include `--api` flag when running stable-diffusion-webui": "Inclua a flag `--api` ao executar stable-diffusion-webui", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. 
(Default: 0.1)": "", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "Informação", "Input commands": "Comandos de entrada", "Install from Github URL": "Instalar a partir do URL do Github", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "Gravar voz", "Redirecting you to Open WebUI Community": "Redirecionando-o para a Comunidade OpenWebUI", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Refera-se a si próprio como \"User\" (por exemplo, \"User está a aprender Espanhol\")", "References from": "", "Refused when it shouldn't have": "Recusado quando não deveria", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "Definir Voz", "Set whisper model": "", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. 
(Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "", "Settings": "Configurações", "Settings saved successfully!": "Configurações guardadas com sucesso!", @@ -964,7 +964,7 @@ "System Prompt": "Prompt do Sistema", "Tags Generation": "", "Tags Generation Prompt": "", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "Obrigado pelo seu feedback!", "The Application Account DN you bind with for search": "", "The base to search for users": "", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "A pontuação deve ser um valor entre 0.0 (0%) e 1.0 (100%).", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "Tema", "Thinking...": "A pensar...", "This action cannot be undone. Do you wish to continue?": "", "This ensures that your valuable conversations are securely saved to your backend database. 
Thank you!": "Isto garante que suas conversas valiosas sejam guardadas com segurança na sua base de dados de backend. Obrigado!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Isto é um recurso experimental, pode não funcionar conforme o esperado e está sujeito a alterações a qualquer momento.", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "", "This response was generated by \"{{model}}\"": "", "This will delete": "", @@ -1132,7 +1132,7 @@ "Why?": "", "Widescreen Mode": "Modo Widescreen", "Won": "", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "", + "Works together with top-k. 
A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "Espaço de Trabalho", "Workspace Permissions": "", "Write": "", diff --git a/src/lib/i18n/locales/ro-RO/translation.json b/src/lib/i18n/locales/ro-RO/translation.json index 4911aa52797..e2408336d91 100644 --- a/src/lib/i18n/locales/ro-RO/translation.json +++ b/src/lib/i18n/locales/ro-RO/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "Permite intreruperea vocii în apel", "Allowed Endpoints": "", "Already have an account?": "Deja ai un cont?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "Întotdeauna", "Amazing": "Uimitor", "an assistant": "un asistent", @@ -208,7 +208,7 @@ "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "Conexiuni", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. 
Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Contactează administratorul pentru acces WebUI", "Content": "Conținut", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "", "Continue with LDAP": "", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Controlează modul în care textul mesajului este divizat pentru cererile TTS. 'Punctuation' împarte în propoziții, 'paragraphs' împarte în paragrafe, iar 'none' menține mesajul ca un șir unic.", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "Controale", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "Copiat", "Copied shared chat URL to clipboard!": "URL-ul conversației partajate a fost copiat în clipboard!", "Copied to clipboard": "Copiat în clipboard", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", "Enable Memory Mapping (mmap) to load model data. 
This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "", "Enable Message Rating": "Activează Evaluarea Mesajelor", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Activează Înscrierile Noi", "Enabled": "Activat", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Asigurați-vă că fișierul CSV include 4 coloane în această ordine: Nume, Email, Parolă, Rol.", @@ -566,7 +566,7 @@ "Include": "Include", "Include `--api-auth` flag when running stable-diffusion-webui": "Includeți flag-ul `--api-auth` când rulați stable-diffusion-webui", "Include `--api` flag when running stable-diffusion-webui": "Includeți flag-ul `--api` când rulați stable-diffusion-webui", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "Informații", "Input commands": "Comenzi de intrare", "Install from Github URL": "Instalează de la URL-ul Github", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "Înregistrează vocea", "Redirecting you to Open WebUI Community": "Vă redirecționăm către Comunitatea OpenWebUI", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 
10) will be more conservative. (Default: 40)": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Referiți-vă la dvs. ca \"Utilizator\" (de ex., \"Utilizatorul învață spaniolă\")", "References from": "Referințe din", "Refused when it shouldn't have": "Refuzat când nu ar fi trebuit", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "Setează Voce", "Set whisper model": "Setează modelul whisper", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. 
At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "", "Settings": "Setări", "Settings saved successfully!": "Setările au fost salvate cu succes!", @@ -964,7 +964,7 @@ "System Prompt": "Prompt de Sistem", "Tags Generation": "", "Tags Generation Prompt": "Generarea de Etichete Prompt", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "Apasă pentru a întrerupe", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "Mulțumim pentru feedback!", "The Application Account DN you bind with for search": "", "The base to search for users": "", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. 
(Default: 512)": "", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Dezvoltatorii din spatele acestui plugin sunt voluntari pasionați din comunitate. Dacă considerați acest plugin util, vă rugăm să luați în considerare contribuția la dezvoltarea sa.", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Clasamentul de evaluare se bazează pe sistemul de rating Elo și este actualizat în timp real.", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Dimensiunea maximă a fișierului în MB. Dacă dimensiunea fișierului depășește această limită, fișierul nu va fi încărcat.", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Numărul maxim de fișiere care pot fi utilizate simultan în chat. Dacă numărul de fișiere depășește această limită, fișierele nu vor fi încărcate.", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Scorul ar trebui să fie o valoare între 0.0 (0%) și 1.0 (100%).", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "Temă", "Thinking...": "Gândește...", "This action cannot be undone. Do you wish to continue?": "Această acțiune nu poate fi anulată. Doriți să continuați?", "This ensures that your valuable conversations are securely saved to your backend database. 
Thank you!": "Acest lucru asigură că conversațiile dvs. valoroase sunt salvate în siguranță în baza de date a backend-ului dvs. Mulțumim!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Aceasta este o funcție experimentală, poate să nu funcționeze așa cum vă așteptați și este supusă schimbării în orice moment.", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. 
Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "Această opțiune va șterge toate fișierelor existente din colecție și le va înlocui cu fișierele nou încărcate.", "This response was generated by \"{{model}}\"": "Acest răspuns a fost generat de \"{{model}}\"", "This will delete": "Aceasta va șterge", @@ -1132,7 +1132,7 @@ "Why?": "", "Widescreen Mode": "Mod Ecran Larg", "Won": "Câștigat", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "Spațiu de Lucru", "Workspace Permissions": "", "Write": "Scrie", diff --git a/src/lib/i18n/locales/ru-RU/translation.json b/src/lib/i18n/locales/ru-RU/translation.json index 1d9fae43329..ef75209b63c 100644 --- a/src/lib/i18n/locales/ru-RU/translation.json +++ b/src/lib/i18n/locales/ru-RU/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "Разрешить прерывание голоса во время вызова", "Allowed Endpoints": "", "Already have an account?": "У вас уже есть учетная запись?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. 
The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "Удивительный", "an assistant": "ассистент", @@ -208,7 +208,7 @@ "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "Соединение", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Обратитесь к администратору для получения доступа к WebUI", "Content": "Содержание", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "Продолжить с Email", "Continue with LDAP": "Продолжить с LDAP", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Управляйте разделением текста сообщения для запросов TTS. 'Пунктуация' разделяет на предложения, 'абзацы' - разделяет на абзацы, а 'нет' сохраняет сообщение в виде одной строки.", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "Управление", - "Controls the balance between coherence and diversity of the output. 
A lower value will result in more focused and coherent text. (Default: 5.0)": "Управляет балансом между согласованностью и разнообразием выходных данных. Меньшее значение приведет к более сфокусированному и связному тексту. (По умолчанию: 5.0)", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "Скопировано", "Copied shared chat URL to clipboard!": "Копирование в буфер обмена выполнено успешно!", "Copied to clipboard": "Скопировано в буфер обмена", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Включите блокировку памяти (mlock), чтобы предотвратить выгрузку данных модели из ОЗУ. Эта опция блокирует рабочий набор страниц модели в оперативной памяти, гарантируя, что они не будут выгружены на диск. Это может помочь поддерживать производительность, избегая ошибок страниц и обеспечивая быстрый доступ к данным.", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Включите отображение памяти (mmap), чтобы загрузить данные модели. Эта опция позволяет системе использовать дисковое хранилище в качестве расширения оперативной памяти, обрабатывая дисковые файлы так, как если бы они находились в оперативной памяти. Это может улучшить производительность модели за счет более быстрого доступа к данным. 
Однако он может работать некорректно со всеми системами и занимать значительный объем дискового пространства.", "Enable Message Rating": "Разрешить оценку ответов", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Разрешить новые регистрации", "Enabled": "Включено", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Убедитесь, что ваш CSV-файл включает в себя 4 столбца в следующем порядке: Имя, Электронная почта, Пароль, Роль.", @@ -566,7 +566,7 @@ "Include": "Включать", "Include `--api-auth` flag when running stable-diffusion-webui": "Добавьте флаг '--api-auth' при запуске stable-diffusion-webui", "Include `--api` flag when running stable-diffusion-webui": "Добавьте флаг `--api` при запуске stable-diffusion-webui", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "Информация", "Input commands": "Введите команды", "Install from Github URL": "Установка с URL-адреса Github", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "Записать голос", "Redirecting you to Open WebUI Community": "Перенаправляем вас в сообщество OpenWebUI", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 
10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Называйте себя \"User\" (например, \"User is learning Spanish\").", "References from": "", "Refused when it shouldn't have": "Отказано в доступе, когда это не должно было произойти", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "Установить голос", "Set whisper model": "", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. 
At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "", "Settings": "Настройки", "Settings saved successfully!": "Настройки успешно сохранены!", @@ -964,7 +964,7 @@ "System Prompt": "Системный промпт", "Tags Generation": "", "Tags Generation Prompt": "", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "Нажмите, чтобы прервать", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "Спасибо за вашу обратную связь!", "The Application Account DN you bind with for search": "", "The base to search for users": "", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. 
If you find this plugin helpful, please consider contributing to its development.": "Разработчики этого плагина - увлеченные волонтеры из сообщества. Если вы считаете этот плагин полезным, пожалуйста, подумайте о том, чтобы внести свой вклад в его разработку.", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Максимальный размер файла в МБ. Если размер файла превысит это ограничение, файл не будет загружен.", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Максимальное количество файлов, которые могут быть использованы одновременно в чате. Если количество файлов превысит это ограничение, файлы не будут загружены.", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Оценка должна быть значением между 0,0 (0%) и 1,0 (100%).", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "Температура модели. Повышение температуры заставит модель отвечать более творчески. (По умолчанию: 0,8)", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "Тема", "Thinking...": "Думаю...", "This action cannot be undone. Do you wish to continue?": "Это действие нельзя отменить. Вы хотите продолжить?", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Это обеспечивает сохранение ваших ценных разговоров в безопасной базе данных на вашем сервере. 
Спасибо!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Это экспериментальная функция, она может работать не так, как ожидалось, и может быть изменена в любое время.", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "Этот параметр определяет, сколько токенов сохраняется при обновлении контекста. Например, если установлено значение 2, будут сохранены два последних токена контекста разговора. Сохранение контекста может помочь сохранить непрерывность разговора, но может снизить способность реагировать на новые темы. (По умолчанию: 24)", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "Этот параметр устанавливает максимальное количество токенов, которые модель может сгенерировать в своем ответе. Увеличение этого предела позволяет модели предоставлять более длинные ответы, но также может увеличить вероятность создания бесполезного или нерелевантного контента. (По умолчанию: 128)", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. 
Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "Эта опция удалит все существующие файлы в коллекции и заменит их вновь загруженными файлами.", "This response was generated by \"{{model}}\"": "", "This will delete": "Это приведет к удалению", @@ -1132,7 +1132,7 @@ "Why?": "Почему?", "Widescreen Mode": "Широкоэкранный режим", "Won": "", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "Работает совместно с top-k. Более высокое значение (например, 0,95) приведет к созданию более разнообразного текста, а более низкое значение (например, 0,5) приведет к созданию более сфокусированного и консервативного текста. (По умолчанию: 0,9)", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "Рабочее пространство", "Workspace Permissions": "Разрешения для Рабочего пространства", "Write": "", diff --git a/src/lib/i18n/locales/sk-SK/translation.json b/src/lib/i18n/locales/sk-SK/translation.json index a25b916d1e6..3d114b22a36 100644 --- a/src/lib/i18n/locales/sk-SK/translation.json +++ b/src/lib/i18n/locales/sk-SK/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "Povoliť prerušenie hlasu počas hovoru", "Allowed Endpoints": "", "Already have an account?": "Už máte účet?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "", "an assistant": "asistent", @@ -208,7 +208,7 @@ "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "Pripojenia", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Kontaktujte administrátora pre prístup k webovému rozhraniu.", "Content": "Obsah", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "", "Continue with LDAP": "", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Kontrola, ako sa text správy rozdeľuje pre požiadavky TTS. 'Punctuation' rozdeľuje text na vety, 'paragraphs' rozdeľuje text na odseky a 'none' ponecháva správu ako jeden celý reťazec.", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. 
A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "Ovládacie prvky", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "Skopírované", "Copied shared chat URL to clipboard!": "URL zdieľanej konverzácie skopírované do schránky!", "Copied to clipboard": "Skopírované do schránky", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "", "Enable Message Rating": "Povoliť hodnotenie správ", - "Enable Mirostat sampling for controlling perplexity. 
(Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Povoliť nové registrácie", "Enabled": "Povolené", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Uistite sa, že váš CSV súbor obsahuje 4 stĺpce v tomto poradí: Name, Email, Password, Role.", @@ -566,7 +566,7 @@ "Include": "Zahrnúť", "Include `--api-auth` flag when running stable-diffusion-webui": "Zahrňte prepínač `--api-auth` pri spustení stable-diffusion-webui.", "Include `--api` flag when running stable-diffusion-webui": "Pri spustení stable-diffusion-webui zahrňte príznak `--api`.", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "Info", "Input commands": "Vstupné príkazy", "Install from Github URL": "Inštalácia z URL adresy Githubu", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "Nahrať hlas", "Redirecting you to Open WebUI Community": "Presmerovanie na komunitu OpenWebUI", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Odkazujte na seba ako na \"užívateľa\" (napr. 
\"Užívateľ sa učí španielsky\").", "References from": "Referencie z", "Refused when it shouldn't have": "Odmietnuté, keď nemalo byť.", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "Nastaviť hlas", "Set whisper model": "Nastaviť model whisper", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. 
At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "", "Settings": "Nastavenia", "Settings saved successfully!": "Nastavenia boli úspešne uložené!", @@ -964,7 +964,7 @@ "System Prompt": "Systémový prompt", "Tags Generation": "", "Tags Generation Prompt": "Prompt na generovanie značiek", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "Klepnite na prerušenie", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "Ďakujeme za vašu spätnú väzbu!", "The Application Account DN you bind with for search": "", "The base to search for users": "", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. 
If you find this plugin helpful, please consider contributing to its development.": "Vývojári stojaci za týmto pluginom sú zapálení dobrovoľníci z komunity. Ak považujete tento plugin za užitočný, zvážte príspevok na jeho vývoj.", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Hodnotiaca tabuľka je založená na systéme hodnotenia Elo a aktualizuje sa v reálnom čase.", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Maximálna veľkosť súboru v MB. Ak veľkosť súboru presiahne tento limit, súbor nebude nahraný.", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Maximálny počet súborov, ktoré je možné použiť naraz v chate. Ak počet súborov presiahne tento limit, súbory nebudú nahrané.", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Skóre by malo byť hodnotou medzi 0,0 (0%) a 1,0 (100%).", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "Téma", "Thinking...": "Premýšľam...", "This action cannot be undone. Do you wish to continue?": "Túto akciu nie je možné vrátiť späť. Prajete si pokračovať?", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Týmto je zaistené, že vaše cenné konverzácie sú bezpečne uložené vo vašej backendovej databáze. 
Ďakujeme!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Toto je experimentálna funkcia, nemusí fungovať podľa očakávania a môže byť kedykoľvek zmenená.", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "Táto voľba odstráni všetky existujúce súbory v kolekcii a nahradí ich novo nahranými súbormi.", "This response was generated by \"{{model}}\"": "Táto odpoveď bola vygenerovaná pomocou \"{{model}}\"", "This will delete": "Toto odstráni", @@ -1132,7 +1132,7 @@ "Why?": "Prečo?", "Widescreen Mode": "Režim širokouhlého zobrazenia", "Won": "Vyhral", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. 
(Default: 0.9)": "", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "", "Workspace Permissions": "", "Write": "", diff --git a/src/lib/i18n/locales/sr-RS/translation.json b/src/lib/i18n/locales/sr-RS/translation.json index 49f5ea3a9f3..a04d3375494 100644 --- a/src/lib/i18n/locales/sr-RS/translation.json +++ b/src/lib/i18n/locales/sr-RS/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "Дозволи прекид гласа у позиву", "Allowed Endpoints": "", "Already have an account?": "Већ имате налог?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "Невероватно", "an assistant": "помоћник", @@ -208,7 +208,7 @@ "Confirm your new password": "Потврди нову лозинку", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "Везе", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. 
Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Пишите админима за приступ на WebUI", "Content": "Садржај", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "Настави са е-адресом", "Continue with LDAP": "Настави са ЛДАП-ом", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "Контроле", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "Копирано", "Copied shared chat URL to clipboard!": "Адреса дељеног ћаскања ископирана у оставу!", "Copied to clipboard": "Копирано у оставу", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. 
However, it may not work correctly with all systems and can consume a significant amount of disk space.": "", "Enable Message Rating": "", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Омогући нове пријаве", "Enabled": "Омогућено", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Уверите се да ваша CSV датотека укључује 4 колоне у овом редоследу: Име, Е-пошта, Лозинка, Улога.", @@ -566,7 +566,7 @@ "Include": "", "Include `--api-auth` flag when running stable-diffusion-webui": "", "Include `--api` flag when running stable-diffusion-webui": "Укључи `--api` заставицу при покретању stable-diffusion-webui", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "Инфо", "Input commands": "Унеси наредбе", "Install from Github URL": "Инсталирај из Гитхуб УРЛ адресе", @@ -809,7 +809,7 @@ "Reasoning Effort": "Јачина размишљања", "Record voice": "Сними глас", "Redirecting you to Open WebUI Community": "Преусмеравање на OpenWebUI заједницу", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 
10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "", "References from": "Референце од", "Refused when it shouldn't have": "Одбијено када није требало", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "Подеси глас", "Set whisper model": "", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. 
At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "", "Settings": "Подешавања", "Settings saved successfully!": "Подешавања успешно сачувана!", @@ -964,7 +964,7 @@ "System Prompt": "Системски упит", "Tags Generation": "Стварање ознака", "Tags Generation Prompt": "Упит стварања ознака", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "Хвала на вашем коментару!", "The Application Account DN you bind with for search": "", "The base to search for users": "", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. 
If you find this plugin helpful, please consider contributing to its development.": "", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Резултат треба да буде вредност између 0.0 (0%) и 1.0 (100%).", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "Тема", "Thinking...": "Размишљам...", "This action cannot be undone. Do you wish to continue?": "Ова радња се не може опозвати. Да ли желите наставити?", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Ово осигурава да су ваши вредни разговори безбедно сачувани у вашој бекенд бази података. Хвала вам!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. 
(Default: 128)": "", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "", "This response was generated by \"{{model}}\"": "", "This will delete": "Ово ће обрисати", @@ -1132,7 +1132,7 @@ "Why?": "Зашто?", "Widescreen Mode": "Режим широког екрана", "Won": "Победа", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "Радни простор", "Workspace Permissions": "", "Write": "Пиши", diff --git a/src/lib/i18n/locales/sv-SE/translation.json b/src/lib/i18n/locales/sv-SE/translation.json index fefd3337d80..eecacb612e6 100644 --- a/src/lib/i18n/locales/sv-SE/translation.json +++ b/src/lib/i18n/locales/sv-SE/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "Tillåt röstavbrott under samtal", "Allowed Endpoints": "Tillåtna Endpoints", "Already have an account?": "Har du redan ett konto?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "Alternativ till top_p, och syftar till att säkerställa en balans mellan kvalitet och variation. Parametern p representerar den minsta sannolikheten för att en token ska beaktas, i förhållande till sannolikheten för den mest sannolika token. Till exempel, med p=0.05 och den mest sannolika token som har en sannolikhet på 0.9, filtreras logiter med ett värde mindre än 0.045 bort. (Standard: 0.0)", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "Alltid", "Amazing": "Fantastiskt", "an assistant": "en assistent", @@ -208,7 +208,7 @@ "Confirm your new password": "Bekräfta ditt nya lösenord", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "Anslutningar", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Kontakta administratören för att få åtkomst till WebUI", "Content": "Innehåll", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "", "Continue with LDAP": "", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "", - "Control the repetition of token sequences in the generated text. 
A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "", "Copied shared chat URL to clipboard!": "Kopierad delad chatt-URL till urklipp!", "Copied to clipboard": "", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "", "Enable Message Rating": "", - "Enable Mirostat sampling for controlling perplexity. 
(Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Aktivera nya registreringar", "Enabled": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Se till att din CSV-fil innehåller fyra kolumner i denna ordning: Name, Email, Password, Role.", @@ -566,7 +566,7 @@ "Include": "", "Include `--api-auth` flag when running stable-diffusion-webui": "", "Include `--api` flag when running stable-diffusion-webui": "Inkludera flaggan `--api` när du kör stable-diffusion-webui", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "Information", "Input commands": "Indatakommandon", "Install from Github URL": "Installera från Github-URL", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "Spela in röst", "Redirecting you to Open WebUI Community": "Omdirigerar dig till OpenWebUI Community", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Referera till dig själv som \"Användare\" (t.ex. 
\"Användaren lär sig spanska\")", "References from": "", "Refused when it shouldn't have": "Avvisades när det inte borde ha gjort det", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "Ange röst", "Set whisper model": "", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. 
Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "", "Settings": "Inställningar", "Settings saved successfully!": "Inställningar sparades framgångsrikt!", @@ -964,7 +964,7 @@ "System Prompt": "Systeminstruktion", "Tags Generation": "", "Tags Generation Prompt": "", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Tail free sampling används för att minska effekten av mindre sannolika tokens i resultatet. Ett högre värde (t.ex. 2,0) minskar effekten mer, medan ett värde på 1,0 inaktiverar den här inställningen. (standard: 1)", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "Tack för din feedback!", "The Application Account DN you bind with for search": "", "The base to search for users": "", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "Batchstorleken avgör hur många textförfrågningar som behandlas samtidigt. En högre batchstorlek kan öka modellens prestanda och hastighet, men kräver också mer minne. (Standard: 512)", + "The batch size determines how many text requests are processed together at once. 
A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Utvärderingens topplista är baserad på Elo-betygssystemet och uppdateras i realtid", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Betyget ska vara ett värde mellan 0.0 (0%) och 1.0 (100%).", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "Modellens temperatur. Om temperaturen höjs kommer modellen att svara mer kreativt. (Standard: 0,8)", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "Tema", "Thinking...": "Tänker...", "This action cannot be undone. Do you wish to continue?": "Denna åtgärd kan inte ångras. Vill du fortsätta?", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Detta säkerställer att dina värdefulla samtal sparas säkert till din backend-databas. Tack!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Detta är en experimentell funktion som kanske inte fungerar som förväntat och som kan komma att ändras när som helst.", - "This option controls how many tokens are preserved when refreshing the context. 
For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "Det här alternativet styr hur många tokens som ska bevaras när kontexten uppdateras. Om det t.ex. är inställt på 2 bevaras de senaste 2 tokens i konversationskontexten. Att bevara kontexten kan bidra till att upprätthålla kontinuiteten i en konversation, men det kan minska möjligheten att svara på nya ämnen. (Standard: 24)", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "Det här alternativet anger det maximala antalet tokens som modellen kan generera i sitt svar. Om du ökar denna gräns kan modellen ge längre svar, men det kan också öka sannolikheten för att ohjälpsamt eller irrelevant innehåll genereras. (Standard: 128)", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "", "This response was generated by \"{{model}}\"": "", "This will delete": "", @@ -1132,7 +1132,7 @@ "Why?": "Varför?", "Widescreen Mode": "Bredbildsläge", "Won": "Vann", - "Works together with top-k. 
A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "Fungerar tillsammans med top-k. Ett högre värde (t.ex. 0,95) leder till en mer varierad text, medan ett lägre värde (t.ex. 0,5) ger en mer fokuserad och konservativ text. (Standard: 0,9)", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "Arbetsyta", "Workspace Permissions": "", "Write": "Skriv", diff --git a/src/lib/i18n/locales/th-TH/translation.json b/src/lib/i18n/locales/th-TH/translation.json index 488142d81d8..d74d7c9224c 100644 --- a/src/lib/i18n/locales/th-TH/translation.json +++ b/src/lib/i18n/locales/th-TH/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "อนุญาตการแทรกเสียงในสาย", "Allowed Endpoints": "", "Already have an account?": "มีบัญชีอยู่แล้ว?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "", "an assistant": "ผู้ช่วย", @@ -208,7 +208,7 @@ "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "การเชื่อมต่อ", - "Constrains effort on reasoning for reasoning models. 
Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "ติดต่อผู้ดูแลระบบสำหรับการเข้าถึง WebUI", "Content": "เนื้อหา", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "", "Continue with LDAP": "", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "การควบคุม", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "", "Copied shared chat URL to clipboard!": "คัดลอก URL แชทที่แชร์ไปยังคลิปบอร์ดแล้ว!", "Copied to clipboard": "", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", "Enable Memory Mapping (mmap) to load model data. 
This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "", "Enable Message Rating": "", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "เปิดใช้งานการสมัครใหม่", "Enabled": "เปิดใช้งาน", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "ตรวจสอบว่าไฟล์ CSV ของคุณมี 4 คอลัมน์ในลำดับนี้: ชื่อ, อีเมล, รหัสผ่าน, บทบาท", @@ -566,7 +566,7 @@ "Include": "", "Include `--api-auth` flag when running stable-diffusion-webui": "รวมแฟลก `--api-auth` เมื่อเรียกใช้ stable-diffusion-webui", "Include `--api` flag when running stable-diffusion-webui": "รวมแฟลก `--api` เมื่อเรียกใช้ stable-diffusion-webui", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "ข้อมูล", "Input commands": "คำสั่งป้อนข้อมูล", "Install from Github URL": "ติดตั้งจาก URL ของ Github", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "บันทึกเสียง", "Redirecting you to Open WebUI Community": "กำลังเปลี่ยนเส้นทางคุณไปยังชุมชน OpenWebUI", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "", + "Reduces the probability of generating nonsense. 
A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "เรียกตัวเองว่า \"ผู้ใช้\" (เช่น \"ผู้ใช้กำลังเรียนภาษาสเปน\")", "References from": "", "Refused when it shouldn't have": "ปฏิเสธเมื่อไม่ควรทำ", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "ตั้งค่าเสียง", "Set whisper model": "", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. 
A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "", "Settings": "การตั้งค่า", "Settings saved successfully!": "บันทึกการตั้งค่าเรียบร้อยแล้ว!", @@ -964,7 +964,7 @@ "System Prompt": "ระบบพรอมต์", "Tags Generation": "", "Tags Generation Prompt": "", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "แตะเพื่อขัดจังหวะ", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "ขอบคุณสำหรับความคิดเห็นของคุณ!", "The Application Account DN you bind with for search": "", "The base to search for users": "", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "", + "The batch size determines how many text requests are processed together at once. 
A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "นักพัฒนาที่อยู่เบื้องหลังปลั๊กอินนี้เป็นอาสาสมัครที่มีชื่นชอบการแบ่งบัน หากคุณพบว่าปลั๊กอินนี้มีประโยชน์ โปรดพิจารณาสนับสนุนการพัฒนาของเขา", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "คะแนนควรอยู่ระหว่าง 0.0 (0%) ถึง 1.0 (100%)", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "ธีม", "Thinking...": "กำลังคิด...", "This action cannot be undone. Do you wish to continue?": "การกระทำนี้ไม่สามารถย้อนกลับได้ คุณต้องการดำเนินการต่อหรือไม่?", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "สิ่งนี้ทำให้มั่นใจได้ว่าการสนทนาที่มีค่าของคุณจะถูกบันทึกอย่างปลอดภัยในฐานข้อมูลแบ็กเอนด์ของคุณ ขอบคุณ!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "นี่เป็นฟีเจอร์ทดลอง อาจไม่ทำงานตามที่คาดไว้และอาจมีการเปลี่ยนแปลงได้ตลอดเวลา", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. 
Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "", "This response was generated by \"{{model}}\"": "", "This will delete": "สิ่งนี้จะลบ", @@ -1132,7 +1132,7 @@ "Why?": "", "Widescreen Mode": "โหมดหน้าจอกว้าง", "Won": "", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "", + "Works together with top-k. 
A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "พื้นที่ทำงาน", "Workspace Permissions": "", "Write": "", diff --git a/src/lib/i18n/locales/tk-TW/translation.json b/src/lib/i18n/locales/tk-TW/translation.json index 9bebdd923b8..d0930120ca0 100644 --- a/src/lib/i18n/locales/tk-TW/translation.json +++ b/src/lib/i18n/locales/tk-TW/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "", "Allowed Endpoints": "", "Already have an account?": "", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "", "an assistant": "", @@ -208,7 +208,7 @@ "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. 
Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "", "Content": "", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "", "Continue with LDAP": "", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "", "Copied shared chat URL to clipboard!": "", "Copied to clipboard": "", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. 
However, it may not work correctly with all systems and can consume a significant amount of disk space.": "", "Enable Message Rating": "", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "", "Enabled": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "", @@ -566,7 +566,7 @@ "Include": "", "Include `--api-auth` flag when running stable-diffusion-webui": "", "Include `--api` flag when running stable-diffusion-webui": "", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "", "Input commands": "", "Install from Github URL": "", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "", "Redirecting you to Open WebUI Community": "", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "", "References from": "", "Refused when it shouldn't have": "", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. 
Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "", "Set whisper model": "", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. 
Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "", "Settings": "", "Settings saved successfully!": "", @@ -964,7 +964,7 @@ "System Prompt": "", "Tags Generation": "", "Tags Generation Prompt": "", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "", "The Application Account DN you bind with for search": "", "The base to search for users": "", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "", - "The temperature of the model. 
Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "", "Thinking...": "", "This action cannot be undone. Do you wish to continue?": "", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "", "This response was generated by \"{{model}}\"": "", "This will delete": "", @@ -1132,7 +1132,7 @@ "Why?": "", "Widescreen Mode": "", "Won": "", - "Works together with top-k. 
A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "", "Workspace Permissions": "", "Write": "", diff --git a/src/lib/i18n/locales/tr-TR/translation.json b/src/lib/i18n/locales/tr-TR/translation.json index 25fe0256c45..3e596941bf0 100644 --- a/src/lib/i18n/locales/tr-TR/translation.json +++ b/src/lib/i18n/locales/tr-TR/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "Aramada Ses Kesintisine İzin Ver", "Allowed Endpoints": "İzin Verilen Uç Noktalar", "Already have an account?": "Zaten bir hesabınız mı var?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "top_p'ye bir alternatif ve kalite ile çeşitlilik arasında bir denge sağlamayı amaçlar. p parametresi, en olası tokenin olasılığına göre, bir tokenin dikkate alınması için minimum olasılığı temsil eder. Örneğin, p=0.05 ve en olası tokenin 0.9 olasılığı ile 0.045'ten küçük bir değere sahip logitler filtrelenir. (Varsayılan: 0.0)", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "Harika", "an assistant": "bir asistan", @@ -208,7 +208,7 @@ "Confirm your new password": "Yeni parolanızı onaylayın", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "Bağlantılar", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "WebUI Erişimi için Yöneticiyle İletişime Geçin", "Content": "İçerik", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "E-posta ile devam edin", "Continue with LDAP": "LDAP ile devam edin", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Mesaj metninin TTS istekleri için nasıl bölüneceğini kontrol edin. 'Noktalama' cümlelere, 'paragraflar' paragraflara böler ve 'hiçbiri' mesajı tek bir dize olarak tutar.", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "Kontroller", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "Çıktının tutarlılığı ve çeşitliliği arasındaki dengeyi kontrol eder. 
Daha düşük bir değer, daha odaklanmış ve tutarlı bir metinle sonuçlanacaktır. (Varsayılan: 5.0)", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "Kopyalandı", "Copied shared chat URL to clipboard!": "Paylaşılan sohbet URL'si panoya kopyalandı!", "Copied to clipboard": "Panoya kopyalandı", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "", "Enable Message Rating": "Mesaj Değerlendirmeyi Etkinleştir", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Perplexity'yi kontrol etmek için Mirostat örnekleme özelliğini etkinleştirin. 
(Varsayılan: 0, 0 = Devre Dışı, 1 = Mirostat, 2 = Mirostat 2.0)", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Yeni Kayıtları Etkinleştir", "Enabled": "Etkin", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "CSV dosyanızın şu sırayla 4 sütun içerdiğinden emin olun: İsim, E-posta, Şifre, Rol.", @@ -566,7 +566,7 @@ "Include": "Dahil etmek", "Include `--api-auth` flag when running stable-diffusion-webui": "stable-diffusion-webui çalıştırılırken `--api-auth` bayrağını dahil edin", "Include `--api` flag when running stable-diffusion-webui": "stable-diffusion-webui çalıştırılırken `--api` bayrağını dahil edin", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "Bilgi", "Input commands": "Giriş komutları", "Install from Github URL": "Github URL'sinden yükleyin", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "Ses kaydı yap", "Redirecting you to Open WebUI Community": "OpenWebUI Topluluğuna yönlendiriliyorsunuz", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 
10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Kendinizden \"User\" olarak bahsedin (örneğin, \"User İspanyolca öğreniyor\")", "References from": "Referanslar arasından", "Refused when it shouldn't have": "Reddedilmemesi gerekirken reddedildi", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "Ses Ayarla", "Set whisper model": "Fısıltı modelini ayarla", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "Tekrarları önlemek için modelin ne kadar geriye bakacağını ayarlar. (Varsayılan: 64, 0 = devre dışı, -1 = num_ctx)", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "Oluşturma için kullanılacak rastgele sayı tohumunu(seed) ayarlar. Bunu belirli bir sayıya ayarlamak, modelin aynı prompt için aynı metni oluşturmasını sağlar. (Varsayılan: rastgele)", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "Bir sonraki tokenı oluşturmak için kullanılan bağlam penceresinin boyutunu ayarlar. 
(Varsayılan: 2048)", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Kullanılacak durma dizilerini ayarlar. Bu desenle karşılaşıldığında, LLM metin oluşturmayı durduracak ve geri dönecektir. Birden çok durma deseni, bir modelfile'da birden çok ayrı durma parametresi belirterek ayarlanabilir.", "Settings": "Ayarlar", "Settings saved successfully!": "Ayarlar başarıyla kaydedildi!", @@ -964,7 +964,7 @@ "System Prompt": "Sistem Promptu", "Tags Generation": "Etiketler Oluşturma", "Tags Generation Prompt": "Etiketler Oluşturma Promptu", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "Durdurmak için dokunun", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "Geri bildiriminiz için teşekkürler!", "The Application Account DN you bind with for search": "Arama için bağlandığınız Uygulama Hesap DN'si", "The base to search for users": "Kullanıcıları aramak için temel", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "Toplu boyut, kaç metin isteğinin aynı anda işlendiğini belirler. Daha yüksek bir toplu boyut, modelin performansını ve hızını artırabilir, ancak daha fazla bellek gerektirir. (Varsayılan: 512)", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Bu eklentinin arkasındaki geliştiriciler topluluktan tutkulu gönüllülerdir. Bu eklentinin yararlı olduğunu düşünüyorsanız, gelişimine katkıda bulunmayı düşünün.", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "MB cinsinden maksimum dosya boyutu. Dosya boyutu bu sınırı aşarsa, dosya yüklenmeyecektir.", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Sohbette aynı anda kullanılabilecek maksimum dosya sayısı. 
Dosya sayısı bu sınırı aşarsa, dosyalar yüklenmeyecektir.", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Puan 0.0 (%0) ile 1.0 (%100) arasında bir değer olmalıdır.", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "Tema", "Thinking...": "Düşünüyor...", "This action cannot be undone. Do you wish to continue?": "Bu eylem geri alınamaz. Devam etmek istiyor musunuz?", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Bu, önemli konuşmalarınızın güvenli bir şekilde arkayüz veritabanınıza kaydedildiğini garantiler. Teşekkür ederiz!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Bu deneysel bir özelliktir, beklendiği gibi çalışmayabilir ve her an değişiklik yapılabilir.", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. 
Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "Bu seçenek, koleksiyondaki tüm mevcut dosyaları silecek ve bunları yeni yüklenen dosyalarla değiştirecek.", "This response was generated by \"{{model}}\"": "Bu yanıt \"{{model}}\" tarafından oluşturuldu", "This will delete": "Bu silinecek", @@ -1132,7 +1132,7 @@ "Why?": "Neden?", "Widescreen Mode": "Geniş Ekran Modu", "Won": "kazandı", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "Çalışma Alanı", "Workspace Permissions": "Çalışma Alanı İzinleri", "Write": "", diff --git a/src/lib/i18n/locales/uk-UA/translation.json b/src/lib/i18n/locales/uk-UA/translation.json index 57829d390e0..12280b97e57 100644 --- a/src/lib/i18n/locales/uk-UA/translation.json +++ b/src/lib/i18n/locales/uk-UA/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "Дозволити переривання голосу під час виклику", "Allowed Endpoints": "Дозволені кінцеві точки", "Already have an account?": "Вже є обліковий запис?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "Альтернатива параметру top_p, яка має на меті забезпечити баланс якості та різноманітності. 
Параметр p представляє мінімальну ймовірність для того, щоб токен був врахований, відносно ймовірності найбільш ймовірного токена. Наприклад, при p=0.05 і найбільш імовірному токені з ймовірністю 0.9, логіти зі значенням менше 0.045 будуть відфільтровані. (За замовчуванням: 0.0)", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "Завжди", "Amazing": "Чудово", "an assistant": "асистента", @@ -208,7 +208,7 @@ "Confirm your new password": "Підтвердіть свій новий пароль", "Connect to your own OpenAI compatible API endpoints.": "Підключіться до своїх власних API-ендпоінтів, сумісних з OpenAI.", "Connections": "З'єднання", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "Обмежує зусилля на міркування для моделей міркування. Застосовується лише до моделей від певних постачальників, які підтримують зусилля на міркування. (За замовчуванням: середній)", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Зверніться до адміна для отримання доступу до WebUI", "Content": "Зміст", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "Продовжити з електронною поштою", "Continue with LDAP": "Продовжити з LDAP", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Керування розбиттям тексту повідомлення для TTS-запитів. 
'Punctuation' розбиває на речення, 'paragraphs' розбиває на абзаци, а 'none' залишає повідомлення як один рядок.", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "Керування", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "Контролює баланс між зв'язністю та різноманітністю виходу. Нижче значення призведе до більш зосередженого та зв'язного тексту. (За замовчуванням: 5.0)", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "Скопійовано", "Copied shared chat URL to clipboard!": "Скопійовано URL-адресу спільного чату в буфер обміну!", "Copied to clipboard": "Скопійовано в буфер обміну", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Увімкнути блокування пам'яті (mlock), щоб запобігти виведенню даних моделі з оперативної пам'яті. Цей параметр блокує робочий набір сторінок моделі в оперативній пам'яті, гарантуючи, що вони не будуть виведені на диск. Це може допомогти підтримувати продуктивність, уникати помилок сторінок та забезпечувати швидкий доступ до даних.", "Enable Memory Mapping (mmap) to load model data. 
This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Увімкнути відображення пам'яті (mmap) для завантаження даних моделі. Цей параметр дозволяє системі використовувати дискове сховище як розширення оперативної пам'яті, трактуючи файли на диску, як ніби вони знаходяться в RAM. Це може покращити продуктивність моделі, дозволяючи швидший доступ до даних. Однак, він може не працювати коректно на всіх системах і може споживати значну кількість дискового простору.", "Enable Message Rating": "Увімкнути оцінку повідомлень", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Увімкнути вибірку Mirostat для контролю над непередбачуваністю. (За замовчуванням: 0, 0 = Вимкнено, 1 = Mirostat, 2 = Mirostat 2.0)", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Дозволити нові реєстрації", "Enabled": "Увімкнено", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Переконайтеся, що ваш CSV-файл містить 4 колонки в такому порядку: Ім'я, Email, Пароль, Роль.", @@ -566,7 +566,7 @@ "Include": "Включити", "Include `--api-auth` flag when running stable-diffusion-webui": "Включіть прапорець `--api-auth` під час запуску stable-diffusion-webui", "Include `--api` flag when running stable-diffusion-webui": "Включіть прапор `--api` при запуску stable-diffusion-webui", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "Впливає на швидкість, з якою алгоритм реагує на зворотній зв'язок від згенерованого тексту. 
Нижча швидкість навчання призведе до повільнішої корекції, тоді як вища швидкість навчання зробить алгоритм більш реакційним. (За замовчуванням: 0.1)", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "Інфо", "Input commands": "Команди вводу", "Install from Github URL": "Встановіть з URL-адреси Github", @@ -809,7 +809,7 @@ "Reasoning Effort": "Зусилля на міркування", "Record voice": "Записати голос", "Redirecting you to Open WebUI Community": "Перенаправляємо вас до спільноти OpenWebUI", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "Знижує ймовірність генерації безглуздих відповідей. Вищі значення (напр., 100) призведуть до більш різноманітних відповідей, тоді як нижчі значення (напр., 10) будуть більш обережними. (За замовчуванням: 40)", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Називайте себе \"Користувач\" (напр., \"Користувач вивчає іспанську мову\")", "References from": "Посилання з", "Refused when it shouldn't have": "Відмовив, коли не мав би", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Встановити кількість робочих потоків, що використовуються для обробки інформації. Ця опція керує кількістю потоків, що використовуються для обробки надходження запитів одночасно. 
Збільшення цього значення може підвищити продуктивність при великій одночасності робіт, але також може споживати більше ресурсів CPU.", "Set Voice": "Встановити голос", "Set whisper model": "Встановити модель whisper", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "Встановлює, на скільки кроків назад модель повинна звертатися, щоб запобігти повторенням. (За замовчуванням: 64, 0 = вимкнено, -1 = num_ctx)", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "Встановлює насіння випадкового числа для генерації. Вказавши конкретне число, модель буде генерувати той самий текст для одного й того ж запиту. (За замовчуванням: випадкове)", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "Встановлює розмір вікна контексту, яке використовується для генерації наступного токена. (За замовчуванням: 2048)", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. 
At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Встановлює послідовності зупинки, які будуть використовуватися. Коли зустрічається така послідовність, LLM припиняє генерацію тексту і повертає результат. Можна встановити кілька послідовностей зупинки, вказавши кілька окремих параметрів зупинки у файлі моделі.", "Settings": "Налаштування", "Settings saved successfully!": "Налаштування успішно збережено!", @@ -964,7 +964,7 @@ "System Prompt": "Системний промт", "Tags Generation": "Генерація тегів", "Tags Generation Prompt": "Підказка для генерації тегів", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Вибірка з відрізанням хвоста використовується для зменшення впливу малоймовірних токенів на результат. Вищі значення (напр., 2.0) зменшують цей вплив більше, в той час як значення 1.0 вимикає цю настройку. (За замовчуванням: 1)", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "Натисніть, щоб перервати", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "Дякуємо за ваш відгук!", "The Application Account DN you bind with for search": "DN облікового запису застосунку, з яким ви здійснюєте прив'язку для пошуку", "The base to search for users": "База для пошуку користувачів", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "Розмір пакету визначає, скільки текстових запитів обробляється одночасно. Більший розмір пакету може підвищити продуктивність та швидкість моделі, але також вимагає більше пам'яті. (За замовчуванням: 512)", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Розробники цього плагіна - пристрасні волонтери зі спільноти. Якщо ви вважаєте цей плагін корисним, будь ласка, зробіть свій внесок у його розвиток.", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Таблиця лідерів оцінки базується на системі рейтингу Ело і оновлюється в реальному часі.", "The LDAP attribute that maps to the mail that users use to sign in.": "LDAP-атрибут, який відповідає за пошту, яку користувачі використовують для входу.", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Максимальний розмір файлу в МБ. 
Якщо розмір файлу перевищує цей ліміт, файл не буде завантажено.", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Максимальна кількість файлів, які можна використати одночасно в чаті. Якщо кількість файлів перевищує цей ліміт, файли не будуть завантажені.", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Оцінка повинна бути в діапазоні від 0.0 (0%) до 1.0 (100%).", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "Температура моделі. Збільшення температури зробить відповіді моделі більш креативними. (За замовчуванням: 0.8)", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "Тема", "Thinking...": "Думаю...", "This action cannot be undone. Do you wish to continue?": "Цю дію не можна скасувати. Ви бажаєте продовжити?", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Це забезпечує збереження ваших цінних розмов у безпечному бекенд-сховищі. Дякуємо!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Це експериментальна функція, вона може працювати не так, як очікувалося, і може бути змінена в будь-який час.", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "Ця опція керує кількістю токенів, яка зберігається під час оновлення контексту. Наприклад, якщо встановити як 2, останні 2 токени контексту розмови будуть збережені. 
Збереження контексту може допомогти підтримувати безперервність розмови, але воно також може зменшити здатність відповідати на нові теми. (За замовчуванням: 24)", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "Ця опція встановлює максимальну кількість токенів, які модель може генерувати в своєму відповіді. Збільшення цього обмеження дозволяє моделі надавати довші відповіді, але також може збільшити вірогідність генерації недопоможного чи невідповідного вмісту. (За замовчуванням: 128)", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "Цей варіант видалить усі існуючі файли в колекції та замінить їх новими завантаженими файлами.", "This response was generated by \"{{model}}\"": "Цю відповідь згенеровано за допомогою \"{{model}}\"", "This will delete": "Це призведе до видалення", @@ -1132,7 +1132,7 @@ "Why?": "Чому?", "Widescreen Mode": "Широкоекранний режим", "Won": "Переможець", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "Працює разом з top-k. 
Більше значення (напр., 0.95) приведе до більш різноманітного тексту, тоді як менше значення (напр., 0.5) згенерує більш зосереджений і консервативний текст. (За замовчуванням: 0.9)", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "Робочий простір", "Workspace Permissions": "Дозволи робочого простору.", "Write": "Писати", diff --git a/src/lib/i18n/locales/ur-PK/translation.json b/src/lib/i18n/locales/ur-PK/translation.json index 136a8a8b396..d4308209b02 100644 --- a/src/lib/i18n/locales/ur-PK/translation.json +++ b/src/lib/i18n/locales/ur-PK/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "کال میں آواز کی مداخلت کی اجازت دیں", "Allowed Endpoints": "", "Already have an account?": "کیا پہلے سے اکاؤنٹ موجود ہے؟", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "", "an assistant": "معاون", @@ -208,7 +208,7 @@ "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "کنکشنز", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. 
(Default: medium)": "", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "ویب یو آئی رسائی کے لیے ایڈمن سے رابطہ کریں", "Content": "مواد", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "", "Continue with LDAP": "", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "TTS درخواستوں کے لیے پیغام کے متن کی تقسیم کو کنٹرول کریں 'Punctuation' جملوں میں تقسیم کرتا ہے، 'paragraphs' پیراگراف میں تقسیم کرتا ہے، اور 'none' پیغام کو ایک ہی سٹرنگ کے طور پر رکھتا ہے", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "کنٹرولز", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "کاپی کیا گیا", "Copied shared chat URL to clipboard!": "مشترکہ چیٹ یو آر ایل کلپ بورڈ میں نقل کر دیا گیا!", "Copied to clipboard": "کلپ بورڈ پر نقل کر دیا گیا", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. 
This can help maintain performance by avoiding page faults and ensuring fast data access.": "", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "", "Enable Message Rating": "پیغام کی درجہ بندی فعال کریں", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "نئے سائن اپس کو فعال کریں", "Enabled": "فعال کردیا گیا ہے", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "یقینی بنائیں کہ آپ کی CSV فائل میں 4 کالم اس ترتیب میں شامل ہوں: نام، ای میل، پاس ورڈ، کردار", @@ -566,7 +566,7 @@ "Include": "شامل کریں", "Include `--api-auth` flag when running stable-diffusion-webui": "`--api-auth` پرچم کو چلانے کے وقت شامل کریں stable-diffusion-webui", "Include `--api` flag when running stable-diffusion-webui": "اسٹیبل-ڈیفیوژن-ویب یو آئی چلانے کے دوران `--api` فلیگ شامل کریں", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "", + "Influences how quickly the algorithm responds to feedback from the generated text. 
A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "معلومات", "Input commands": "کمانڈز داخل کریں", "Install from Github URL": "گِٹ حب یو آر ایل سے انسٹال کریں", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "صوت ریکارڈ کریں", "Redirecting you to Open WebUI Community": "آپ کو اوپن ویب یو آئی کمیونٹی کی طرف ری ڈائریکٹ کیا جا رہا ہے", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "خود کو \"صارف\" کے طور پر حوالہ دیں (جیسے، \"صارف ہسپانوی سیکھ رہا ہے\")", "References from": "سے حوالہ جات", "Refused when it shouldn't have": "جب انکار نہیں ہونا چاہیے تھا، انکار کر دیا", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "آواز کے لئے سیٹ کریں", "Set whisper model": "وِسپر ماڈل مرتب کریں", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. 
(Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "", "Settings": "ترتیبات", "Settings saved successfully!": "ترتیبات کامیابی کے ساتھ محفوظ ہو گئیں!", @@ -964,7 +964,7 @@ "System Prompt": "سسٹم پرومپٹ", "Tags Generation": "", "Tags Generation Prompt": "پرمپٹ کے لیے ٹیگز بنائیں", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "رکنے کے لئے ٹچ کریں", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "آپ کی رائے کا شکریہ!", "The Application Account DN you bind with for search": "", "The base to search for users": "", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "اس پلگ ان کے پیچھے موجود ڈویلپرز کمیونٹی کے پرجوش رضاکار ہیں اگر آپ کو یہ پلگ ان مددگار لگتا ہے تو برائے مہربانی اس کی ترقی میں اپنا حصہ ڈالنے پر غور کریں", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "تشخیصی لیڈربورڈ ایلو ریٹنگ سسٹم پر مبنی ہے اور یہ حقیقی وقت میں اپ ڈیٹ ہوتا ہے", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "زیادہ سے زیادہ فائل سائز ایم بی میں اگر فائل سائز اس حد سے تجاوز کر جاتا ہے، تو فائل اپ لوڈ نہیں ہوگی", "The maximum number of files that can be used at once in chat. 
If the number of files exceeds this limit, the files will not be uploaded.": "چیٹ میں ایک وقت میں استعمال ہونے والی فائلوں کی زیادہ سے زیادہ تعداد اگر فائلوں کی تعداد اس حد سے تجاوز کر جائے تو فائلیں اپلوڈ نہیں کی جائیں گی", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "سکور کی قیمت کو 0.0 (0%) اور 1.0 (100%) کے درمیان ہونا چاہیے", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "تھیم", "Thinking...": "سوچ رہا ہے...", "This action cannot be undone. Do you wish to continue?": "یہ عمل واپس نہیں کیا جا سکتا کیا آپ جاری رکھنا چاہتے ہیں؟", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "یہ یقینی بناتا ہے کہ آپ کی قیمتی گفتگو محفوظ طریقے سے آپ کے بیک اینڈ ڈیٹا بیس میں محفوظ کی گئی ہیں شکریہ!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "یہ ایک تجرباتی خصوصیت ہے، یہ متوقع طور پر کام نہ کر سکتی ہو اور کسی بھی وقت تبدیل کی جا سکتی ہے", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. 
Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "اس اختیار سے مجموعہ میں موجود تمام فائلز حذف ہو جائیں گی اور ان کی جگہ نئی اپ لوڈ کردہ فائلز لی جائیں گی", "This response was generated by \"{{model}}\"": "یہ جواب \"{{model}}\" کے ذریعہ تیار کیا گیا", "This will delete": "یہ حذف کر دے گا", @@ -1132,7 +1132,7 @@ "Why?": "", "Widescreen Mode": "وائڈ اسکرین موڈ", "Won": "جیتا", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "ورک اسپیس", "Workspace Permissions": "", "Write": "", diff --git a/src/lib/i18n/locales/vi-VN/translation.json b/src/lib/i18n/locales/vi-VN/translation.json index 2bc35723b3e..5572fff5550 100644 --- a/src/lib/i18n/locales/vi-VN/translation.json +++ b/src/lib/i18n/locales/vi-VN/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "Cho phép gián đoạn giọng nói trong cuộc gọi", "Allowed Endpoints": "", "Already have an account?": "Bạn đã có tài khoản?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", "Amazing": "", "an assistant": "trợ lý", @@ -208,7 +208,7 @@ "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", "Connections": "Kết nối", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Liên hệ với Quản trị viên để được cấp quyền truy cập", "Content": "Nội dung", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "", "Continue with LDAP": "", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "", - "Controls the balance between coherence and diversity of the output. 
A lower value will result in more focused and coherent text. (Default: 5.0)": "", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "Đã sao chép", "Copied shared chat URL to clipboard!": "Đã sao chép link chia sẻ trò chuyện vào clipboard!", "Copied to clipboard": "", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "", "Enable Message Rating": "Cho phép phản hồi, đánh giá", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Cho phép đăng ký mới", "Enabled": "Đã bật", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Đảm bảo tệp CSV của bạn bao gồm 4 cột theo thứ tự sau: Name, Email, Password, Role.", @@ -566,7 +566,7 @@ "Include": "", "Include `--api-auth` flag when running stable-diffusion-webui": "", "Include `--api` flag when running stable-diffusion-webui": "Bao gồm flag `--api` khi chạy stable-diffusion-webui", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. 
(Default: 0.1)": "", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "Thông tin", "Input commands": "Nhập các câu lệnh", "Install from Github URL": "Cài đặt từ Github URL", @@ -809,7 +809,7 @@ "Reasoning Effort": "", "Record voice": "Ghi âm", "Redirecting you to Open WebUI Community": "Đang chuyển hướng bạn đến Cộng đồng OpenWebUI", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Hãy coi bản thân mình như \"Người dùng\" (ví dụ: \"Người dùng đang học Tiếng Tây Ban Nha\")", "References from": "", "Refused when it shouldn't have": "Từ chối trả lời mà nhẽ không nên làm vậy", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "", "Set Voice": "Đặt Giọng nói", "Set whisper model": "", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. 
(Default: 1.1)": "", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "", "Settings": "Cài đặt", "Settings saved successfully!": "Cài đặt đã được lưu thành công!", @@ -964,7 +964,7 @@ "System Prompt": "Prompt Hệ thống (System Prompt)", "Tags Generation": "", "Tags Generation Prompt": "", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "Chạm để ngừng", "Tasks": "", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "Cám ơn bạn đã gửi phản hồi!", "The Application Account DN you bind with for search": "", "The base to search for users": "", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Các nhà phát triển đằng sau plugin này là những tình nguyện viên nhiệt huyết của cộng đồng. Nếu bạn thấy plugin này hữu ích, vui lòng cân nhắc đóng góp cho sự phát triển của nó.", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "", "The LDAP attribute that maps to the mail that users use to sign in.": "", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Điểm (score) phải có giá trị từ 0,0 (0%) đến 1,0 (100%).", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "Chủ đề", "Thinking...": "Đang suy luận...", "This action cannot be undone. 
Do you wish to continue?": "Hành động này không thể được hoàn tác. Bạn có muốn tiếp tục không?", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Điều này đảm bảo rằng các nội dung chat có giá trị của bạn được lưu an toàn vào cơ sở dữ liệu backend của bạn. Cảm ơn bạn!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Đây là tính năng thử nghiệm, có thể không hoạt động như mong đợi và có thể thay đổi bất kỳ lúc nào.", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. 
Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "", "This response was generated by \"{{model}}\"": "", "This will delete": "Chat này sẽ bị xóa", @@ -1132,7 +1132,7 @@ "Why?": "", "Widescreen Mode": "Chế độ màn hình rộng", "Won": "", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "Workspace", "Workspace Permissions": "", "Write": "", diff --git a/src/lib/i18n/locales/zh-CN/translation.json b/src/lib/i18n/locales/zh-CN/translation.json index c64ce40da5a..c3fe7c09e47 100644 --- a/src/lib/i18n/locales/zh-CN/translation.json +++ b/src/lib/i18n/locales/zh-CN/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "允许通话中的打断语音", "Allowed Endpoints": "允许的端点", "Already have an account?": "已经拥有账号了?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "top_p的替代方法,目标是在质量和多样性之间取得平衡。参数p表示一个Token相对于最有可能的Token所需的最低概率。比如,当p=0.05且最有可能的Token概率为0.9时,概率低于0.045的logits会被排除。(默认值:0.0)", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "保持", "Amazing": "很棒", "an assistant": "一个助手", @@ -208,7 +208,7 @@ "Confirm your new password": "确认新密码", "Connect to your own OpenAI compatible API endpoints.": "连接到你自己的与 OpenAI 兼容的 API 接口端点。", "Connections": "外部连接", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "限制模型的努力。仅适用于支持努力的特定提供商的模型。(默认值:中等)", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "请联系管理员以获取访问权限", "Content": "内容", "Content Extraction Engine": "", @@ -218,9 +218,9 @@ "Continue with Email": "使用邮箱登录", "Continue with LDAP": "使用 LDAP 登录", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "控制消息文本如何拆分以用于 TTS 请求。“Punctuation”拆分为句子,“paragraphs”拆分为段落,“none”将消息保留为单个字符串。", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "控制生成文本中 Token 的重复。较高的值(例如 1.5)会更强烈地惩罚重复,而较低的值(例如 1.1)则更宽松。设置为 1 时,此功能被禁用。(默认值:1.1)", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "对话高级设置", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "控制输出的连贯性和多样性之间的平衡。较低的值将导致更集中和连贯的文本。(默认值:5.0)", + "Controls the balance between coherence and diversity of the output. 
A lower value will result in more focused and coherent text.": "", "Copied": "已复制", "Copied shared chat URL to clipboard!": "已复制此对话分享链接至剪贴板!", "Copied to clipboard": "已复制到剪贴板", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "启用内存锁定(mlock)以防止模型数据被交换出RAM。此选项将模型的工作集页面锁定在RAM中,确保它们不会被交换到磁盘。这可以通过避免页面错误和确保快速数据访问来帮助维持性能。", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "启用内存映射(mmap)以加载模型数据。此选项允许系统通过将磁盘文件视为在RAM中来使用磁盘存储作为RAM的扩展。这可以通过更快的数据访问来提高模型性能。然而,它可能无法在所有系统上正常工作,并且可能会消耗大量磁盘空间。", "Enable Message Rating": "启用回复评价", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "启用 Mirostat 采样以控制困惑度。(默认值:0,0 = 禁用,1 = Mirostat,2 = Mirostat 2.0)", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "允许新用户注册", "Enabled": "启用", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "确保您的 CSV 文件按以下顺序包含 4 列: 姓名、电子邮箱、密码、角色。", @@ -566,7 +566,7 @@ "Include": "包括", "Include `--api-auth` flag when running stable-diffusion-webui": "运行 stable-diffusion-webui 时包含 `--api-auth` 参数", "Include `--api` flag when running stable-diffusion-webui": "运行 stable-diffusion-webui 时包含 `--api` 参数", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. 
(Default: 0.1)": "影响算法对生成文本反馈的响应速度。较低的学习率会导致调整速度较慢,而较高的学习率会使算法更具响应性。(默认值:0.1)", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "信息", "Input commands": "输入命令", "Install from Github URL": "从 Github URL 安装", @@ -809,7 +809,7 @@ "Reasoning Effort": "推理努力", "Record voice": "录音", "Redirecting you to Open WebUI Community": "正在将您重定向到 OpenWebUI 社区", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "降低产生无意义答案的概率。数值越大(如 100),答案就越多样化,而数值越小(如 10),答案就越保守。(默认值:40)", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "使用\"User\" (用户) 来指代自己(例如:“User 正在学习西班牙语”)", "References from": "来自", "Refused when it shouldn't have": "无理拒绝", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "设置用于计算的工作线程数量。该选项可控制并发处理传入请求的线程数量。增加该值可以提高高并发工作负载下的性能,但也可能消耗更多的 CPU 资源。", "Set Voice": "设置音色", "Set whisper model": "设置 whisper 模型", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. 
(Default: 0)": "这个设置项用于调整对重复 Token 的抑制强度。当某个 Token 至少出现过一次后,系统会通过 flat bias 参数施加惩罚力度:数值越大(如 1.5),抑制重复的效果越强烈;数值较小(如 0.9)则相对宽容。当设为 0 时,系统会完全关闭这个重复抑制功能(默认值为 0)。", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "这个参数用于通过 scaling bias 机制抑制重复内容:当某些 Token 重复出现时,系统会根据它们已出现的次数自动施加惩罚。数值越大(如 1.5)惩罚力度越强,能更有效减少重复;数值较小(如 0.9)则允许更多重复。当设为 0 时完全关闭该功能,默认值设置为 1.1 保持适度抑制。", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "设置模型回溯多远以防止重复。(默认值:64,0 = 禁用,-1 = num_ctx)", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "设置 random number seed 可以控制模型生成文本的随机起点。如果指定一个具体数字,当输入相同的提示语时,模型每次都会生成完全相同的文本内容(默认是随机选取 seed)。", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "设置用于生成下一个 Token 的上下文大小。(默认值:2048)", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. 
When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "设置要使用的停止序列。遇到这种模式时,大语言模型将停止生成文本并返回。可以通过在模型文件中指定多个单独的停止参数来设置多个停止模式。", "Settings": "设置", "Settings saved successfully!": "设置已成功保存!", @@ -964,7 +964,7 @@ "System Prompt": "系统提示词 (System Prompt)", "Tags Generation": "标签生成", "Tags Generation Prompt": "标签生成提示词", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Tail free sampling 用于减少输出中可能性较低的Token的影响。数值越大(如 2.0),影响就越小,而数值为 1.0 则会禁用此设置。(默认值:1)", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "与模型交谈", "Tap to interrupt": "点击以中断", "Tasks": "任务", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "感谢您的反馈!", "The Application Account DN you bind with for search": "您所绑定用于搜索的 Application Account DN", "The base to search for users": "搜索用户的 Base", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "Batch size 决定了同时处理多少个文本请求。Batch size 越大,模型的性能和速度越快,但也需要更多内存。 (默认值:512)", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. 
If you find this plugin helpful, please consider contributing to its development.": "本插件的背后开发者是社区中热情的志愿者。如果此插件有帮助到您,烦请考虑一下为它的开发做出贡献。", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "排行榜基于 Elo 评级系统并实时更新。", "The LDAP attribute that maps to the mail that users use to sign in.": "映射到用户登录时使用的邮箱的 LDAP 属性。", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "最大文件大小(MB)。如果文件大小超过此限制,则无法上传该文件。", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "在单次对话中可以使用的最大文件数。如果文件数超过此限制,则文件不会上传。", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "分值应介于 0.0(0%)和 1.0(100%)之间。", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "模型的温度。提高温度将使模型更具创造性地回答。(默认值:0.8)", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "主题", "Thinking...": "正在思考...", "This action cannot be undone. Do you wish to continue?": "此操作无法撤销。是否确认继续?", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "这将确保您的宝贵对话被安全地保存到后台数据库中。感谢!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "这是一个实验功能,可能不会如预期那样工作,而且可能随时发生变化。", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "该选项控制刷新上下文时保留多少Token。例如,如果设置为 2,就会保留对话上下文的最后 2 个Token。保留上下文有助于保持对话的连续性,但可能会降低回复新话题的能力。(默认值:24)", - "This option sets the maximum number of tokens the model can generate in its response. 
Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "此选项设置了模型在回答中可以生成的最大 Token 数。增加这个限制可以让模型提供更长的答案,但也可能增加生成无用或不相关内容的可能性。 (默认值:128)", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "此选项将会删除文件集中所有文件,并用新上传的文件替换。", "This response was generated by \"{{model}}\"": "此回复由 \"{{model}}\" 生成", "This will delete": "这将删除", @@ -1132,7 +1132,7 @@ "Why?": "为什么?", "Widescreen Mode": "宽屏模式", "Won": "获胜", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "与 top-k 一起工作。较高的值(例如0.95)将导致更具多样性的文本,而较低的值(例如0.5)将生成更集中和保守的文本。(默认值:0.9)", + "Works together with top-k. 
A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "工作空间", "Workspace Permissions": "工作空间权限", "Write": "写作", diff --git a/src/lib/i18n/locales/zh-TW/translation.json b/src/lib/i18n/locales/zh-TW/translation.json index 161e73649a7..868b4fbbbcd 100644 --- a/src/lib/i18n/locales/zh-TW/translation.json +++ b/src/lib/i18n/locales/zh-TW/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "允許在通話中打斷語音", "Allowed Endpoints": "允許的端點", "Already have an account?": "已經有帳號了嗎?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "作為 top_p 的替代方案,旨在確保質量和多樣性的平衡。相對於最可能的 token 機率而言,參數 p 代表一個 token 被考慮在内的最低機率。例如,當 p=0.05 且最可能的 token 機率為 0.9 時,數值低於 0.045 的對數機率會被過濾掉。(預設值:0.0)", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "總是", "Amazing": "很棒", "an assistant": "一位助手", @@ -208,7 +208,7 @@ "Confirm your new password": "確認您的新密碼", "Connect to your own OpenAI compatible API endpoints.": "連線到您自己的 OpenAI 相容 API 端點。", "Connections": "連線", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "限制用於推理模型的推理程度 。僅適用於特定供應商提供的、支援推理程度設定的推理模型。(預設:中等)", + "Constrains effort on reasoning for reasoning models. 
Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "請聯絡管理員以取得 WebUI 存取權限", "Content": "內容", "Content Extraction Engine": "內容擷取引擎", @@ -218,9 +218,9 @@ "Continue with Email": "使用 Email 繼續", "Continue with LDAP": "使用 LDAP 繼續", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "控制文字轉語音(TTS)請求中如何分割訊息文字。「標點符號」分割為句子,「段落」分割為段落,「無」則保持訊息為單一字串。", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "控制在生成文本中 token 序列的重複程度。 數值越高(例如 1.5)將會更強烈地懲罰重複,而數值越低(例如 1.1)則會較為寬鬆。 若數值為 1,則停用此功能。(預設值:1.1)", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", "Controls": "控制項", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "控制輸出的連貫性和多樣性之間的平衡。較低的值會產生更專注和連貫的文字。(預設:5.0)", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", "Copied": "已複製", "Copied shared chat URL to clipboard!": "已複製共用對話 URL 到剪貼簿!", "Copied to clipboard": "已複製到剪貼簿", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. 
This can help maintain performance by avoiding page faults and ensuring fast data access.": "啟用記憶體鎖定(mlock)以防止模型資料被換出 RAM。此選項會將模型的工作頁面集鎖定在 RAM 中,確保它們不會被換出到磁碟。這可以透過避免頁面錯誤和確保快速資料存取來維持效能。", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "啟用記憶體映射(mmap)以載入模型資料。此選項允許系統使用磁碟儲存作為 RAM 的延伸,透過將磁碟檔案視為在 RAM 中來處理。這可以透過允許更快的資料存取來改善模型效能。然而,它可能無法在所有系統上正常運作,並且可能會消耗大量磁碟空間。", "Enable Message Rating": "啟用訊息評分", - "Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "啟用 Mirostat 採樣以控制困惑度。(預設:0,0 = 停用,1 = Mirostat,2 = Mirostat 2.0)", + "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "允許新使用者註冊", "Enabled": "已啟用", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "請確認您的 CSV 檔案包含以下 4 個欄位,並按照此順序排列:姓名、電子郵件、密碼、角色。", @@ -566,7 +566,7 @@ "Include": "包含", "Include `--api-auth` flag when running stable-diffusion-webui": "執行 stable-diffusion-webui 時包含 `--api-auth` 參數", "Include `--api` flag when running stable-diffusion-webui": "執行 stable-diffusion-webui 時包含 `--api` 參數", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "影響演算法對回饋的反應速度。較低的學習率會導致調整速度較慢,而較高的學習率會使演算法更快回應。(預設:0.1)", + "Influences how quickly the algorithm responds to feedback from the generated text. 
A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", "Info": "資訊", "Input commands": "輸入命令", "Install from Github URL": "從 GitHub URL 安裝", @@ -809,7 +809,7 @@ "Reasoning Effort": "推理程度", "Record voice": "錄音", "Redirecting you to Open WebUI Community": "正在將您重導向至 OpenWebUI 社群", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "降低產生無意義內容的機率。較高的值(例如 100)會給出更多樣化的答案,而較低的值(例如 10)會更保守。(預設:40)", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "以「使用者」稱呼自己(例如:「使用者正在學習西班牙文」)", "References from": "引用來源", "Refused when it shouldn't have": "不應拒絕時拒絕了", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "設定用於計算的工作執行緒數量。此選項控制使用多少執行緒來同時處理傳入的請求。增加此值可以在高併發工作負載下提升效能,但也可能消耗更多 CPU 資源。", "Set Voice": "設定語音", "Set whisper model": "設定 whisper 模型", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "針對至少出現一次的 token,設定固定的負面偏見。 數值越高(例如 1.5)將會更強烈地懲罰重複,而數值越低(例如 0.9)則會較為寬鬆。 若數值為 0,則停用此功能。(預設值:0)", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. 
(Default: 1.1)": "針對 token 設定比例偏差以懲罰重複,其基於 token 出現的次數。 數值越高(例如 1.5)將會更強烈地懲罰重複,而數值越低(例如 0.9)則會較為寬鬆。 若數值為 0,則停用此功能。(預設值:1.1)", - "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "設定模型向後查看以防止重複的距離。(預設:64,0 = 停用,-1 = num_ctx)", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "設定用於生成的隨機數種子。將其設定為特定數字會使模型對相同的提示詞產生相同的文字。(預設:隨機)", - "Sets the size of the context window used to generate the next token. (Default: 2048)": "設定用於生成下一個 token 的上下文視窗大小。(預設:2048)", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", + "Sets how far back for the model to look back to prevent repetition.": "", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", + "Sets the size of the context window used to generate the next token.": "", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "設定要使用的停止序列。當遇到此模式時,大型語言模型將停止生成文字並返回。可以在模型檔案中指定多個單獨的停止參數來設定多個停止模式。", "Settings": "設定", "Settings saved successfully!": "設定已成功儲存!", @@ -964,7 +964,7 @@ "System Prompt": "系統提示詞", "Tags Generation": "標籤生成", "Tags Generation Prompt": "標籤生成提示詞", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "使用無尾採樣來減少較不可能的 token 對輸出的影響。較高的值(例如 2.0)會減少更多影響,而值為 1.0 則停用此設定。(預設:1)", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "與模型對話", "Tap to interrupt": "點選以中斷", "Tasks": "任務", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "感謝您的回饋!", "The Application Account DN you bind with for search": "您綁定用於搜尋的應用程式帳號 DN", "The base to search for users": "搜尋使用者的基礎", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory. (Default: 512)": "批次大小決定一次處理多少文字請求。較高的批次大小可以提高模型的效能和速度,但也需要更多記憶體。(預設:512)", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "這個外掛背後的開發者是來自社群的熱情志願者。如果您覺得這個外掛很有幫助,請考慮為其開發做出貢獻。", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "評估排行榜基於 Elo 評分系統,並即時更新。", "The LDAP attribute that maps to the mail that users use to sign in.": "映射到使用者用於登入的使用者郵箱的 LDAP 屬性。", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "檔案大小上限(MB)。如果檔案大小超過此限制,檔案將不會被上傳。", "The maximum number of files that can be used at once in chat. 
If the number of files exceeds this limit, the files will not be uploaded.": "對話中一次可使用的最大檔案數量。如果檔案數量超過此限制,檔案將不會被上傳。", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "分數應該是介於 0.0(0%)和 1.0(100%)之間的值。", - "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "模型的溫度。提高溫度會使模型回答更具創意。(預設:0.8)", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "主題", "Thinking...": "正在思考...", "This action cannot be undone. Do you wish to continue?": "此操作無法復原。您確定要繼續進行嗎?", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "這確保您寶貴的對話會安全地儲存到您的後端資料庫。謝謝!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "這是一個實驗性功能,它可能無法如預期運作,並且可能會隨時變更。", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "此選項控制重新整理上下文時保留多少 token。例如,如果設定為 2,則會保留對話上下文的最後 2 個 token。保留上下文可以幫助維持對話的連續性,但可能會降低回應新主題的能力。(預設:24)", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated. (Default: 128)": "此選項設定模型可以在其回應中生成的最大 token 數量。增加此限制允許模型提供更長的答案,但也可能增加產生無用或不相關內容的可能性。(預設:128)", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. 
Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "此選項將刪除集合中的所有現有檔案,並用新上傳的檔案取代它們。", "This response was generated by \"{{model}}\"": "此回應由「{{model}}」生成", "This will delete": "這將會刪除", @@ -1132,7 +1132,7 @@ "Why?": "為什麼?", "Widescreen Mode": "寬螢幕模式", "Won": "獲勝", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "與 top-k 一起運作。較高的值(例如 0.95)會產生更多樣化的文字,而較低的值(例如 0.5)會產生更專注和保守的文字。(預設:0.9)", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "工作區", "Workspace Permissions": "工作區權限", "Write": "寫入", From 43077b82b74a5b06352ebdc532bdfb3c06550749 Mon Sep 17 00:00:00 2001 From: Timmo Date: Thu, 27 Feb 2025 23:09:02 +0100 Subject: [PATCH 121/623] i18n: Update de-DE translation --- src/lib/i18n/locales/de-DE/translation.json | 66 ++++++++++----------- 1 file changed, 33 insertions(+), 33 deletions(-) diff --git a/src/lib/i18n/locales/de-DE/translation.json b/src/lib/i18n/locales/de-DE/translation.json index ba1d2f31a3a..9583ad41d87 100644 --- a/src/lib/i18n/locales/de-DE/translation.json +++ b/src/lib/i18n/locales/de-DE/translation.json @@ -13,7 +13,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "Aufgabenmodelle können Unterhaltungstitel oder Websuchanfragen generieren.", "a user": "ein Benutzer", "About": 
"Über", - "Accept autocomplete generation / Jump to prompt variable": "", + "Accept autocomplete generation / Jump to prompt variable": "Automatische Vervollständigung akzeptieren / Zur Prompt-Variable springen", "Access": "Zugang", "Access Control": "Zugangskontrolle", "Accessible to all users": "Für alle Benutzer zugänglich", @@ -21,7 +21,7 @@ "Account Activation Pending": "Kontoaktivierung ausstehend", "Accurate information": "Präzise Information(en)", "Actions": "Aktionen", - "Activate": "", + "Activate": "Aktivieren", "Activate this command by typing \"/{{COMMAND}}\" to chat input.": "Aktivieren Sie diesen Befehl, indem Sie \"/{{COMMAND}}\" in die Chat-Eingabe eingeben.", "Active Users": "Aktive Benutzer", "Add": "Hinzufügen", @@ -86,7 +86,7 @@ "Archive All Chats": "Alle Unterhaltungen archivieren", "Archived Chats": "Archivierte Unterhaltungen", "archived-chat-export": "archivierter-chat-export", - "Are you sure you want to clear all memories? This action cannot be undone.": "", + "Are you sure you want to clear all memories? This action cannot be undone.": "Sind Sie sicher, dass Sie alle Erinnerungen löschen möchten? 
Diese Handlung kann nicht rückgängig gemacht werden.
faster-whisper zu erfahren und die verfügbaren Modelle zu sehen.", - "Click here to see available models.": "", + "Click here to see available models.": "Klicken Sie hier, um die verfügbaren Modelle anzuzeigen.", "Click here to select": "Klicke Sie zum Auswählen hier", "Click here to select a csv file.": "Klicken Sie zum Auswählen einer CSV-Datei hier.", "Click here to select a py file.": "Klicken Sie zum Auswählen einer py-Datei hier.", @@ -183,7 +183,7 @@ "Clone of {{TITLE}}": "Klon von {{TITLE}}", "Close": "Schließen", "Code execution": "Codeausführung", - "Code Execution": "", + "Code Execution": "Codeausführung", "Code Execution Engine": "", "Code Execution Timeout": "", "Code formatted successfully": "Code erfolgreich formatiert", @@ -206,7 +206,7 @@ "Confirm Password": "Passwort bestätigen", "Confirm your action": "Bestätigen Sie Ihre Aktion.", "Confirm your new password": "Neues Passwort bestätigen", - "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAI compatible API endpoints.": "Verbinden Sie sich zu Ihre OpenAI-kompatiblen Endpunkte.", "Connections": "Verbindungen", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "Beschränkt den Aufwand für das Schlussfolgern bei Schlussfolgerungsmodellen. Nur anwendbar auf Schlussfolgerungsmodelle von spezifischen Anbietern, die den Schlussfolgerungsaufwand unterstützen. 
(Standard: medium)", "Contact Admin for WebUI Access": "Kontaktieren Sie den Administrator für den Zugriff auf die Weboberfläche", @@ -230,7 +230,7 @@ "Copy Link": "Link kopieren", "Copy to clipboard": "In die Zwischenablage kopieren", "Copying to clipboard was successful!": "Das Kopieren in die Zwischenablage war erfolgreich!", - "CORS must be properly configured by the provider to allow requests from Open WebUI.": "", + "CORS must be properly configured by the provider to allow requests from Open WebUI.": "CORS muss vom Anbieter korrekt konfiguriert werden, um Anfragen von Open WebUI zuzulassen.", "Create": "Erstellen", "Create a knowledge base": "Wissensspeicher erstellen", "Create a model": "Modell erstellen", @@ -248,7 +248,7 @@ "Current Model": "Aktuelles Modell", "Current Password": "Aktuelles Passwort", "Custom": "Benutzerdefiniert", - "Danger Zone": "", + "Danger Zone": "Gefahrenzone", "Dark": "Dunkel", "Database": "Datenbank", "December": "Dezember", @@ -286,9 +286,9 @@ "Describe your knowledge base and objectives": "Beschreibe deinen Wissensspeicher und deine Ziele", "Description": "Beschreibung", "Didn't fully follow instructions": "Nicht genau den Answeisungen gefolgt", - "Direct Connections": "", - "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", - "Direct Connections settings updated": "", + "Direct Connections": "Direktverbindungen", + "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "Direktverbindungen ermöglichen es Benutzern, sich mit ihren eigenen OpenAI-kompatiblen API-Endpunkten zu verbinden.", + "Direct Connections settings updated": "Direktverbindungs-Einstellungen aktualisiert", "Disabled": "Deaktiviert", "Discover a function": "Entdecken Sie weitere Funktionen", "Discover a model": "Entdecken Sie weitere Modelle", @@ -321,14 +321,14 @@ "Don't like the style": "schlechter Schreibstil", "Done": "Erledigt", "Download": "Exportieren", - "Download as SVG": 
"", + "Download as SVG": "Exportieren als SVG", "Download canceled": "Exportierung abgebrochen", "Download Database": "Datenbank exportieren", "Drag and drop a file to upload or select a file to view": "Ziehen Sie eine Datei zum Hochladen oder wählen Sie eine Datei zum Anzeigen aus", "Draw": "Zeichnen", "Drop any files here to add to the conversation": "Ziehen Sie beliebige Dateien hierher, um sie der Unterhaltung hinzuzufügen", "e.g. '30s','10m'. Valid time units are 's', 'm', 'h'.": "z. B. '30s','10m'. Gültige Zeiteinheiten sind 's', 'm', 'h'.", - "e.g. 60": "", + "e.g. 60": "z. B. 60", "e.g. A filter to remove profanity from text": "z. B. Ein Filter, um Schimpfwörter aus Text zu entfernen", "e.g. My Filter": "z. B. Mein Filter", "e.g. My Tools": "z. B. Meine Werkzeuge", @@ -378,7 +378,7 @@ "Enter description": "Geben Sie eine Beschreibung ein", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", - "Enter domains separated by commas (e.g., example.com,site.org)": "", + "Enter domains separated by commas (e.g., example.com,site.org)": "Geben Sie die Domains durch Kommas separiert ein (z.B. 
example.com,site.org)", "Enter Exa API Key": "Geben Sie den Exa-API-Schlüssel ein", "Enter Github Raw URL": "Geben Sie die Github Raw-URL ein", "Enter Google PSE API Key": "Geben Sie den Google PSE-API-Schlüssel ein", @@ -464,7 +464,7 @@ "Failed to save models configuration": "Fehler beim Speichern der Modellkonfiguration", "Failed to update settings": "Fehler beim Aktualisieren der Einstellungen", "Failed to upload file.": "Fehler beim Hochladen der Datei.", - "Features": "", + "Features": "Funktionalitäten", "Features Permissions": "Funktionen-Berechtigungen", "February": "Februar", "Feedback History": "Feedback-Verlauf", @@ -613,24 +613,24 @@ "Leave empty to include all models from \"{{URL}}/models\" endpoint": "Leer lassen, um alle Modelle vom \"{{URL}}/models\"-Endpunkt einzuschließen", "Leave empty to include all models or select specific models": "Leer lassen, um alle Modelle einzuschließen oder spezifische Modelle auszuwählen", "Leave empty to use the default prompt, or enter a custom prompt": "Leer lassen, um den Standardprompt zu verwenden, oder geben Sie einen benutzerdefinierten Prompt ein", - "Leave model field empty to use the default model.": "", - "License": "", + "Leave model field empty to use the default model.": "Leer lassen, um das Standardmodell zu verwenden.", + "License": "Lizenz", "Light": "Hell", "Listening...": "Höre zu...", "Llama.cpp": "Llama.cpp", "LLMs can make mistakes. Verify important information.": "LLMs können Fehler machen. 
Überprüfe wichtige Informationen.", "Loader": "", - "Loading Kokoro.js...": "", + "Loading Kokoro.js...": "Lade Kokoro.js...", "Local": "Lokal", "Local Models": "Lokale Modelle", - "Location access not allowed": "", + "Location access not allowed": "Standortzugriff nicht erlaub", "Lost": "Verloren", "LTR": "LTR", "Made by Open WebUI Community": "Von der OpenWebUI-Community", "Make sure to enclose them with": "Umschließe Variablen mit", "Make sure to export a workflow.json file as API format from ComfyUI.": "Stellen Sie sicher, dass sie eine workflow.json-Datei im API-Format von ComfyUI exportieren.", "Manage": "Verwalten", - "Manage Direct Connections": "", + "Manage Direct Connections": "Direkte Verbindungen verwalten", "Manage Models": "Modelle verwalten", "Manage Ollama": "Ollama verwalten", "Manage Ollama API Connections": "Ollama-API-Verbindungen verwalten", @@ -697,7 +697,7 @@ "No HTML, CSS, or JavaScript content found.": "Keine HTML-, CSS- oder JavaScript-Inhalte gefunden.", "No inference engine with management support found": "Keine Inferenz-Engine mit Management-Unterstützung gefunden", "No knowledge found": "Kein Wissen gefunden", - "No memories to clear": "", + "No memories to clear": "Keine Erinnerungen zum Entfernen", "No model IDs": "Keine Modell-IDs", "No models found": "Keine Modelle gefunden", "No models selected": "Keine Modelle ausgewählt", @@ -776,7 +776,7 @@ "Plain text (.txt)": "Nur Text (.txt)", "Playground": "Testumgebung", "Please carefully review the following warnings:": "Bitte überprüfen Sie die folgenden Warnungen sorgfältig:", - "Please do not close the settings page while loading the model.": "", + "Please do not close the settings page while loading the model.": "Bitte schließen die Einstellungen-Seite nicht wärend das Modell lädt.", "Please enter a prompt": "Bitte geben Sie einen Prompt ein", "Please fill in all fields.": "Bitte füllen Sie alle Felder aus.", "Please select a model first.": "Bitte wählen Sie zuerst ein Modell aus.", 
@@ -821,7 +821,7 @@ "Rename": "Umbenennen", "Reorder Models": "Modelle neu anordnen", "Repeat Last N": "Wiederhole die letzten N", - "Repeat Penalty (Ollama)": "", + "Repeat Penalty (Ollama)": "Wiederholungsstrafe (Ollama)", "Reply in Thread": "Im Thread antworten", "Request Mode": "Anforderungsmodus", "Reranking Model": "Reranking-Modell", @@ -885,7 +885,7 @@ "Select a pipeline": "Wählen Sie eine Pipeline", "Select a pipeline url": "Wählen Sie eine Pipeline-URL", "Select a tool": "Wählen Sie ein Werkzeug", - "Select an auth method": "", + "Select an auth method": "Wählen Sie eine Authentifizierungsmethode", "Select an Ollama instance": "Wählen Sie eine Ollama-Instanz", "Select Engine": "Engine auswählen", "Select Knowledge": "Wissensdatenbank auswählen", @@ -965,9 +965,9 @@ "Tags Generation": "Tag-Generierung", "Tags Generation Prompt": "Prompt für Tag-Generierung", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Tail-Free Sampling wird verwendet, um den Einfluss weniger wahrscheinlicher Tokens auf die Ausgabe zu reduzieren. Ein höherer Wert (z.B. 2.0) reduziert den Einfluss stärker, während ein Wert von 1.0 diese Einstellung deaktiviert. (Standard: 1)", - "Talk to model": "", + "Talk to model": "Zu einem Modell sprechen", "Tap to interrupt": "Zum Unterbrechen tippen", - "Tasks": "", + "Tasks": "Aufgaben", "Tavily API Key": "Tavily-API-Schlüssel", "Tell us more:": "Erzähl uns mehr", "Temperature": "Temperatur", @@ -1005,7 +1005,7 @@ "This will reset the knowledge base and sync all files. Do you wish to continue?": "Dadurch wird die Wissensdatenbank zurückgesetzt und alle Dateien synchronisiert. 
Möchten Sie fortfahren?", "Thorough explanation": "Ausführliche Erklärung", "Thought for {{DURATION}}": "Nachgedacht für {{DURATION}}", - "Thought for {{DURATION}} seconds": "", + "Thought for {{DURATION}} seconds": "Nachgedacht für {{DURATION}} Sekunden", "Tika": "Tika", "Tika Server URL required.": "Tika-Server-URL erforderlich.", "Tiktoken": "Tiktoken", @@ -1014,7 +1014,7 @@ "Title (e.g. Tell me a fun fact)": "Titel (z. B. Erzähl mir einen lustigen Fakt)", "Title Auto-Generation": "Unterhaltungstitel automatisch generieren", "Title cannot be an empty string.": "Titel darf nicht leer sein.", - "Title Generation": "", + "Title Generation": "Titelgenerierung", "Title Generation Prompt": "Prompt für Titelgenerierung", "TLS": "TLS", "To access the available model names for downloading,": "Um auf die verfügbaren Modellnamen zuzugreifen,", @@ -1155,6 +1155,6 @@ "Your account status is currently pending activation.": "Ihr Kontostatus ist derzeit ausstehend und wartet auf Aktivierung.", "Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "Ihr gesamter Beitrag geht direkt an den Plugin-Entwickler; Open WebUI behält keinen Prozentsatz ein. 
Die gewählte Finanzierungsplattform kann jedoch eigene Gebühren haben.", "Youtube": "YouTube", - "Youtube Language": "", + "Youtube Language": "YouTube Sprache", "Youtube Proxy URL": "" } From 90fe437e2949ae83266c561ef6608c1df47a0c46 Mon Sep 17 00:00:00 2001 From: Timmo Date: Thu, 27 Feb 2025 23:23:05 +0100 Subject: [PATCH 122/623] i18n: Update de-DE translation - fix typos --- src/lib/i18n/locales/de-DE/translation.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/lib/i18n/locales/de-DE/translation.json b/src/lib/i18n/locales/de-DE/translation.json index 9583ad41d87..3aeac5fa4c4 100644 --- a/src/lib/i18n/locales/de-DE/translation.json +++ b/src/lib/i18n/locales/de-DE/translation.json @@ -86,7 +86,7 @@ "Archive All Chats": "Alle Unterhaltungen archivieren", "Archived Chats": "Archivierte Unterhaltungen", "archived-chat-export": "archivierter-chat-export", - "Are you sure you want to clear all memories? This action cannot be undone.": "Sind Sie sicher, dass Sie alle Erinnerungen löschen möchten? Diese Handlung kann nicht rüchgängig gemacht werden.", + "Are you sure you want to clear all memories? This action cannot be undone.": "Sind Sie sicher, dass Sie alle Erinnerungen löschen möchten? 
Diese Handlung kann nicht rückgängig gemacht werden.", "Are you sure you want to delete this channel?": "Sind Sie sicher, dass Sie diesen Kanal löschen möchten?", "Are you sure you want to delete this message?": "Sind Sie sicher, dass Sie diese Nachricht löschen möchten?", "Are you sure you want to unarchive all archived chats?": "Sind Sie sicher, dass Sie alle archivierten Unterhaltungen wiederherstellen möchten?", @@ -95,7 +95,7 @@ "Artifacts": "Artefakte", "Ask a question": "Stellen Sie eine Frage", "Assistant": "Assistent", - "Attach file from knowledge": "Datei vom Wissensspeicher anhängen", + "Attach file from knowledge": "Datei aus Wissensspeicher anhängen", "Attention to detail": "Aufmerksamkeit für Details", "Attribute for Mail": "Attribut für E-Mail", "Attribute for Username": "Attribut für Benutzername", @@ -206,7 +206,7 @@ "Confirm Password": "Passwort bestätigen", "Confirm your action": "Bestätigen Sie Ihre Aktion.", "Confirm your new password": "Neues Passwort bestätigen", - "Connect to your own OpenAI compatible API endpoints.": "Verbinden Sie sich zu Ihre OpenAI-kompatiblen Endpunkte.", + "Connect to your own OpenAI compatible API endpoints.": "Verbinden Sie sich zu Ihren OpenAI-kompatiblen Endpunkten.", "Connections": "Verbindungen", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "Beschränkt den Aufwand für das Schlussfolgern bei Schlussfolgerungsmodellen. Nur anwendbar auf Schlussfolgerungsmodelle von spezifischen Anbietern, die den Schlussfolgerungsaufwand unterstützen. 
(Standard: medium)", "Contact Admin for WebUI Access": "Kontaktieren Sie den Administrator für den Zugriff auf die Weboberfläche", @@ -776,7 +776,7 @@ "Plain text (.txt)": "Nur Text (.txt)", "Playground": "Testumgebung", "Please carefully review the following warnings:": "Bitte überprüfen Sie die folgenden Warnungen sorgfältig:", - "Please do not close the settings page while loading the model.": "Bitte schließen die Einstellungen-Seite nicht wärend das Modell lädt.", + "Please do not close the settings page while loading the model.": "Bitte schließen die Einstellungen-Seite nicht, während das Modell lädt.", "Please enter a prompt": "Bitte geben Sie einen Prompt ein", "Please fill in all fields.": "Bitte füllen Sie alle Felder aus.", "Please select a model first.": "Bitte wählen Sie zuerst ein Modell aus.", From 01435997b999d54ab75020b8998268a8798ed031 Mon Sep 17 00:00:00 2001 From: Tiancong Li Date: Fri, 28 Feb 2025 06:29:53 +0800 Subject: [PATCH 123/623] i18n: update zh-TW --- src/lib/i18n/locales/zh-TW/translation.json | 36 ++++++++++----------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/src/lib/i18n/locales/zh-TW/translation.json b/src/lib/i18n/locales/zh-TW/translation.json index 868b4fbbbcd..fd84d21be2d 100644 --- a/src/lib/i18n/locales/zh-TW/translation.json +++ b/src/lib/i18n/locales/zh-TW/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "允許在通話中打斷語音", "Allowed Endpoints": "允許的端點", "Already have an account?": "已經有帳號了嗎?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. 
The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "top_p 的替代方案,旨在確保品質與多樣性之間的平衡。參數 p 代表一個 token 被考慮的最低機率,相對於最有可能 token 的機率。例如,當 p=0.05 且最有可能 token 的機率為 0.9 時,機率小於 0.045 的 logits 將被過濾掉。", "Always": "總是", "Amazing": "很棒", "an assistant": "一位助手", @@ -208,7 +208,7 @@ "Confirm your new password": "確認您的新密碼", "Connect to your own OpenAI compatible API endpoints.": "連線到您自己的 OpenAI 相容 API 端點。", "Connections": "連線", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "限制推理模型的推理程度。僅適用於特定供應商支援推理程度的推理模型。", "Contact Admin for WebUI Access": "請聯絡管理員以取得 WebUI 存取權限", "Content": "內容", "Content Extraction Engine": "內容擷取引擎", @@ -218,9 +218,9 @@ "Continue with Email": "使用 Email 繼續", "Continue with LDAP": "使用 LDAP 繼續", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "控制文字轉語音(TTS)請求中如何分割訊息文字。「標點符號」分割為句子,「段落」分割為段落,「無」則保持訊息為單一字串。", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. 
At 1, it is disabled.": "控制生成文本中 token 序列的重複程度。較高的值(例如 1.5)會更強烈地懲罰重複,而較低的值(例如 1.1)會更寬容。設為 1 時,此功能將停用。", "Controls": "控制項", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "控制輸出結果的連貫性與多樣性之間的平衡。數值越低會產生更集中且連貫的文本。", "Copied": "已複製", "Copied shared chat URL to clipboard!": "已複製共用對話 URL 到剪貼簿!", "Copied to clipboard": "已複製到剪貼簿", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "啟用記憶體鎖定(mlock)以防止模型資料被換出 RAM。此選項會將模型的工作頁面集鎖定在 RAM 中,確保它們不會被換出到磁碟。這可以透過避免頁面錯誤和確保快速資料存取來維持效能。", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. 
However, it may not work correctly with all systems and can consume a significant amount of disk space.": "啟用記憶體映射(mmap)以載入模型資料。此選項允許系統使用磁碟儲存作為 RAM 的延伸,透過將磁碟檔案視為在 RAM 中來處理。這可以透過允許更快的資料存取來改善模型效能。然而,它可能無法在所有系統上正常運作,並且可能會消耗大量磁碟空間。", "Enable Message Rating": "啟用訊息評分", - "Enable Mirostat sampling for controlling perplexity.": "", + "Enable Mirostat sampling for controlling perplexity.": "啟用 Mirostat 採樣以控制 perplexity。", "Enable New Sign Ups": "允許新使用者註冊", "Enabled": "已啟用", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "請確認您的 CSV 檔案包含以下 4 個欄位,並按照此順序排列:姓名、電子郵件、密碼、角色。", @@ -566,7 +566,7 @@ "Include": "包含", "Include `--api-auth` flag when running stable-diffusion-webui": "執行 stable-diffusion-webui 時包含 `--api-auth` 參數", "Include `--api` flag when running stable-diffusion-webui": "執行 stable-diffusion-webui 時包含 `--api` 參數", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "影響算法對生成文本回饋的反應速度。較低的學習率會導致調整速度較慢,而較高的學習率會使算法反應更靈敏。", "Info": "資訊", "Input commands": "輸入命令", "Install from Github URL": "從 GitHub URL 安裝", @@ -809,7 +809,7 @@ "Reasoning Effort": "推理程度", "Record voice": "錄音", "Redirecting you to Open WebUI Community": "正在將您重導向至 OpenWebUI 社群", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 
10) will be more conservative.": "降低產生無意義內容的機率。較高的值(例如 100)會產生更多樣化的答案,而較低的值(例如 10)會更保守。", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "以「使用者」稱呼自己(例如:「使用者正在學習西班牙文」)", "References from": "引用來源", "Refused when it shouldn't have": "不應拒絕時拒絕了", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "設定用於計算的工作執行緒數量。此選項控制使用多少執行緒來同時處理傳入的請求。增加此值可以在高併發工作負載下提升效能,但也可能消耗更多 CPU 資源。", "Set Voice": "設定語音", "Set whisper model": "設定 whisper 模型", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", - "Sets how far back for the model to look back to prevent repetition.": "", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", - "Sets the size of the context window used to generate the next token.": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "對至少出現過一次的 token 設定統一的偏差值。較高的值(例如 1.5)會更強烈地懲罰重複,而較低的值(例如 0.9)會更寬容。設為 0 時,此功能將停用。", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. 
A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "根據 token 出現的次數,設定一個縮放偏差值來懲罰重複。較高的值(例如 1.5)會更強烈地懲罰重複,而較低的值(例如 0.9)會更寬容。設為 0 時,此功能將停用。", + "Sets how far back for the model to look back to prevent repetition.": "設定模型回溯多遠以防止重複。", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "設定用於生成的隨機數種子。將其設定為特定數字將使模型針對相同的提示生成相同的文本。", + "Sets the size of the context window used to generate the next token.": "設定用於生成下一個 token 的上下文窗口大小。", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "設定要使用的停止序列。當遇到此模式時,大型語言模型將停止生成文字並返回。可以在模型檔案中指定多個單獨的停止參數來設定多個停止模式。", "Settings": "設定", "Settings saved successfully!": "設定已成功儲存!", @@ -964,7 +964,7 @@ "System Prompt": "系統提示詞", "Tags Generation": "標籤生成", "Tags Generation Prompt": "標籤生成提示詞", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "尾部自由採樣用於減少輸出結果中較低機率 token 的影響。較高的值(例如 2.0)會減少更多影響,而值為 1.0 時會停用此設定。", "Talk to model": "與模型對話", "Tap to interrupt": "點選以中斷", "Tasks": "任務", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "感謝您的回饋!", "The Application Account DN you bind with for search": "您綁定用於搜尋的應用程式帳號 DN", "The base to search for users": "搜尋使用者的基礎", - "The batch size determines how many text requests are processed together at once. 
A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "批次大小決定一次處理多少文本請求。較高的批次大小可以提高模型的效能和速度,但也需要更多記憶體。", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "這個外掛背後的開發者是來自社群的熱情志願者。如果您覺得這個外掛很有幫助,請考慮為其開發做出貢獻。", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "評估排行榜基於 Elo 評分系統,並即時更新。", "The LDAP attribute that maps to the mail that users use to sign in.": "映射到使用者用於登入的使用者郵箱的 LDAP 屬性。", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "檔案大小上限(MB)。如果檔案大小超過此限制,檔案將不會被上傳。", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "對話中一次可使用的最大檔案數量。如果檔案數量超過此限制,檔案將不會被上傳。", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "分數應該是介於 0.0(0%)和 1.0(100%)之間的值。", - "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "模型的溫度。提高溫度會使模型更具創造性地回答。", "Theme": "主題", "Thinking...": "正在思考...", "This action cannot be undone. Do you wish to continue?": "此操作無法復原。您確定要繼續進行嗎?", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "這確保您寶貴的對話會安全地儲存到您的後端資料庫。謝謝!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "這是一個實驗性功能,它可能無法如預期運作,並且可能會隨時變更。", - "This option controls how many tokens are preserved when refreshing the context. 
For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "此選項控制在刷新上下文時保留多少 token。例如,如果設定為 2,則會保留對話上下文的最後 2 個 token。保留上下文有助於保持對話的連貫性,但也可能降低對新主題的回應能力。", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "此選項設定模型在其回應中可以生成的最大 token 數量。增加此限制允許模型提供更長的答案,但也可能增加生成無用或不相關內容的可能性。", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "此選項將刪除集合中的所有現有檔案,並用新上傳的檔案取代它們。", "This response was generated by \"{{model}}\"": "此回應由「{{model}}」生成", "This will delete": "這將會刪除", @@ -1132,7 +1132,7 @@ "Why?": "為什麼?", "Widescreen Mode": "寬螢幕模式", "Won": "獲勝", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", + "Works together with top-k. 
A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "與 top-k 一起使用。較高的值(例如 0.95)將產生更多樣化的文本,而較低的值(例如 0.5)將生成更集中和保守的文本。", "Workspace": "工作區", "Workspace Permissions": "工作區權限", "Write": "寫入", From 77579f55098ff477ae8e40e04ffd04ec6db839ec Mon Sep 17 00:00:00 2001 From: hurxxxx <75428618+hurxxxx@users.noreply.github.com> Date: Fri, 28 Feb 2025 08:42:02 +0900 Subject: [PATCH 124/623] chore: cleanup --- src/lib/utils/onedrive-file-picker.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib/utils/onedrive-file-picker.ts b/src/lib/utils/onedrive-file-picker.ts index 2f4910a2dc0..e48277ac06f 100644 --- a/src/lib/utils/onedrive-file-picker.ts +++ b/src/lib/utils/onedrive-file-picker.ts @@ -1,7 +1,7 @@ import { PublicClientApplication } from '@azure/msal-browser'; import type { PopupRequest } from '@azure/msal-browser'; -let CLIENT_ID = '521ada3e-6154-4a35-b9d3-51faac8ac944'; +let CLIENT_ID = ''; async function getCredentials() { if (CLIENT_ID) return; From d0ddb0637e3651635fb1b720e55b414c8af340b4 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Thu, 27 Feb 2025 16:34:05 -0800 Subject: [PATCH 125/623] enh: web embed bypass embedding and retrieval support --- backend/open_webui/retrieval/utils.py | 7 +++++++ backend/open_webui/routers/retrieval.py | 11 ++++++++--- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/backend/open_webui/retrieval/utils.py b/backend/open_webui/retrieval/utils.py index b6253e63cc2..029a33a56cd 100644 --- a/backend/open_webui/retrieval/utils.py +++ b/backend/open_webui/retrieval/utils.py @@ -414,6 +414,13 @@ def get_sources_from_files( ] ], } + elif file.get("file").get("data"): + context = { + "documents": [[file.get("file").get("data", {}).get("content")]], + "metadatas": [ + [file.get("file").get("data", {}).get("metadata", {})] + ], + } else: collection_names = [] if file.get("type") == "collection": diff --git 
a/backend/open_webui/routers/retrieval.py b/backend/open_webui/routers/retrieval.py index 5943ae33fd3..9a0855f25d7 100644 --- a/backend/open_webui/routers/retrieval.py +++ b/backend/open_webui/routers/retrieval.py @@ -1187,9 +1187,13 @@ def process_web( content = " ".join([doc.page_content for doc in docs]) log.debug(f"text_content: {content}") - save_docs_to_vector_db( - request, docs, collection_name, overwrite=True, user=user - ) + + if not request.app.state.config.BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL: + save_docs_to_vector_db( + request, docs, collection_name, overwrite=True, user=user + ) + else: + collection_name = None return { "status": True, @@ -1201,6 +1205,7 @@ def process_web( }, "meta": { "name": form_data.url, + "source": form_data.url, }, }, } From 34e3cb688147a1143963a120017d6e98292ce121 Mon Sep 17 00:00:00 2001 From: dannyl1u Date: Thu, 27 Feb 2025 23:13:09 -0800 Subject: [PATCH 126/623] logit bias: update tooltip message --- .../components/chat/Settings/Advanced/AdvancedParams.svelte | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/lib/components/chat/Settings/Advanced/AdvancedParams.svelte b/src/lib/components/chat/Settings/Advanced/AdvancedParams.svelte index 5e53b999e48..8d59fdef23e 100644 --- a/src/lib/components/chat/Settings/Advanced/AdvancedParams.svelte +++ b/src/lib/components/chat/Settings/Advanced/AdvancedParams.svelte @@ -302,7 +302,7 @@
From 90aa29528c0360dc0a93a4c9c6ccc64d5d1b3102 Mon Sep 17 00:00:00 2001 From: dannyl1u Date: Thu, 27 Feb 2025 23:13:30 -0800 Subject: [PATCH 127/623] logit_bias: handle comma seperated values --- backend/open_webui/utils/middleware.py | 7 ++++--- backend/open_webui/utils/misc.py | 12 ++++++++++++ 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/backend/open_webui/utils/middleware.py b/backend/open_webui/utils/middleware.py index 359ef775c8c..d52be248797 100644 --- a/backend/open_webui/utils/middleware.py +++ b/backend/open_webui/utils/middleware.py @@ -68,6 +68,7 @@ get_last_user_message, get_last_assistant_message, prepend_to_first_user_message_content, + convert_logit_bias_input_to_json ) from open_webui.utils.tools import get_tools from open_webui.utils.plugin import load_function_module_by_id @@ -593,9 +594,9 @@ def apply_params_to_form_data(form_data, model): form_data["reasoning_effort"] = params["reasoning_effort"] if "logit_bias" in params: try: - form_data["logit_bias"] = json.loads(params["logit_bias"]) - except json.JSONDecodeError: - print("Invalid JSON format for logit_bias") + form_data["logit_bias"] = json.loads(convert_logit_bias_input_to_json(params["logit_bias"])) + except Exception as e: + print(f"Error parsing logit_bias: {e}") return form_data diff --git a/backend/open_webui/utils/misc.py b/backend/open_webui/utils/misc.py index f79b6268437..8ab74331692 100644 --- a/backend/open_webui/utils/misc.py +++ b/backend/open_webui/utils/misc.py @@ -5,6 +5,7 @@ from datetime import timedelta from pathlib import Path from typing import Callable, Optional +import json import collections.abc @@ -445,3 +446,14 @@ def parse_ollama_modelfile(model_text): data["params"]["messages"] = messages return data + +def convert_logit_bias_input_to_json(user_input): + logit_bias_pairs = user_input.split(',') + logit_bias_json = {} + for pair in logit_bias_pairs: + token, bias = pair.split(':') + token = str(token.strip()) + bias = int(bias.strip()) + bias = 
100 if bias > 100 else -100 if bias < -100 else bias + logit_bias_json[token] = bias + return json.dumps(logit_bias_json) \ No newline at end of file From 41a2b86e70b0df0c99c35b5f683be1dfc1a38ced Mon Sep 17 00:00:00 2001 From: Yifang Deng Date: Fri, 28 Feb 2025 10:01:39 +0100 Subject: [PATCH 128/623] fix: add redis lock timeout config --- backend/open_webui/env.py | 1 + backend/open_webui/socket/main.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/backend/open_webui/env.py b/backend/open_webui/env.py index ba546a2eb5a..4be87dbadce 100644 --- a/backend/open_webui/env.py +++ b/backend/open_webui/env.py @@ -386,6 +386,7 @@ def parse_section(section): WEBSOCKET_MANAGER = os.environ.get("WEBSOCKET_MANAGER", "") WEBSOCKET_REDIS_URL = os.environ.get("WEBSOCKET_REDIS_URL", REDIS_URL) +WEBSOCKET_REDIS_LOCK_TIMEOUT_IN_SECONDS = os.environ.get("WEBSOCKET_REDIS_LOCK_TIMEOUT_IN_SECONDS", 60) AIOHTTP_CLIENT_TIMEOUT = os.environ.get("AIOHTTP_CLIENT_TIMEOUT", "") diff --git a/backend/open_webui/socket/main.py b/backend/open_webui/socket/main.py index 6f591512272..3813cb0825b 100644 --- a/backend/open_webui/socket/main.py +++ b/backend/open_webui/socket/main.py @@ -12,6 +12,7 @@ ENABLE_WEBSOCKET_SUPPORT, WEBSOCKET_MANAGER, WEBSOCKET_REDIS_URL, + WEBSOCKET_REDIS_LOCK_TIMEOUT_IN_SECONDS, ) from open_webui.utils.auth import decode_token from open_webui.socket.utils import RedisDict, RedisLock @@ -61,7 +62,7 @@ clean_up_lock = RedisLock( redis_url=WEBSOCKET_REDIS_URL, lock_name="usage_cleanup_lock", - timeout_secs=TIMEOUT_DURATION * 2, + timeout_secs=WEBSOCKET_REDIS_LOCK_TIMEOUT_IN_SECONDS, ) aquire_func = clean_up_lock.aquire_lock renew_func = clean_up_lock.renew_lock From bcdbc4fb8aefef387aadcf07646664a2d0d3f92d Mon Sep 17 00:00:00 2001 From: orenzhang Date: Fri, 28 Feb 2025 17:47:27 +0800 Subject: [PATCH 129/623] i18n(zh-cn): add chinese translation --- src/lib/i18n/locales/zh-CN/translation.json | 60 ++++++++++----------- 1 file changed, 30 insertions(+), 30 
deletions(-) diff --git a/src/lib/i18n/locales/zh-CN/translation.json b/src/lib/i18n/locales/zh-CN/translation.json index c3fe7c09e47..f46835953ae 100644 --- a/src/lib/i18n/locales/zh-CN/translation.json +++ b/src/lib/i18n/locales/zh-CN/translation.json @@ -64,7 +64,7 @@ "Allow Voice Interruption in Call": "允许通话中的打断语音", "Allowed Endpoints": "允许的端点", "Already have an account?": "已经拥有账号了?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "top_p 的替代方法,旨在确保质量和多样性之间的平衡。参数 p 表示相对于最可能令牌的概率,一个令牌被考虑的最小概率。例如,当 p=0.05 且最可能的令牌概率为 0.9 时,概率值小于 0.045 的词元将被过滤掉。", "Always": "保持", "Amazing": "很棒", "an assistant": "一个助手", @@ -86,7 +86,7 @@ "Archive All Chats": "归档所有对话记录", "Archived Chats": "已归档对话", "archived-chat-export": "导出已归档对话", - "Are you sure you want to clear all memories? This action cannot be undone.": "", + "Are you sure you want to clear all memories? 
This action cannot be undone.": "是否确认清除所有记忆?清除后无法还原", "Are you sure you want to delete this channel?": "是否确认删除此频道?", "Are you sure you want to delete this message?": "是否确认删除此消息?", "Are you sure you want to unarchive all archived chats?": "是否确认取消所有已归档的对话?", @@ -129,7 +129,7 @@ "Bocha Search API Key": "Bocha Search API 密钥", "Brave Search API Key": "Brave Search API 密钥", "By {{name}}": "由 {{name}} 提供", - "Bypass Embedding and Retrieval": "", + "Bypass Embedding and Retrieval": "绕过嵌入和检索", "Bypass SSL verification for Websites": "绕过网站的 SSL 验证", "Calendar": "日历", "Call": "呼叫", @@ -163,7 +163,7 @@ "Ciphers": "加密算法 (Ciphers)", "Citation": "引文", "Clear memory": "清除记忆", - "Clear Memory": "", + "Clear Memory": "清除记忆", "click here": "点击此处", "Click here for filter guides.": "点击此处查看 filter 指南。", "Click here for help.": "点击这里获取帮助。", @@ -208,19 +208,19 @@ "Confirm your new password": "确认新密码", "Connect to your own OpenAI compatible API endpoints.": "连接到你自己的与 OpenAI 兼容的 API 接口端点。", "Connections": "外部连接", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "约束推理模型的推理努力程度。仅适用于支持推理努力控制的特定提供商的推理模型。", "Contact Admin for WebUI Access": "请联系管理员以获取访问权限", "Content": "内容", - "Content Extraction Engine": "", + "Content Extraction Engine": "内容提取引擎", "Context Length": "上下文长度", "Continue Response": "继续生成", "Continue with {{provider}}": "使用 {{provider}} 继续", "Continue with Email": "使用邮箱登录", "Continue with LDAP": "使用 LDAP 登录", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "控制消息文本如何拆分以用于 TTS 请求。“Punctuation”拆分为句子,“paragraphs”拆分为段落,“none”将消息保留为单个字符串。", - "Control the repetition of token sequences in the generated text. 
A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "控制生成文本中标记序列的重复度。较高的值(例如1.5)将更强烈地惩罚重复,而较低的值(例如1.1)则更为宽松。当值为1时,此功能将被禁用。", "Controls": "对话高级设置", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "控制输出文本中连贯性和多样性之间的平衡。较低的值将产生更加专注和连贯的文本。", "Copied": "已复制", "Copied shared chat URL to clipboard!": "已复制此对话分享链接至剪贴板!", "Copied to clipboard": "已复制到剪贴板", @@ -248,7 +248,7 @@ "Current Model": "当前模型", "Current Password": "当前密码", "Custom": "自定义", - "Danger Zone": "", + "Danger Zone": "危险区域", "Dark": "暗色", "Database": "数据库", "December": "十二月", @@ -346,7 +346,7 @@ "ElevenLabs": "ElevenLabs", "Email": "电子邮箱", "Embark on adventures": "踏上冒险之旅", - "Embedding": "", + "Embedding": "嵌入", "Embedding Batch Size": "嵌入层批处理大小 (Embedding Batch Size)", "Embedding Model": "语义向量模型", "Embedding Model Engine": "语义向量模型引擎", @@ -358,7 +358,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "启用内存锁定(mlock)以防止模型数据被交换出RAM。此选项将模型的工作集页面锁定在RAM中,确保它们不会被交换到磁盘。这可以通过避免页面错误和确保快速数据访问来帮助维持性能。", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. 
However, it may not work correctly with all systems and can consume a significant amount of disk space.": "启用内存映射(mmap)以加载模型数据。此选项允许系统通过将磁盘文件视为在RAM中来使用磁盘存储作为RAM的扩展。这可以通过更快的数据访问来提高模型性能。然而,它可能无法在所有系统上正常工作,并且可能会消耗大量磁盘空间。", "Enable Message Rating": "启用回复评价", - "Enable Mirostat sampling for controlling perplexity.": "", + "Enable Mirostat sampling for controlling perplexity.": "启用Mirostat采样以控制困惑度", "Enable New Sign Ups": "允许新用户注册", "Enabled": "启用", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "确保您的 CSV 文件按以下顺序包含 4 列: 姓名、电子邮箱、密码、角色。", @@ -566,12 +566,12 @@ "Include": "包括", "Include `--api-auth` flag when running stable-diffusion-webui": "运行 stable-diffusion-webui 时包含 `--api-auth` 参数", "Include `--api` flag when running stable-diffusion-webui": "运行 stable-diffusion-webui 时包含 `--api` 参数", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "影响算法对生成文本反馈的响应速度。较低的学习率将导致调整更慢,而较高的学习率将使算法反应更灵敏。", "Info": "信息", "Input commands": "输入命令", "Install from Github URL": "从 Github URL 安装", "Instant Auto-Send After Voice Transcription": "语音转录文字后即时自动发送", - "Integration": "", + "Integration": "集成", "Interface": "界面", "Invalid file format.": "无效文件格式。", "Invalid Tag": "无效标签", @@ -619,7 +619,7 @@ "Listening...": "正在倾听...", "Llama.cpp": "Llama.cpp", "LLMs can make mistakes. 
Verify important information.": "大语言模型可能会生成误导性错误信息,请对关键信息加以验证。", - "Loader": "", + "Loader": "加载器", "Loading Kokoro.js...": "载入 Kokoro.js...", "Local": "本地", "Local Models": "本地模型", @@ -697,7 +697,7 @@ "No HTML, CSS, or JavaScript content found.": "未找到 HTML、CSS 或 JavaScript 内容。", "No inference engine with management support found": "未找到支持管理的推理引擎", "No knowledge found": "未找到知识", - "No memories to clear": "", + "No memories to clear": "记忆为空,无须清理", "No model IDs": "没有模型 ID", "No models found": "未找到任何模型", "No models selected": "未选择任何模型", @@ -809,7 +809,7 @@ "Reasoning Effort": "推理努力", "Record voice": "录音", "Redirecting you to Open WebUI Community": "正在将您重定向到 OpenWebUI 社区", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "降低生成无意义内容的概率。较高的值(如100)将产生更多样化的回答,而较低的值(如10)则更加保守。", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "使用\"User\" (用户) 来指代自己(例如:“User 正在学习西班牙语”)", "References from": "来自", "Refused when it shouldn't have": "无理拒绝", @@ -835,7 +835,7 @@ "Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "无法激活回复时发送通知。请检查浏览器设置,并授予必要的访问权限。", "Response splitting": "拆分回复", "Result": "结果", - "Retrieval": "", + "Retrieval": "检索", "Retrieval Query Generation": "检索查询生成", "Rich Text Input for Chat": "对话富文本输入", "RK": "排名", @@ -918,11 +918,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. 
Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "设置用于计算的工作线程数量。该选项可控制并发处理传入请求的线程数量。增加该值可以提高高并发工作负载下的性能,但也可能消耗更多的 CPU 资源。", "Set Voice": "设置音色", "Set whisper model": "设置 whisper 模型", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", - "Sets how far back for the model to look back to prevent repetition.": "", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", - "Sets the size of the context window used to generate the next token.": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "对至少出现过一次的标记设置固定偏置值。较高的值(例如1.5)将更强烈地惩罚重复,而较低的值(例如0.9)则更为宽松。当值为0时,此功能将被禁用。", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "根据标记出现的次数,设置一个缩放偏置值来惩罚重复。较高的值(例如1.5)将更强烈地惩罚重复,而较低的值(例如0.9)则更为宽松。当值为0时,此功能将被禁用。", + "Sets how far back for the model to look back to prevent repetition.": "设置模型回溯的范围,以防止重复。", + "Sets the random number seed to use for generation. 
Setting this to a specific number will make the model generate the same text for the same prompt.": "设置用于生成的随机数种子。将其设置为特定数字将使模型针对同一提示生成相同的文本。", + "Sets the size of the context window used to generate the next token.": "设置用于生成下一个 Token 的上下文窗口的大小。", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "设置要使用的停止序列。遇到这种模式时,大语言模型将停止生成文本并返回。可以通过在模型文件中指定多个单独的停止参数来设置多个停止模式。", "Settings": "设置", "Settings saved successfully!": "设置已成功保存!", @@ -964,7 +964,7 @@ "System Prompt": "系统提示词 (System Prompt)", "Tags Generation": "标签生成", "Tags Generation Prompt": "标签生成提示词", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "无尾采样用于减少输出中出现概率较小的 Token 的影响。较高的值(例如 2.0)将进一步减少影响,而值 1.0 则禁用此设置。", "Talk to model": "与模型交谈", "Tap to interrupt": "点击以中断", "Tasks": "任务", @@ -979,7 +979,7 @@ "Thanks for your feedback!": "感谢您的反馈!", "The Application Account DN you bind with for search": "您所绑定用于搜索的 Application Account DN", "The base to search for users": "搜索用户的 Base", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "批处理大小决定了一次可以处理多少个文本请求。更高的批处理大小可以提高模型的性能和速度,但也需要更多内存。", "The developers behind this plugin are passionate volunteers from the community. 
If you find this plugin helpful, please consider contributing to its development.": "本插件的背后开发者是社区中热情的志愿者。如果此插件有帮助到您,烦请考虑一下为它的开发做出贡献。", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "排行榜基于 Elo 评级系统并实时更新。", "The LDAP attribute that maps to the mail that users use to sign in.": "映射到用户登录时使用的邮箱的 LDAP 属性。", @@ -988,14 +988,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "最大文件大小(MB)。如果文件大小超过此限制,则无法上传该文件。", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "在单次对话中可以使用的最大文件数。如果文件数超过此限制,则文件不会上传。", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "分值应介于 0.0(0%)和 1.0(100%)之间。", - "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "模型的温度。增加温度将使模型的回答更有创意。", "Theme": "主题", "Thinking...": "正在思考...", "This action cannot be undone. Do you wish to continue?": "此操作无法撤销。是否确认继续?", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "这将确保您的宝贵对话被安全地保存到后台数据库中。感谢!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "这是一个实验功能,可能不会如预期那样工作,而且可能随时发生变化。", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", - "This option sets the maximum number of tokens the model can generate in its response. 
Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "此选项控制刷新上下文时保留多少 Token。例如,如果设置为 2,则将保留对话上下文的最后 2 个 Token。保留上下文有助于保持对话的连续性,但可能会降低响应新主题的能力。", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "此项用于设置模型在其响应中可以生成的最大 Token 数。增加此限制可让模型提供更长的答案,但也可能增加生成无用或不相关内容的可能性。", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "此选项将会删除文件集中所有文件,并用新上传的文件替换。", "This response was generated by \"{{model}}\"": "此回复由 \"{{model}}\" 生成", "This will delete": "这将删除", @@ -1132,7 +1132,7 @@ "Why?": "为什么?", "Widescreen Mode": "宽屏模式", "Won": "获胜", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "与 top-k 配合使用。较高的值(例如 0.95)将产生更加多样化的文本,而较低的值(例如 0.5)将产生更加集中和保守的文本。", "Workspace": "工作空间", "Workspace Permissions": "工作空间权限", "Write": "写作", @@ -1155,6 +1155,6 @@ "Your account status is currently pending activation.": "您的账号当前状态为待激活。", "Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. 
However, the chosen funding platform might have its own fees.": "您的全部捐款将直接给到插件开发者,Open WebUI 不会收取任何比例。但众筹平台可能会有服务费、抽成。", "Youtube": "YouTube", - "Youtube Language": "", - "Youtube Proxy URL": "" + "Youtube Language": "Youtube 语言", + "Youtube Proxy URL": "Youtube 代理 URL" } From 8b3fcfea1319b612a27197fe9c7517cbf91da3ef Mon Sep 17 00:00:00 2001 From: Thomas Rehn <271119+tremlin@users.noreply.github.com> Date: Wed, 5 Feb 2025 13:43:35 +0100 Subject: [PATCH 130/623] feat: add alternative input configuration where Ctrl-Enter is required to send chat messages --- src/lib/components/chat/MessageInput.svelte | 38 ++++++++++--------- .../components/chat/Settings/Interface.svelte | 33 ++++++++++++++++ src/lib/stores/index.ts | 1 + 3 files changed, 54 insertions(+), 18 deletions(-) diff --git a/src/lib/components/chat/MessageInput.svelte b/src/lib/components/chat/MessageInput.svelte index f065c799734..d9e0490fedd 100644 --- a/src/lib/components/chat/MessageInput.svelte +++ b/src/lib/components/chat/MessageInput.svelte @@ -676,12 +676,13 @@ bind:value={prompt} id="chat-input" messageInput={true} - shiftEnter={!$mobile || - !( - 'ontouchstart' in window || - navigator.maxTouchPoints > 0 || - navigator.msMaxTouchPoints > 0 - )} + shiftEnter={!($settings?.alternativeEnterBehavior ?? false) && + (!$mobile || + !( + 'ontouchstart' in window || + navigator.maxTouchPoints > 0 || + navigator.msMaxTouchPoints > 0 + ))} placeholder={placeholder ? placeholder : $i18n.t('Send a Message')} largeTextAsFile={$settings?.largeTextAsFile ?? false} autocomplete={$config?.features.enable_autocomplete_generation} @@ -805,19 +806,20 @@ navigator.msMaxTouchPoints > 0 ) ) { - // Prevent Enter key from creating a new line - // Uses keyCode '13' for Enter key for chinese/japanese keyboards - if (e.keyCode === 13 && !e.shiftKey) { + // Uses keyCode '13' for Enter key for chinese/japanese keyboards. 
+ // + // Depending on the user's settings, it will send the message + // either when Enter is pressed or when Ctrl+Enter is pressed. + const submitMessage = + ($settings?.alternativeEnterBehavior ?? false) + ? e.keyCode === 13 && isCtrlPressed + : e.keyCode === 13 && !e.shiftKey; + + if (submitMessage) { e.preventDefault(); - } - - // Submit the prompt when Enter key is pressed - if ( - (prompt !== '' || files.length > 0) && - e.keyCode === 13 && - !e.shiftKey - ) { - dispatch('submit', prompt); + if (prompt !== '' || files.length > 0) { + dispatch('submit', prompt); + } } } } diff --git a/src/lib/components/chat/Settings/Interface.svelte b/src/lib/components/chat/Settings/Interface.svelte index 205dc7102e2..694971372c3 100644 --- a/src/lib/components/chat/Settings/Interface.svelte +++ b/src/lib/components/chat/Settings/Interface.svelte @@ -37,6 +37,7 @@ let landingPageMode = ''; let chatBubble = true; let chatDirection: 'LTR' | 'RTL' = 'LTR'; + let alternativeEnterBehavior = false; let imageCompression = false; let imageCompressionSize = { @@ -193,6 +194,11 @@ saveSettings({ chatDirection }); }; + const toggleAlternativeEnterBehavior = async () => { + alternativeEnterBehavior = !alternativeEnterBehavior; + saveSettings({ alternativeEnterBehavior }); + }; + const updateInterfaceHandler = async () => { saveSettings({ models: [defaultModelId], @@ -232,6 +238,7 @@ notificationSound = $settings.notificationSound ?? true; hapticFeedback = $settings.hapticFeedback ?? false; + alternativeEnterBehavior = $settings.alternativeEnterBehavior ?? false; imageCompression = $settings.imageCompression ?? false; imageCompressionSize = $settings.imageCompressionSize ?? { width: '', height: '' }; @@ -652,6 +659,32 @@
--> +
+
+
+ {$i18n.t('Enter Key Behavior')} +
+ + +
+
+
diff --git a/src/lib/stores/index.ts b/src/lib/stores/index.ts index 1f6b400e079..c24e0e2941e 100644 --- a/src/lib/stores/index.ts +++ b/src/lib/stores/index.ts @@ -140,6 +140,7 @@ type Settings = { title?: TitleSettings; splitLargeDeltas?: boolean; chatDirection: 'LTR' | 'RTL'; + alternativeEnterBehavior?: boolean; system?: string; requestFormat?: string; From 331caad5b8aafc241ec239960f84b44605c6c64e Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Fri, 28 Feb 2025 07:36:56 -0800 Subject: [PATCH 131/623] fix --- src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte b/src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte index 18a4585c647..baecd8cd6e8 100644 --- a/src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte +++ b/src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte @@ -91,7 +91,7 @@ onCode={(value) => { dispatch('code', value); }} - onSave={(e) => { + onSave={(value) => { dispatch('update', { raw: token.raw, oldContent: token.text, From c06484297b47a4eece059c9995b6520820923541 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=B4mulo=20Mendes=20Figueiredo?= Date: Fri, 28 Feb 2025 12:39:57 -0300 Subject: [PATCH 132/623] i18n: ask and explain floating buttons --- .../components/chat/ContentRenderer/FloatingButtons.svelte | 7 ++++--- src/lib/i18n/locales/en-US/translation.json | 3 +++ src/lib/i18n/locales/pt-BR/translation.json | 3 +++ 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/src/lib/components/chat/ContentRenderer/FloatingButtons.svelte b/src/lib/components/chat/ContentRenderer/FloatingButtons.svelte index d7ad58c00b0..6dc48529bcc 100644 --- a/src/lib/components/chat/ContentRenderer/FloatingButtons.svelte +++ b/src/lib/components/chat/ContentRenderer/FloatingButtons.svelte @@ -121,7 +121,8 @@ toast.error('Model not selected'); return; } - prompt = `Explain this 
section to me in more detail\n\n\`\`\`\n${selectedText}\n\`\`\``; + const explainText = $i18n.t('Explain this section to me in more detail'); + prompt = `${explainText}\n\n\`\`\`\n${selectedText}\n\`\`\``; responseContent = ''; const [res, controller] = await chatCompletion(localStorage.token, { @@ -246,7 +247,7 @@ > -
Ask
+
{$i18n.t('Ask')}
{:else} diff --git a/src/lib/i18n/locales/en-US/translation.json b/src/lib/i18n/locales/en-US/translation.json index d0930120ca0..0dc59a8a05b 100644 --- a/src/lib/i18n/locales/en-US/translation.json +++ b/src/lib/i18n/locales/en-US/translation.json @@ -93,6 +93,7 @@ "Are you sure?": "", "Arena Models": "", "Artifacts": "", + "Ask": "", "Ask a question": "", "Assistant": "", "Attach file from knowledge": "", @@ -443,6 +444,8 @@ "Exclude": "", "Execute code for analysis": "", "Experimental": "", + "Explain": "", + "Explain this section to me in more detail": "", "Explore the cosmos": "", "Export": "", "Export All Archived Chats": "", diff --git a/src/lib/i18n/locales/pt-BR/translation.json b/src/lib/i18n/locales/pt-BR/translation.json index e7dbc89dd49..1b0b9eb749a 100644 --- a/src/lib/i18n/locales/pt-BR/translation.json +++ b/src/lib/i18n/locales/pt-BR/translation.json @@ -93,6 +93,7 @@ "Are you sure?": "Você tem certeza?", "Arena Models": "Arena de Modelos", "Artifacts": "Artefatos", + "Ask": "Perguntar", "Ask a question": "Faça uma pergunta", "Assistant": "Assistente", "Attach file from knowledge": "", @@ -443,6 +444,8 @@ "Exclude": "Excluir", "Execute code for analysis": "", "Experimental": "Experimental", + "Explain": "Explicar", + "Explain this section to me in more detail": "Explique esta seção em mais detalhes", "Explore the cosmos": "Explorar o cosmos", "Export": "Exportar", "Export All Archived Chats": "Exportar todos os chats arquivados", From 88c02d5a140e732129d1ec9006324a6f1c69074f Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Fri, 28 Feb 2025 16:02:15 -0800 Subject: [PATCH 133/623] refac --- backend/open_webui/env.py | 2 +- backend/open_webui/socket/main.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/backend/open_webui/env.py b/backend/open_webui/env.py index 4be87dbadce..d689281fa20 100644 --- a/backend/open_webui/env.py +++ b/backend/open_webui/env.py @@ -386,7 +386,7 @@ def parse_section(section): 
WEBSOCKET_MANAGER = os.environ.get("WEBSOCKET_MANAGER", "") WEBSOCKET_REDIS_URL = os.environ.get("WEBSOCKET_REDIS_URL", REDIS_URL) -WEBSOCKET_REDIS_LOCK_TIMEOUT_IN_SECONDS = os.environ.get("WEBSOCKET_REDIS_LOCK_TIMEOUT_IN_SECONDS", 60) +WEBSOCKET_REDIS_LOCK_TIMEOUT = os.environ.get("WEBSOCKET_REDIS_LOCK_TIMEOUT", 60) AIOHTTP_CLIENT_TIMEOUT = os.environ.get("AIOHTTP_CLIENT_TIMEOUT", "") diff --git a/backend/open_webui/socket/main.py b/backend/open_webui/socket/main.py index 3813cb0825b..8f5a9568b26 100644 --- a/backend/open_webui/socket/main.py +++ b/backend/open_webui/socket/main.py @@ -12,7 +12,7 @@ ENABLE_WEBSOCKET_SUPPORT, WEBSOCKET_MANAGER, WEBSOCKET_REDIS_URL, - WEBSOCKET_REDIS_LOCK_TIMEOUT_IN_SECONDS, + WEBSOCKET_REDIS_LOCK_TIMEOUT, ) from open_webui.utils.auth import decode_token from open_webui.socket.utils import RedisDict, RedisLock @@ -62,7 +62,7 @@ clean_up_lock = RedisLock( redis_url=WEBSOCKET_REDIS_URL, lock_name="usage_cleanup_lock", - timeout_secs=WEBSOCKET_REDIS_LOCK_TIMEOUT_IN_SECONDS, + timeout_secs=WEBSOCKET_REDIS_LOCK_TIMEOUT, ) aquire_func = clean_up_lock.aquire_lock renew_func = clean_up_lock.renew_lock From fe44e4d3448b934c0f316390d5141dec3348d906 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Fri, 28 Feb 2025 16:06:28 -0800 Subject: [PATCH 134/623] i18n(html): sync html language with i18n language --- src/lib/components/chat/Settings/General.svelte | 4 ++-- src/lib/i18n/index.ts | 8 ++++++++ src/routes/+layout.svelte | 4 ++-- 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/src/lib/components/chat/Settings/General.svelte b/src/lib/components/chat/Settings/General.svelte index 00cf1d786dd..73ac662d01c 100644 --- a/src/lib/components/chat/Settings/General.svelte +++ b/src/lib/components/chat/Settings/General.svelte @@ -1,7 +1,7 @@ {#if show} @@ -18,6 +44,7 @@
import { toast } from 'svelte-sonner'; - import { onMount, getContext } from 'svelte'; + import { onMount, getContext, tick } from 'svelte'; import { goto } from '$app/navigation'; import { page } from '$app/stores'; @@ -115,6 +115,29 @@ let onboarding = false; + async function setLogoImage() { + await tick(); + const logo = document.getElementById('logo'); + + if (logo) { + const isDarkMode = document.documentElement.classList.contains('dark'); + + if (isDarkMode) { + const darkImage = new Image(); + darkImage.src = '/static/favicon-dark.png'; + + darkImage.onload = () => { + logo.src = '/static/favicon-dark.png'; + logo.style.filter = ''; // Ensure no inversion is applied if favicon-dark.png exists + }; + + darkImage.onerror = () => { + logo.style.filter = 'invert(1)'; // Invert image if favicon-dark.png is missing + }; + } + } + } + onMount(async () => { if ($user !== undefined) { await goto('/'); @@ -122,6 +145,8 @@ await checkOauthCallback(); loaded = true; + setLogoImage(); + if (($config?.features.auth_trusted_header ?? false) || $config?.features.auth === false) { await signInHandler(); } else { @@ -154,9 +179,10 @@
From 36ffa9824bb82a4703a818f178bf9ef6db1dde4c Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Tue, 4 Mar 2025 01:16:25 -0800 Subject: [PATCH 185/623] refac --- backend/open_webui/utils/auth.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/backend/open_webui/utils/auth.py b/backend/open_webui/utils/auth.py index daf85b97c9f..d0c02a569c3 100644 --- a/backend/open_webui/utils/auth.py +++ b/backend/open_webui/utils/auth.py @@ -87,8 +87,8 @@ def get_license_data(app, key): setattr(app.state, "USER_COUNT", v) elif k == "name": setattr(app.state, "WEBUI_NAME", v) - elif k == "info": - setattr(app.state, "LICENSE_INFO", v) + elif k == "metadata": + setattr(app.state, "LICENSE_METADATA", v) return True else: log.error( From 0a295ff56d1efb92857fd4167507a7dc32f4c9f6 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Tue, 4 Mar 2025 01:25:47 -0800 Subject: [PATCH 186/623] refac: icons --- static/favicon/apple-touch-icon.png | Bin 4625 -> 7512 bytes static/favicon/favicon-96x96.png | Bin 3957 -> 3826 bytes static/favicon/favicon.ico | Bin 15086 -> 15086 bytes static/favicon/favicon.svg | 2 +- static/favicon/web-app-manifest-192x192.png | Bin 8636 -> 8349 bytes static/favicon/web-app-manifest-512x512.png | Bin 31203 -> 30105 bytes 6 files changed, 1 insertion(+), 1 deletion(-) diff --git a/static/favicon/apple-touch-icon.png b/static/favicon/apple-touch-icon.png index 77ae5bd6cd31404046dd669f0b69c47e49c81b2d..ece4b85dbc8d7ae1e684d21fed069c25c00d4a84 100644 GIT binary patch literal 7512 zcmV-e9jD@nP)Py75=lfsRCr$Pod>kt)Uk&Dp_^igL+>SkX@(a<$B@ulz}O~WO6U-pL+IELFdf39 zhS0&pKa^ct$^gl1}hz>Ype*YEiH7D-1sM`h1icd-_hwzg*f+GqC6KQksT zu{7YEn^Mv=(cjdPrjsPxDQON#vq_p!QeR2aOPX8~*G>{WoIp=1UG${I8SyCTK=!hM*-MVs; z*x~u_*OLA!=^Fmb?4PAZVTO}$_abHZD#?a&&H>g1B`qpx6-lc}T0&A@yKb@x#=n}~ z{2592O1h8To88>ZFjZa8Tv&y|^N+fn1 zD4!1QKTOi?Rok&EvCz&r-UC}0&&1x6mXkDT%0;isZI~Vp2Ikmlk{*`ymYLy2DYcXy z(2=)d2T5B?S|$o+lzLHGJbBC!{M(lXfuS?a?6Vd>KhwvR7SPT)Oo{b^h}=n%=FU_r 
z^&RY;z)P~Lq&v&%_A4ErBOtF6xTDyWv6ZXhax2Pb#9{HyAe?`Wq}$Al5PfN+RDgEQ z%_ZrKAc*iYNra}oApx)D zk}`zY3k_)J9G;3b0(RO#l2()i?Apq^1TT0T&!B5$$H6{=h z0S{^BfGvH0z~tM@%;06WRoV_{=N!>Kc)R}K*H$Pr{cy5qHZwEWd~K7q0opl70_k?a zj(lXm*CqAIOZheMVdRy%pk4ITwgB2WNA~n_l78hQe&t7Tr#B$Ncu|nCzJEKy<}C-b zbB^?XB7VDhif84(-0?!#)nv(VSZrx_dP@QAoTCiDi2+@4s0VXp!QAm;@U5?cpKfNb z_*x<@1GICFS{8qj1XHeM&uxho{X@Fb02wm3!c{jj?Cad6CBTZne2S#a8>$H9{&M~S zh0p-Jk3UNq-U?9X4(Q0tI4?-|S=)oT5CCh(BmnhoaM<%UKXL_hbO1azvYB&zRy$V7 zKQoaAFh4Gnv~!*Z#oRy^x#sX@?BNf9i59-f7jsY~LF1sjTza_xI;x35uHVi+lwD z3;HoiqX9f(=Qo?9m<6CIka$H@EU`4&^Ajzc2FOtyYG$A0Xm|Dj*+xUl85+fy#`Wh z;Ciy0l5`VD3TPTulA`X5ji+28e_J9Az{=ak%y2%$lGuQbT0vYH^)-n#xWCA2AOj8D z7*%A7)#^D3plM(UpnXr13|7ex5nBWFd>LkDltk!NVgcGY*Ei}!N?i>vUIVSxz?(ta zxkb#3`guJ{uYissg8N3LyLubX|GU>fUK*fJ`yb4VXkxb#2hj5b4K4p7Y3aOX>c{mO zh@k-zM+Ws)1|bfhDcg0i@6Z}U<^Ga9H9$h`mSzUCGu;jyah=I1pb)tiuYq=J;34Q1 zJ$2#gaff!!K|rE_wQmI9?xpn4M+1~V*uc!Fm)E6q3+Sk}#(g9)R#lfj{P2T5`|LA) z`st_o@WT)F_~Vc3si&UOi!Z*Yx88b7AAInEzWnk_{p(*4z@+J>n@-bDKfUIfYc9<@ z@4Ol?U_kgc|NQf7h8bqij5E%tsirCx`SAJYpVwlGEtYr*`}Xat>#x6F0|yRFd~eP- zJS=HocLmjZ0yN34?9Mcauap4LyYIeRcinYY2;i4repzG3j@7^aoxJtlB$G@Mf*DZv z@84f5tgwQ9{_~$}kwq2}|JH7)AfV|zP7(l{?L^Wgpd;J)c&gh}sq`(o@kuA0q?1oR zS)YIYx!9qd>(L987>|H>iYcbhYOAfLBaS#iKmF-X6OWPQ4HX153r3Cc1G=)My8|>m zPiS0DcdAm$_uqf7zyJO3y8QCXb7$PlQ*2YeznO(U*}Lqri`H0U4b3vkEUCvz zdt(IwjVhj$v{qNW$GZbGrEf7eXzWv3!4_thyz|aG_5SN->Z`BT{`>DA9_)%F>#et* zPC4ZiEwI1>nsn0qoktY~G)sD&nQa^g&_o9RQC1StV~;(iqmDXC*Ijp=FgJ=Q3of`| zxLfbK>#mx7@_c^;D-38zOADLX8;xt+)eij|NtD7Zo!oZYZQ5$9twI-E)`z{Ul>v~k ztH+EPqkjGRWj*}(@hS{x-jRkxbRZrt6978mJl`Z~wcIqE!@ysB@r6!5{dA2SIkK?3 za#yd7Escv0ugelkQ0FpxsW_lXrp1Lfo`u&L&}fyz;Jn$ZHlGo{`|dmKzWeUFE3dpVbH8=P0gW9EVTr_(P&xw|dxR1Ng_m!>`9{C}?QeC?Ip>I7x(Y(1@xlu) z)N;!$m)Yx79MI_L6f@)8*Cn7IFAMJ(PsXTGqx9{!e5BP8cIumNzBv{DqAM-b0MJjE z*)kIWI;w_53)@21cZP3q@x>Qw+ikaP8Hx#K6DDtka8SfoJ$m$L{pd%{clD?Npb4_f z(GfKqZ*n;2sLy*=D;BMpXFU4oqZ&MTa7ZS~S^(2*v(2WZmtIcky|%`U8<%EH$KNw#$PgWU^wCZIgvXr#SursF_St72 
zz4zXGac}GLx;58aQ#ajoQz{};O#lt){}?kHVa_>{;OP9?-07pKCF}X-b=O@tv{9Q8 zNJ|7QT0z-toF_K$)?07g)W^sn^@ba6&^qg^lUM~c0W^A{)W!|~8mbp{!wN}Adho#q zwerd|+Db1&hYr;lXPl9VUpMJ2Fp=Py?6uck`uO9Ilioi5dOk=@B|I`oq(*?I+{O?9 zJ+~ybzy3)S)rxEJY_O2ca5WZReDQF1pL5PRTQTJ>pTT?Z>#)ZjdkBw68XPuurMf@M zn*cJbK!Bb0V2i(d=bedv#g`fZ`kxd;0?>tZ*qqkH(QaeHOu zmP_*duC67=k!*1C8`B6Gq(*?ob+;mbroBY$3h7Cd+3dBu?Y3JOiAy7duK9^3 zlwBlYhu&^GTB0Wx+>QWr;e{8DeRGWfjZQ)vno_wZ#MV#?FC>S z=bqDs8*ZrAUVAO^5Pam2v*ws1UNeQ70s1lkeU~Ka*cD0!4H~3tucbUlGJ!+Pf~P4^ znCwaEuO{1O*?64RNztY6j?g0W@Wm3Rm)L_St7o6*49WbImo^2$x-v z1+Ft5n4|)?g9L|P{32c*pqc^tDF998EX8FCBSh2ybei-k+@ zcz$3C#*vx<`gH*PVbOvrFTC)AmRf45#D^yA=fe&=EaXiWT_7(}Z#qp0-(!wBMh6^l zKrH`y%>exgfQB4hsLrqi4=^2)E;c9s^Ugc3rTK=Z+^416LV@sNsG0by>H&$LW)ANTsMauDhD;&_4j^Y%6|e zfTUY)xg~UKCG|l=v!nz7{NXalCYx-czx;)cdC3I(`OGuV^nmU{ZG{9h>^PEElLDF@ z8X`Q&0A&yl=Qz;Pk)Y0LG{THknMg7U_(=HU;8-Kr@(6p}_UV=PU!=pqP7B(o;vI^ecD`?d-fQC@eeg=F4HYngiq%TciWy>wM3|)8d zp^GeOQ_DE`LT-vnL9H2}#}+p9J@?#mhs`gO3LFQx4safdEM%XL7%?L86bZ(VqlgzM zj?@g$Vd$GG=!Hv=Wv6A~k_w$(@44rmus&T;g<2gH&EfEyR7hAL1Nbh<>$K6}BTXyRpQe^|5~&fOKa#X^p-X}B z6HTQcN)n_MGGE9sB)K&1K`QODy1Ng%nf~6G8fhYfj7!n;+i$-;u?bZpK#wm4HdACn z7cNnjg~VGiV8DRXKwu2oSWw_4iS3ycs9_93^(0iCwyqr?B9u`GNl8=7Y6NKdlniEv zCPsW74Ggnd#H`1mdMHgPn~_ykStZ=zrkaZC!L1Q~VMu7DNqsYIDrh2f#~pWQ;J|^2 zCtf2!Q;uW1QWQL=;}nhFX-eOWthCZf;Vy?wowxZx>kQf*6Xd~uZbslu^FgPnZCw*U z|9`=A6z!ZVbV~~ASPUONJS62blVAggF*|UbB|Tgv0VJ*8@D)?+JWXJz@mYkrsR9+Z zuuSK9)&$TKwWRnTNe>s%b=R>#go|-`HM>i9j7L@d0}nhfEPNQZjayHk=0l^m3i*)O z%V~JdoyTWhxE<4wm^#L)384S$vNLF_pUv_H< z_H=E92n9SbVx&<3JkzkZr}>Zyet4`|tq2|v>oIVl`!R)v4I38vuBl_!tkeL|T{o|f zcIn3@6_**`u)4U!nrXt*EKu^gpIpitZ@e-5b;v?$0O%*oY&^GNybT=z8fx3{EEJjd z7`VfzW`E?7N2cn*lK1@5jL*T8j$cVTUG?;=8UXqfGy6@44mtyRNYucv*rhw8*#m0a zT5zAsF4*mqG2?KWrBocy-$)u}W>K=RSv0?GTEE?O#cwDBJ zU^C;MBU63JC6~-}YzC`1pebsyp_x6?9iVAbdXuEpGuLd6148;?cZ0w~ncA`lq3pqf z2kXQWPYlz>@{o!H8bZ?gX7*K=fR5~F?2(i5P;TC1lGa3{&R1W3HFKYE)^mp>MTIAR zS#S&HB^3ws-exvIO49^3bOdP34SK1S!jA3?1iH?0SR6BEOsY(mynxj>E~drEks~!^ 
z$dKH#X(|lpZ=s2s+1N(iP6X%()YN#5*Tkb02uS!0%#Md1dMJ$i(Y>`OLPQc*BSimM zW|<{-Fjw4$zE096W;Q`N==cI?=;1!AaSM?V#!e^eaJ zo4VSln~Ltx-$jSP+dAW*J3tdPygg`{JRlBmihd1!0SQ)&9z9xw&RZqy$Xjf&MHu-5 z$dvJFwNw<)Pf1$S%xG9YfpiCGT2ve-iJBcMpG=Y>c#YSgGO@s`kemV_d9 zUNX%4_U$XWaY62Hcd?ra0vZzor^Wte1`oP-Ku2~o`98StirHVdNrY3t2JSNCdCK>~ zU&cg9nm_=N2$vg4xCBK=lVUff0ca7Vf`G(KmrY5Vz4{k(%n710UBHS0ZA(+c&NYB zYalNTP!Q>7UD?hZ5A5~8=bRfF5tb&;YnFaouYnjEz*V=UnO)J-wQ&GFN8rAre${d@ z9_lag8pulnPe|Ihb7Zjd!{Pv%YjzL%fuEMwO#QfC12Hr}$=2VS*$++a(A?~tgM~*7 zIes127&G*jcn!qV0I7lf&8*joJ-zPG9S@A^NYnJBl%%DYiu0EhPXoB;hM3v)ai*38 z&~O|tm$X5gf&DdJ0~u)mP7#0}&v|azHUaz)X=wUV|nvoGULC=s9|P?Ib4T#?I$#eI2yU78#5qTI z%wLtg^g$e@`D-%O0OrL7W(J^}mMj51Yv2gKl#GI=XXOW|lLkn3-NMY?&*G&s19W6h zFBNsu@~*rrR;T)7d`;p=WA)HxUpZ4iN1$F`(uI;JR^!EMpgRrFMRhwfqkMB_k~yFw zP-73B=vjH0ty9$of zNXOT_-Y{1{N1&z*!g-P)Gf}w4OZ_xJrZ%N-cL@B=B)sM(xdS=^HI)nDwr=i0o!eR# zbzs0umeX)E!?TgAv;@!%p#GC2>gBcUxw&f358j&wFf(qIv~?@M%w$>$Eax2N2i}jt z{-D?!7E1r`900MI_sQGLXw+0o0o`y=ghNd=Cg`VLs;2?Kd`9HW$n5~wmD*Yk=m^xf z^o}P*-3MPPhh~B%qa_`Xmus#kjkN{P5vZxav|X@E9~t;2i&5^ra@cD53(@mrWW=7M zT4>9qZGi50Q|H`VN+AFpff_zFhsB-RsuBP2q#7V-MkqTIK6BECcfP(bfNn5B0QBKLN3q$Z z&Ha9I0Dos@=j8sR1P>@8pvh5OAp-Rp3EstD+8PZIEV(0EpT9Mm1DN%c3kT?iof;qY zL6Q#k#cZ-(_SEBX@WU&0Tr;HxQa?S$jfDktWLHCGg48rZ(y|`dF;+C6OTGyx3lBwf z;5&=9D>uG!p#j~nTa(T;EIKr{zF~GgASuhJMEclJE-|x@iazO50J;I#6tf|ux~7jX z<_;|)eq?0tW@c~Y{jS~BpNLVBv)d!|GkfTca!Ger5YmDAQ?PWk-YZ>#J=JLfnoQtB5r zA6}9EH8w{e5Iq|m_Gn;9gfH`&7`dZ0#sew~=mr~`Q2IzNw^5f*O4D1zwWbD%8yj07DvjE$V-BjhVd>V`Hn# zCA9){CvfBTTOf#lEi7qZ5JXrl=mhhG&A7!R*4MK6xp?A{BNKoO5tS*u6PS z^2csY=zW&xZ~7c}b9UWNBvF{!gP2(>f-y7nGu@y(n$mD-gQc|AkI08xD$5A zRN-&h$R1xP=nXPY1JG$oIzx0Vop!mlqXZf|EN(M`8RH~<76G>dv~Np#JpwPJAg-m) i^_OM_DqijZhQJUUSmov?aOqQ+wNx+3c8 zL=BdA^TYS^_XoW1oHH}$%$alUGxuC`UDrJ^dOGTq3kdF?n!N_aQN>!6u?UtXk8 zw7L}G)hEx*0(6cKQTI|Irgxz3=}_lU)8iiMIKM_s6>p^EyNzDz>|oYFw+GS(K?_$c zOCL*g^BOTS_!b{aH=q*XPAEgGRnBY-00>go=|iCo_Bs-3(71?7I%>3Ora=M?+J@s1 zF_EwFhM_G{J=4Hb03bJ2dWs1kNb+B~9Vmhx8#_9OVOsq_7b;ioTZ_>d^Pi?I065`k 
z+?ffIR#L%Ekv@s;v9L5AQgg79SF7tIV{0FD-E57J-mdz@1EN zpp~f-;2-?kZ!UKl<;(uPWkoY(urVTjh8)Nr?;=ODfNX)??H4O?4CXW7x9x=y)2H)H z9UebB;mh3-aOAM0IPUUPTuZ2>cyG9^j&T!sHNc%X8{EqfmIJ|^q~MZ2>)%L0ZC`NU zHnKfXs*Er@&W~dDv474MJodtC>Xu_qwx@<%>&MhKutkZI6*Zl$QS&&0@@t`=NTPb( z0-~CU{IlU=s0hH6E}n)l z?|(P347HD@1jGB{nQcRCZ_IKzM$1*3xliVsdD3D2nWo{mQ}8j8*ta8I;9r(f?b{oi3%e&7RxPPwQ_FmEB}G?3 zv~903J7_)CWEWcFM*M|YWGQGf7oO`kUDnXVfObQMse~fV_uCF6dGlV?{W<(j)z+?l zW|aHlEQwPYW$KV?zdG+n2Y|gTrv zgN~*&O$q?3O-vf*d*zZ>KdVD^=cO~q$2m%4AR6rMSl#~nDRL$Dk+BRPcRp=lgvCYM zL8rL#+q(^Sx+O1%3cSY+&bKD|<5?tr+-0c9il;#rFkn}%5Se^Ccrf&t`%M|0g_l?$ z0;h~jB+fAbdAVveHkam5UM1ob?(c7@q&9N{EFLL&8x4|7HnUT>hn(%2Ue*$}*1tK0 zAO@2cJi0LV{b#MCzgoe1htFEVF}pSxWc*tAZpZo3#=J|7AS;d2Va7sS zcE7nOQz0~1RafHdROetIeo0?T(Eb+7nQjs6gdE_C%fP2{W<*TPTHLlTzU%h0POdZV zqTGRF5G%WxhDBfEl)sC;(B#e(w02VA$qY@)0t7e?Knt95b`qb$c@%4s$Gs z66is)bmy6y5l4M!DuppeUesJ&f3@_X#LwwJ;I#;RcIOEb(lRW zcdV^kTJ7o;`9(33#54o&P?CEj0<=t}aFL%7rJEu0^#L?haMGiyVOlIv{wyO`Ojt~V z4sBnhH?b*-S{qDphEq^9L z67mwLv|IU)jtU{rZM9dIEt?<3O=heRd zp;0}-fg+IA?l_rwSHdXckXhQhSbDp(Xt%DWkeEHIA)?YUo{vyQttxt-Ro1_94u1*c zR74R$_>_o9EqX>qwKy|==e;8|01Lp_-&MF+{^sQ~@3~!8k~N_tE4V$(l^;6obRl!Y zJ9yZ%=&i;1lbKp_&W=|0*tiE+Ro;^8APH#KF!dRh7#1Iuv$b6>P=&tbpF)SKkV|5G z4UR`-4^+ar}-HB?QSm|hwQXU7kKh*%gTLgv=aXW1wtkxZ!(JO(pveDyJ^qZT`f4y5E^nk zEafK5YUa3lJPPqJ&U|hk%Pk1JY~GV}smT&&4?Ii&!BrB{Cc8 zAhatP%D98@GB3?(?*_dF5pJBd83Xqdhz=nKo`Q0pRot0Qa`>RyeAEZd#_Otda1P>j zI8S)IfK8_79_6C)sP>4rE5MNpSRn3HJ6iKQ4DkSxZ9rrL?Bvbyd@X&oizGhLCUJ73 zqC{&S_qE45fuXutj|T*iX3u4xK zs;&A{Z5oqc#_QfZE6U;z?R4X3jo41m=Zijx3DzYd56!Kk=nI=}h#ifWM2?CTt+}AI zjkXZ-yJs<8vLcc}_jo@B7B-!im;9s*=eKTSlMhxY=^IxItCu}!N1m%!C|WnmJ9nh4 z_PlWVCUjj3Mbc1$*`^y}0_iD%^>|%bbkCwS3p`FzdO9%5I7ox8-JbS^h`yW#^tqcY zd$aZ>fE^M;9bGt0q6YI^s#_kk#ug{7#3pm?JR+Pg(NmrgeR9=W8x!Q}eeY4RwCe|e ziP?}tUMSplJqJi@aPf6q{CI;rbDj|`x;-5zOr1hVKB+pczOb*3bepYi+h^&}l#T3` zKVO{KD8JykpP`N?87EYcC{aKCsyd_Q;Q^+t3HJ&tzJC!R<~B?v34etMJNdzA?iqpE z{Lc7`8ZFv0^-4GzZl!^(9(b+q5ZLWxcJZFJbztfN@#DrXRO~xErUKw`p;L4%`Ai^0 
zkJiOrw(12W%Q?A9ohRaJfiR1nt@zE6W3U5BTakL=`z%&pD(kNYczJYTyos*Ygi6`Z zAr1hJizL5UV_kLQ5(n=I!KaigMgd0rejrbp2I~n;xRNc%&1y`ztmywZs6DM|DcD7} zJW=q(s=W|^@C$^INmwQ4eak?s8;G`mt1}6h@W4T3tG&Ax3B%q}_$@BV>a$LJ8Q!wc(uMoGQKOB_kgqpuDVc^6Vw+iC z$?N%;vxwlm(xZX%%KZ5=8h4P0Is0$nSQXd6yB>_jE7Ta$qg-NNBZkUf((WIkQjFsm zIIOZV^_W}6pV5D)9C@-z(Pp5Z98p!lTM?ik{?9h8!K8A3=@h!-#G95^r@m%UtX~d2 zM3WA`1Fl=#J}CpleBr*@_L6a(gI#B8SVNi28~_;V;#x~(I14g52V-IfUGGes6)Z9L zf|CJ>1;2C?re02s^ja;Loq79LJh$Q1lbK-*h-2rB{Kgaz>3}D@ahBGfl$BUh>G>*( zNy9l--3ZXPA1*iHyJRnNv-hEWqYgEcB(1sd-y{sdotiyI67C~ z%kGlZU3@m8Gf#@R1Ds#V3N3~GaMIAl2E(>{yHk;7vwW#BiFJ@CQq*=8uXk#?Iz9O- zgR%S%%Z`{YKIJt1##_uZ7P}UHLLqn3+q1oe8Mbz^WyHvc%5Ivj@Sr?Fys); zc_c!_kh-JQ{skU>0{(3almHlXe^&ZOZr%$OV*pI0AsX`CtEX+ZE)FBt&L8MVh;Cg? zCOZ)9yjVHw)(UuEFeLnI@35EAG9wXkGpqFMq%fAP1sR`H6o{wA1tUIHbINl807fK7 zwev3b7)j`PFHld-B{9uxC)JI&caii6wts8RG}WJ{wTQfe_0;p7)D(BQ+K(0z=TTk5p8--T^Kuo4qA)gT=0M<}QsX(=wER~@9jILN)Ob;_f z0yiF~U{{cH%-bwHr9rEvx(sfb>r+=44ve@ISq?9IL{dtA$^Jz>=t(Q^Q)M5?|eUb>?wJp)b3es_?kBYK&IH2Y6<|DT1Q$agDs&L&}iIN9NGUq z@7d)H0;SD*-^~p)G#i&m|F7YM1LX@vOAHH1I-_W-Y zB`0;vQ)>PAFCn1}pHEqx0x`bO;)Ln`E#d!dF*MC1#1nr~HSZ?`Lg$Bhd8IA@k0gzm z6#Op$uf34bbO{1+##g+f$;XkOB9_j-V*rI-(T$%Lq97s$5d;-PK%g^U+Yjf> zo1X4@bLY*x$DAt5(&=}*`}}>+Irp6Zo@iMv(yEI1cyuk))QA`b%mcsdR=FAA5UD)dopZJV16DW1Mt9W7+|59Optwy(M*( zR8ta8Hk$<}{eHwe&XzRY7=u$!o1_&0$8nlRZ2M%-)_f zkww$rB!4w30VXF%ub8B@o^RL^baBw{DFNWc^{pdmf$@@l5_Gc?_B)Z%FiEebvTi6e z0G_??70KI-m2_SSQ*PMTIUwoRk+jLo!uo|m0pLYCJta+$R4MG!meA`CMUs8L^gu{D zLP{C{@5}EIQT!hx08}lZN=^2)$ifblG%MMekbe{ap3NuB{B!QhFMts<;a6Um8alC; zEC7Ua2%(pq<`h?epp@mUWY%p!x)1u3rO4<;FS^ANUD+Y?Q;X50cr`2kjBdN_Hl1}ATrw-NJ`&h8HO9csbAKfk z0B9u3ijp*S^5jV^U%p)P=FQW}l`D1V&>>gxD2IQHF)CH6lu*VMS6rdH@4j2zx^>g_ z*Rw=PO}Nivk3FU*pL{azKou)i)VuG#tMkt%6JteKkP#7`rn#I3z%xHfCAG40s|17a z;(T=|{h4Q;(Wjq&n$#@;5UL)D~76L)3+RI_I-Mx8r%))!w`q0_{@ zd-LYaHFM@n*H?s*3<3ZNxH5Ns~k<=_eTkfPY7t7}>Kn8~~t} zV1Am|)IR3so_p@mE3ZJMOe}n$o zWx_!LZ_1P@?wTP>0a)#tA;)ppdAQED{hu*ohWhsH>wd9*QnqYab?MSY!-o%7&6+jE 
ze+Nk(5D!26u-<#mw|~yz4s-R@S8KzD4ZaJh49@`kz%2l9LnKM`Z=0*%y?ghnS+iyd z6~yv>$dDl#IdY`VIp>@pz32NK6Lj$4LA7bqMsK}E{o7xL4I8F$H+b-1J^l34 zzT=U=n=@xl0e@4M1pq7n05%)2Z`X>j;%(Zrsju^8HH?Wm`)qqG7edvmuDZ%iVaJnN zwQ6bM!iBo_+H3RuoCN@^1%T~dTL3Enj2bmck3RZn+_6ZW5gHPs7bbg|=m9%+?5LMs zdMWNDWJ_Ou`DNXDYkp#1769-O06-|UMl-+n-h1`J3x6-fRRlSj1q&9qi!<9YZQ3+9 z?Dg{lfWulNTCxCu0|3CXIJrD-0_~BXf8&ie;$Bg^c5N+Qyx27twq^bL^=jO>v2T_Y zfLQ27&%7)E;5Yyfk0xKu@X=uX`t{wgFrM(`1Hi=>({aYGY~H-tO=0-0kS9)@sD~bU zDBA#VN`DjpL|{rhbdC%P+s&b;6VpZ41#AH3vT@Ocd0*H9Km5kSy>42LXWA zUXZb@$e1x>^vENSzzPaA+sTtB7jT9)ZrrwQTYuloD`iRmpjeY-0PFw&vRV0`ECr2_ z9J~GY+ryLw2!}ldo#*@0Pd@p? zjYeQR_{{>sR9kBSlt}Ig8}BD9(qLi4f}RBcya)hesLa>`fDGt^4?d{5bLaYIC>u0r zpnoMxmZThxl8j>nBjLOqAtO_!tq#qK6yBP-Hds+$)?KnfSL$>EN$dm z%m&d44CZ9_0)zzxvja_q(Dsr`Xn#W;EExnKKRMT%1!>c0QvG-#u%FZ<=ddaLhRK@2l1bU{elX69dt#UGQ3gfBw?bY=zn{`s%z1r zg}X(G3fy_;6)u~eAprgvp@Oz9utmw61+*tvVUwm91|;~KtQTH;?KQE537?V2DUIHw zM^=-#zEh`8y6iG2t*HqBWMGrtPovuw#M`DyIm>>wiwh{zgg%ijAGtC84=&X&};T%DknWt^kJ z*)P_LHN{GGsXr{HXRa}ZO)zmK2mo5r)3h)3&KRk`OmQ56O>8vnqknl{p#b1K7{XF= z*Y*hg#Zi^XKQp`1>8d_8(YZ5$_C43QHF?cbVzJV=HOe8=uVqoLb#Z7gAiK2=Y@xZ7lz%KLVI>PJjJ zWs|UF=xy&{>VM^ucEz4{k&cHg5&)w715TN?2sG|tH#dN!m5UleX*g7B-=n?mk;hYs z>qFcx-z7x~K+M_!Fs}1X=xZ+tPC0dl;EiI*zGeRF*&M+UgS7IcUn(i~4~tUi(T|fc z0Aj#ke**rYwnqrN(d**H-qKV}70&d3k+cad88%CzmtPLMLf@x(c4}Cr{?}OnKnxf- zX?j^wgNE#1-vfq5&@>l^v&8@Bn!GMA^ukA(^s3qGDg47oK^!(33a8C3HQFP_7<(HN cQrEx!516HhVy*7bO8@`>07*qoM6N<$g3sVX%>V!Z delta 3946 zcmV-w50&up9rYfNFnL5KxKgJ38ss6<6Wh;$_~ z2_Pkkh88dgNCpHPS}1~M1Ze@3G7|$s5@bRW6v2oAF&YUX0v5nQu%HkN77*R+Z`t>} zJnr3cckkZo<9#!Z%)498e*bg6e!gQ=U}RZVMMPf0C={!l_lT^jM zrh+^C!;(IgbU@O3l6FhlA!)m$SKPnz`$1#Op+fp(QwW2xEbBXx8n}!uVt8FiH6@jg zIL5Cey(Q^27YqQ|Drt)`=97rej(mIG0I)2poTMMvOz9-4g`{&O)sS>*C?p7PEwC9Sc6pDgL$k`5&a zMj`-Mmc@H6bZ2^ZNljxAEpg+9s9G=SkCL83ha>?+&H=D23$b#$q@PK;RMIJNb1vrR zeSv$noKa2zuq>;&q?z`ts#Ab7-jAuQ2!9lJKV!^Vzl*c|J$3*r%R;7K zB59y)ee{$R`#ZAz*evg}B+a)?-)XM0K9c2ga<7Cf%R1S%!G9^KziU=v>68Y-Lc)BV 
zWt+y27-LY}u~2NY)3U7Ul4eM1=Q>fb46wi+f;0CINp~7!-j8E!u>fFMRwGFd+SbF5 zN`c7n<9{n6=^06bjWPfAyV&pdNC3EY{ZDPCk992^zxS1<-(QwAO447Wn5$U>;40fb zHb=+Umd;tFiL)WTEW{4@tkXMbCFb47fC2`jMm|wxhWy8xTSR zkivXi_o}6lqgi~@emArd8iY^)xHA7&k|vhT{D0$MVkL~Q!{xsZqZ7gc(7}#c&a=(U zYFW&Z6X6z2(>}(Sx#J+->sDS{mEDI}#xOuY^l&N7q zo7hojW6VCk8+`$A!wZkv5yFlqfRp9Nub{uP1cfYWOCjhR+)634kK2VCo3qhG0 z_J2CU7MQ2RjxzcPegGivnf+yQKP>x?UXMOt6_8&@OMU=6Ea?tOr6sDF9UHU1izw=R zV@&VR09clV!u@BN+|Qo-?w#m^8;mjV@;FMn0^BqrH&C@uM$tL2O;Yo8>CNK+aJ}83 zcD2IzZ($lSI;(Q?1k#%$B~3BLup){n9e)59yOjY~=4LGEpK4i0$F+a|e(l+_M<0Fk zk&H1q^UO0iOrN_k8qKS6!vkPp4=vE8^i@jWIhj0^rVm z6votCS1%kobVxgQ?$oQVzN&TW*6D>8UeGJAyy5^*lFFAauji#)V+Im=igCRMgjov zfn?I!7-Oi;JG!D%KijgbAG_sxQGe?(({l6ex8K&tkt4Ny`Eq^y@yGi5>uA-0%>K%i zE30qczPk6`dv(ew*_Rw81OU1Ki>SEFr5FHL{QMn+g3rZZS0Z26u3f7^g9hpK*Iy5I zQ^>23&r_yMQPZYPb@Is+XN04K0ANrnCEZjchEf5bDw!m6_|bcO>$~s1tAAFr>HDh zs#vk2h7B90;lqaq1|eYpD1QQO%l?ES0IqRu|5|ad>XT1C(YSHr^uPlIQNt6;^t97X zV`2y(F)v8r>|NZytjynit`BTCGylK~+b~DCoC<1^Q&gpS%j=;=Zv}lp;zWZ*+ znF>OfkjU@0ZQJUaYp&5b=bR&EF$4g+cJ0#o_3O2A(%d*b5 zBXu|ik>&rEEn9Tmb=Ucd9_0JfsZ({!Ew>cMc)|d{wTL%X1JCgjb@&L6KKiIGzL+pq zdddR;=z(ScaJlUm{3njcCt?JX({DyDUAk25+qV}no+v~J1Aj~Bp@$yw^s1TpbLY<0 z?YG~aF=UYk0Pu<%0|0xr{t$%`+)GgH`s=UvRFV}cRM3ziLo{a081e5oh>!+gKJyG= zmdwiJMqeC}uB0RN)*J z5CBU6U|m!Xm;?$I&C^dmo$>o|uHJm}O~>ra1z~F5bkj{D_LPZQwQ8j&p1}Axp7OK; z)&T(FN%}BkO`A7wR;Ny#^xA7|DojruI&{!uk3E+4JAeFcgz z2l52K8vsBG>P){6^!gny73a&#IYK=3=)snP+|ZFDM;!4sbLLD>F}Qc{UdPieNqGWb zKLC)9I_mdlNvl_{c9y?a%myEU%vCOF;>3xLh4tl^nKvC>k>p14=u;n`=Ob>dA0vI4Bc06<3PN&y!!6DtP* zyiDN@8#d_btFJC703wT^HEY&5BFO8*u~Hg0Zk$UAga{Q-7Zf!Lnr^HE2M%%$K}%y=taFhRR?@}HaXv;v%99Dz6ja*=5F?%j@g=M|A81VG6MiIh`M zji)ys?<1E?mduMfcI@cnDhezd{R;q~B7b#b90{2_@4Qo3J6?n(LQWTpC~9FXGoDq) zs`84$lHjLMfDBwF%EJmEA#-J{DM=FYtOC4VFCtGndh}@Z?%g}1Nbp;ZSUn{3k?VfN zy%g~Rzy%jvP)Gn2rzG(K*ofEK@1ukKe&2ofX~c*To->l?kQXMC#kFB{0zo$%zkf-S zCJFz;%jx1hSYNETK7IOl<{|R10#+4e#0bR2R+hrd?9!!+Q-bbCh$D0XqKm@abWzIG zXRyLXjT)t8%a(a6`_y}g`;e#bOM&JAfTLw84A!!&*b1|d#W08rnV%?1WfV34R5rt~ 
z+O=ytW##o8oS!P1PD+GJn~s4?ZC9 znj0d(MwppGEdqmK_lzY70O`6$={9IkEAlt$*5hUou_8Rxp+ko{Ta|Jll92VJBMEne znfXQEnjipF4cvI#7A1ET(4K%+?$K%nGayJ68{&wd8Bt3=8{Tgs;NhZd(KB7Ub`3r2 zQ=gv@0C>MGj4{RaJBe>o<$t3Iicuemea?r6srhGi<1gU6wQSi^#CXm?LqcMv`IGH%p%#Sj@2as-@mkr<`w1w<2JZ4@aroqt6#J*;94Gx(yN z`(F1j2_o!8rUIR9NyqHdP1h@eztOB?LauZ{7{-*6YmxmFK+~+_nBD1R?P`)iIC#67 zqVc$=UtkeMD%wis`wIFkl#A0VDD5d6el5#r(#nAh@z;|VG|yri9O8y^XfSNjk{5|UTf@Kt(18m>?joTk6ySA0CSxWPc zvc|2!8Sk(ymd33q3Yl8IrDd&)1EYY#?G)sknZOo~+q`>WnLh9Z0eRme*5=(QD}nUV z+!Of75;Q+fI20Y=Fvl2!=NpIui+`Y$Re)v0X0Ng%Ouk`p zGDn!C0io;=A1T>Gd`f`8F;?u3jxR18XyF#jMKmuWm!9k~Rw*E$6WZIMig71ki+ftr z(_?Q%7@1#k>V(z->E>S9QHI&C$XEbyEhP4!a(3{EaG2+>gPac&1eVwUNC{gIJK{K7 z%3MlI6qTqZr+;8jd(VLkg?F573FXYW12n28lpQO`L1tCV*AUlv#Lbg$&*r#7ukqxR zIqoSOv%lp>nsat=+S4!M06M1txMGR^G&Jx0xqHfXPOB|qU<%a>SUAhYLqm`Ssir`dZipOO&@E zuHxA~nVB1s>)X~oT_<&_QdK!xU5otLr_R}HuYa#S4~~=J zWIEluI~wLW|LNp7|8yKDFE8{v$8m1!yBrPqKB=bTEYJx5&^W5%{Hoynw-6D@R5Vny zQ4CZpR~%H_RJ>GFgupZJ__jq)1fIL62RR_Pr{j(y~0tdLn zRSp`D`cAo}((h{CBXEIJmF+>}&~8#u-_>kPfm5$obxFU|N7DBHt^&8HeXD+>@BJQq zWU~H&+i!klpzj%1zvbAJEa%F4aP*AR`a46x_?<3NqD0;Kl0GwkDpMpRZ{NO^YuB#H z@#Dv3TNkY15_|%$_}4?%ur{4~H_1 z^b_?+L*~HD0TY^wUAlBhCQh6vZQ8VvtgI{{DT^94YDi8_jtm<%Otx;_DxW|54(&BK%Q;$DuUyIi?sJu@>?YS*qUwQAM!%@bu+xg4s=AI@+esEA7r zA3iLtTD9`&a0QKLp?d_XAx{&25| zNg(tQyWw^D_3PL1sF^aFIdi6~OB>j+V~5cVfIr;Roqt^QhyF0!|7B%maryt%t5-5) z$PibTay)qOU{L;tdtB{zK#pwU; z+_~fFKijr#E9cIgi>{zwDk>_ZYuBzWck+!NtOQX1;m#OPr7(W=@891ue)aC%+th`R zA3u8Z;eGY$RT(^Zu!lQyi(|+BTHXb~pRqTt`2&4D^|nQe79LJbnlv#!U9ez*Y~Q|J zii(P4{P^+GxpQYvUhzHi>u?ExKiu0W9)+hH?V0)8vSrJB=WjJ@)->Z3{bP7Lx6h+S zjS}?3rJDTV40qj>6Dtmjyh+I_JKeFpR+cCF2N)vlXZdsOTGjC^q% zc9-P6WtBb z{QNt~EXaiME9JNHE+-7jPv!htKUlxm(E82wqxGxxvqr@4+|$Za{96&kJuR&d%@z5I zBE_GI$BNGi7Qa06j&H>{+@SyBhE7gIzJ~%LUn&o{I1}D ziG2Fm)Yra3Ty)SSjUN)>q4DX1E-B~(6S1q&J%TQ2kd`@Dtcr)m>!Y}@fPYXzTBO-0 zmVNzBzKKUU1}&sX+P;3!_mnq&3NqiCoJU9-8xi$E-%(G37&kSUn1YSp!^=T`)5fT) z)v0T$9+zMPTW;IbXWD8^zdw8SOm5t`Av<^Olx^F#$;FEo(D{5i*@9Jo`FfPHLY+qdjF1Lh1K`=zC&5qAVE;memVo4t4LdQ6xw 
zAyNCZ1K)O6**<3ve!@N{d(MeQ)G{l8|-8smTJ;Fc|063suh z|5QZOKkfi^@7~?yi+Zqc-#&A$Ykff5#7AAabTMT_$XR{@)b_{yBW%}>_kWoIQJ19zA*#(Jm7mf3$rw2BmcT4RJ`>{3B#PrSq>4hm_4f zv5#n7=%Yx<{QD~d!vQ~}Wc?Awf%=e=^_S&9s2uG2lm3D}i+GE(MsS z+Xui&P^?~QD4lXmleDpcEo@?&eTp1Ko+6Qb3eDp$ibBOT1#23>bD`oFP0m;JTdv{{ z#Y+YLqs*+>^5YwEa>Enhx8i?__X@u{ps$VajX1=0)6i$qQ4PndZG_a7KX&zw19?r4x^NYKHj z*A&KAoMUtEi8;sl^XJW3e7A1h%>6O=a2JI6b4buZ_k(@GJrM3b*}DhKGscY@C;j^M zGx@2cj~coj{h>pLy86t;NV9L>zOnS-;Niby$r5v)yQHK<;Lkmk82k%W-}ukY`|%(5 zjoRy7%*Bfrn>APr|3#D^KBla=3u))q(aJAW2a>KoA^mFA->@{2YCq^(QD53RNx4wA bfagL*MEiZNd%>mb_i9fBsuCLy9d!Q>dBe1} literal 15086 zcmdU#XJ}Q~7ROKQ4UJ+~5_=yRb?{lnioGFEMR61bC^3cs7(lQ1pY( z4a!=(H zEF+mx4=C!arm7OsV* zPoF-?hYufI{`m2umk2PD1Gu?{-jjRmQ;wRz`TY5FIe74(%%4ABMvfdQg9Z(f(W6Jp zvSrKUo>2v*b@vjoIAXT)A`Sj;vj~ zR(|^FCn;UJv>+y!%a<=N9XocEty{Os{rmS#->8ldLf*uQ69ejZ?%X+~T+josg4yQ4 zGJ$p6?!k51>;s-03D=3qLlgD}z z{k(VYUTNI8u_PoUM1|e9p;W0-(ym=Q`TOs`Lv+N5A30zKJM#!~aZkE_{kk-5+B5(+ z?Z8hOG-x0_di0Qi0|!dCZr!AQ{rX}1r&X&~^3Om21n7waKbXPJT*xzMym;|Kh7B7Q zV0UcVp+g7RxN)Q0zI|KXzkhFh{O#Mfa_!nRS+Qb;G-=W#pbqVwJ$tsidiBa}Lmc?Q zoTBU*cZf4PcI=QcWy%ED9y^~qcTR|X;fVGeJ$h8Ix%DHi$;rub?AS3k&c=ZswQ0)N z!|Auru+@kWBLXlaCML>-3m0q=&ueC8rc|p|&8<(rEnK)zY>bZ+KiHX9N2DuPu9z`v zZCIj237I>0Zp3&PEQt1x88b$xXJz#7-{1A2IPu$cf?&1o{{sgONR1jbT-{ZwRFPAs zPPy+3x*=|D+O$cCqZU6g?7@QvrfQt{$&o$B?ygFTW!&Dlal=A4*EsOo{H>k}iYkBJxpSuw|ATS) zw<+9x$&w|dMT-`4`t)hnZ)_vtz;E+s<^WO6UziW>*|SG}`Q;a39EQUh{H$ELa?-C~ zzkr<0V&@toeg>VLzZ7czh<|5gWyz8yO9X$ww!xVB&%Bb{bKSaiX1>XHARNVr-(#TK5(qv9oGAdCEk z93WtCC04AF>Z$x6_WXu_o3tJHlwE(f>%jC2^J(VQ#4UUo=HAY=nXjfOQ-`?+GGg`> z$#t9_ngPpm$Em_K$8p%1kOg{fh%2D=Cy#5Y<7jcM;5hHOCOeK+(~6Fh!?kSwHIx4( z9cKYoMQ5-4{hnz7;Qa7r@?YDL!_}{Sw%yha)-KjgYBFOt*N!Wt34KrktVtS~*wQ`mWvv zkbO=W%2H>ja+!RI(cfvk7Ro!7Z9Db0P-N|b47wvK(z3)MaNkn?q)fYFCF&vrS;!2S zn*;;mz4AC^7Gl9)RD>+z46-9)(05>@Tv6&)afXmdyM_k0oB#Tp{>DyUL$``Gglu#K z_=En(&+!xb>T44^(B+N`y~pnOc|>1h?#uo{R#ukepOu^*uWUBi5KC$?lLhqXI%@AUL^89aEfSy!oBx30jf#f=&@%IxJ)#-CV&9ye~B 
z)T&iWnl)=C>{SI3y3lFkS^z*>mi5gUGiDe$!RG;3Yh$gmUcGvCa#H;{%^X83ApFZ8}51@;E{p93ia~6U zQy?Tt{pd_lPFD`;clP9wi+uJaQ&Usr;>C+*ZHRLZtU-g1eKX2{?a-k^W*sp~{pdtD z-;DtMtk-bvgS9}~h<%aC+LR^k@kiE_unpxnZ$aHc=tnoc7(ndB+r2^Bm7Sd(pxYJ( z!|~(C&Dtw{&6+%GXHn`$OxSM=w#DYi957&j;D?cjzUbMrrzy++cQ?OF!-H)vK4$&$?#}`a|qbe6jZD3cj$FI)I%R$=1(QSvfwr=s*`b(al6CQMrM#yH4SuQQO8CsiXWKW!AMAE7%<7 zo@czHOn|@X|B}l8Qr@flUK!i@_a*l{;~ixvtE)67HlUaPXO;2kf`~GdrH;jnOjDP* zO`K+4#onZi^p zlOIw4$dMzF>VqMV_A!6z+O@06q4=h9{(&_C$9~&6^W#~5_+fj$JF3p=a zmo{zMm|T?nGlKo-1GoRku~)8KDbuD+GyTXnoj#{6t5&U&&6_s|_a8nG;y>6RO8<$= le|=*h=J7*)NZVk?g7r!38=m7glQmzDxbQ`K=r47s`+sjseFXpj diff --git a/static/favicon/favicon.svg b/static/favicon/favicon.svg index af01b222827..0aa909745ac 100644 --- a/static/favicon/favicon.svg +++ b/static/favicon/favicon.svg @@ -1,3 +1,3 @@ -|c`bqPK$ z+FqaWQ-=kp(wLbg&<4Fp0r_)q%Ju*AF(g-~{g^zzy|$(KW!(zjAZeghS>?l29Ju=b zTgnltJhCN82!`{PF?T{ZX=iO{Zd+q8WbK!tdhOxZ0eFFDi-q+N2YJ&}M50rLapMpJ z4WHTEAr4PMYW#JJxr&iJDPfe2_4Se1+G|(3(=>iX**3=SK=ssg!w}! 
zy^ybclJ@PcSf*8VK-K4DN&=IFKsZd{$ae4&Gc|bK_6d{Ar=3hrpw)cs_Ulb0-X3d^ zSk)NT#t-KLfmf+m0XO6;T zS^9Uc)Mxhf>gzww-w}J7blM(Vp0v>@Bv0Wd60mn#!e`+5U2m74E4hL3}XejfG($U06c# za?*2_0_b$YDGQ8AjPNGz zO>sJou+sRb4o**0&Q2eBJOd|W#IJaB*=bSN(*h@R$3JD=DL>j)JWPKoRh2VjbNM~Q zv+)(4(L)?je;{&IS42J~%fjC(5Wa}C>7fXD4+Clh_^=r_DzEl>8|gc>#0i;zZRp$C zK`!^wb&IChO;p~8jMAB1sK9H@!k2mOL3e}g!&B+1z(DHK+ND47)+QY$|Ci?BxHba= zdK{RNw{0cTOEHi zVv3NagdX{hNPsSg^>pOkfxW&ZCZe}i>*3&mES~WqvhGke80ac~=4ij0QhMi;Ycy;; z@=n0L@(@Cp@+V-_y8G){K8_Lp|C7j(B$vDD=aZ9fiTqxF4orZ(~?l`mZ%^)AW zUYzF6vc*`qhTD(g7sj@ic3}Q#f+hH;B7v1`!+rG8`<3^o|31F3u{9SQK3^(bw?0fZ z?1N1A{zon&*_wO12~P(mJRjjHJxnnp`kM`$XS(0|SbU_{NX~Zw1j`Q4s*J|z3|wD; zlwzthw?xJiiPm~=c_3X0lpKO=0xLcwY6^Px;8AtDr?N&j0S&2(1Gr-Rv7})Go9C*G zaB8M#zk9{|+Oyq>3R%U;Qo%>Moj&h%`fRLR;Mtj1$cG#M(i$&5lt2w&iWdjpY%V^y z?$olC#0CPgdT+;z%kL*~LE=0%4CHhD6c-ewhUUqxdgRXMmaa`hLUrTQk6<~-!Cv0q zUk?fFx_jQVJ$*PFB;uqy;KQ<8<<6C-@`u%-gGX~FSTdXlznPMtUfQkq^WsgxAx3XR z{#ehAeuc3oh9`03E^V#pBiDYuy)u|=@}^ag&@N~C{pBS{`AfDfx3ux{O!HF2vVeF{ zA!0%=YFa=z(YZSnRobQ69;AilS%OGnHa$5CHB`Z+g;4_dpq}8geUF8K9e7penrEH3 zmn?;e^XG0sY1o}uV()ag#Y$p4kD+x@G%Q{I8&ECq>bsoq8-mH6594qyqvx0`YzrQt z54tQW@BR0b-_3}iNuin@7{~z0qhNm%&rGybw>pM-U(1Z>Tvs9KHcW&EcO|G@C1gv`!ct}OTfvl-)Gn%eJf&WN za7jb^I#1#WhyWo(;sGdKn|^$caxi9?y}+N_9c%ji)ZX3eirobK+4cFmPEAkYxXzL{qayP?=15HT))Y3WO@Ma0#y zKM9U|XrC?0w7}mmnl(sBH7PRBpC#Vtfs*E}0WBt2?#wq~G~R9K$8Al9ZNKMClV=#g z$h+>p=6Ky;BmH2XE3Epf)RwwAK6{TTo_686ZGUn(Vxw7$^80Mt6+9~6WY+Y06pzAY zM#%;Z!4K#FQ2`yLiyEgt|6QD!flZg|LFEoj%(=T$ohpBxSa`zdxfxU`881#!Xc!Mj z+16o;6MeT^LpIvLiMc%WimO+jfcZkBODv>6wQ0oFAKM5z=6sH}NB7AGk$KoxJWoO~ zPSdMB3~TNa0lUn9%EjrnT5R(+?H<0na0U-{2fL4Y6w)v8s00h<)DER>dr89r9{l1ff<_HF*WKlZnd z(c(_v=8cHFs{?HSo0t)3nBa8GTIzJ9Fm(mKmB>J zy3r%_5RPxVU3_|4b2n3py~XiY?{L?%?^^MGq)4R zl@92^WMDanfUUqtoALW8z*`1ovufj|AVHPt5gtjv3J2s!%HdDSBfgtz`MMCOG%`xY z{MwN}du(04b`Y^X8m_oc?Q$-z{i=r>vQKHVx@dd0Iz?;sB`2|kn;voxm#hvt_+CtsPwi6Ov%^IBFAlWH|T7}NlAu|@!HpNp8E1CdAnJhRN5qY8}0@)+x{2jBQF0RX<< 
ze+h6>L=%ep?5*1#0IIEd!fThS?XKd#Il6t9*=xe^=J=)7mpb)%J=l~BgUj6yWUlhMI z1K1At`e78y@%u@hNwJIy^~9U8w@5ofD?01L9ELYoQ1?u?+yF4LP;_jlQvga@u6~}i z+px1yJ)tok-pQDgG2f|0W!laI6l8c*MgR3w7)6^twglQn3f zw`bSR*fB}s{l1mkB0S8((5y6V^=qKQA8=M0n<>_!M&Kam{yyoUM?z%LkQ;td9o7sT z*(>6`?W$QQf4d3^l>3YUXK1O7PUueEGX~wf4}=TlPia|nR;;PKI=9z^N?{fUuI()< zsr0zo$V9GW!%vPgF|8>7@$?>zsoZE#6Q^b3<}$wxp-UeqpPq7ra&m?;K7+C{=V{H;yGLoo6fJ|Hoo6z z^N4Zblxo>kIV+y#>c1*g0szF5zNKFWNU)Obj;H5ZhC~~R)&&SlOm*@qC?&nGwOx08 zzprz6o{1%piXef4PsLEXc2kZ&*}?HNY8G(i=ReV8B*?Hlv-a%H{QhB0MNE)pymuz0 zEvnCU<8{xR;OiaM3IKf#oPG2>g9X2yGqkr{=XV-G9vm(cZAU4jIf?W6@aDbD51-e_ zI1CEOxZ@u0G?pr#4>(yOm0maZh@83b6$ZS4=k4^9)Yt;VZ)sgoKW0+)2M>Rgi2JQG z-E(cr7EyR#%ym5@4(i!;_wk(iAqxRebsS$|aSkt-CJbvL0m}ClMK|`%)``)IRtd|N zhXlc^1GPBn4T3qJY>OK$DY{!|%4QGFW#2HMNZ{M5F$uJHOij@*Bfp^tWxt-eB)diyNB|^L4_de24x4+gxqR#~&IJP#1Zk-`GTN+&gurAD5SyslLSG_44?WROuU zG_U7}_fM5jKq0v_CE)uln@6r)N9Iz%_1PC*c-aEb}xE9fBi%&qRk- zevfGdHJ!%t*g%M>`41EAQ57WsYRu-V-r1{=Gl!ID0ToSCR@nLg?MwH4ZR7Z3oB^Yu zQ>X%wC$`P)^l@#m>Pt4fs+WfcBYRl$`s!~I)oe9O?IO!W`~AaXb-8z{7U%M!xOcMB zv_b>6V>z@9Rr}*p-1#2Hvt*xAOj_%-<9)4)eO92#3C!!tnchoeq4e9(i|ipp4^Gh* zsv0`MIl_cp80LE^5&c?toMMdU;3lbWuP-dv)AY?d+Rw6K2fqVnTu)yh<1K@nwjtsw zx13flDoic6zeH-==`EurNqd{V=jnT#<$6#fonknbs~P}y13}Yp=rI#e9Wv3Q9cGxP zkS=)MP$*{x7OOqr+v^83-dsw^tQI!V{I2cx+6D$e>;0p6V{=g??^uSe%!Ax3=4HP% zK_aT0Ydb^XAn#GIDveJ87x8&L4xSb+n~@;S`U6%$fJGlC3h**z5W^==_ufyl(jD>$ zK4t^nYvW5hl->i)Uhwqgl7;rcSZMR+1RdK1^yZznf)$nGH{Y&vJ|5pxHB-Egy^upP zTJ_)&iT<7Yz(kc)m_+-qu2bCUs_td^%VG&0x1FITu>{fA$s;`a74j95o_QFm*vE7eLHK3Z-7UQ9C zoq*Io_$kmT9^J%R*!l7U#o_bq9tM3KNDR7I(F95S+}9%ns$M;0kcq#xP;4%_3-xRt z5MM4+-uXCAi4#jUU7hHO0$%5Q;QcQ9*AyHrQP8vYO9Na8NwMB;#53G&~|iWXr6 zwwJMa)`P#ZV~Mg*`Qb$bN`G5jka%6A^IFsVqbFx}alA+>wa5h^eG2}!>=7@gRY>I% z6Rn+s;q_`1z&U%bw%p~Jg47@C{n^Gy#fmOKVN`cY=n7W`9xoa1LXS;`?lNI+q2E}0 zWz;`P`dtSoR-TuCO9A+TomEmtK?w2rIwfyiBo+-_M+u^_n-S%`hh{_p*_{_=;_DVy z1HB#6h`^2UiB?cR7@xkEVhrlXDs;yX!U3-HhS9B)LA%8-DcrKq0Ii>4?tUfT?_?Ux z5RenF(pkNe$h`p)CJW>p+VckrUcPMwJwwx@%~~vg*m}%z&j~jD(aTbFf@%9(XZPs) 
z&cPNG-!he_v;C1c84cdg_)I$g=B_8lvlc*sQNQ(}E zbT{V^lJR0C5`WzQMZcJ`khZkcYPLDxdCGA+rX)LLjg2$q*X z%7X&d`h&ap>ZK1?Xc|bQrqF=8FFk{Mr{d=wObaX^&Lb9I z25u2^All3!##rqKyfE<+9tRKd5zoZh-oMa)oP1Mllt}F?>J-8WxbT4rf?nSUIjJ^z zCOqrI`b3(QF|aOG9qb9rwS`4g^)L5ZU1~WvZy`Fr!mM3K(^VAMPnT~{tni0{=hXg6 zYCsu7r6w={q1b`G1H4Mpm^RRd`Yz0B0|=W|FU=yW?q=S5>NKP$PWqE*vm1oE7pD z#oHx5a#=r@0^LLHfx5TCJz*;bZ4mAaMU&4b@*nbvX^Gq(1NPa^YiD)9y*fGdj~1q@ zJV6n1=>jb%_O@iux&|FjEcQMQ@xX(%PBvEw=niE4J|(XY0y}_n9RiNLKL|Fy18|I| zAO^JEEsQj(Q8|=yppdHYq9A?y!YCi;w()u>C6`LHiGbCr*Asr&+Q39Zq64gM`VD#& zWDvA)kF79`988Lwszpvqzv(z~;4WY@MXiqn2Or#N=pohnpZ!!;$|+P@tO#2lNDcdd zK!dV?Cqx69^{}#s*rBBZL|w>|miQS0nu(cE9ulyjc_IMV$*UVtzfDDnPOPS`LQ~gMiPznHDm!t29)HAp<|~3e$}&42B~iY1hS@2fri3L zbgv%%HnM?X5iDb)xFMAuL%f5B_HR=A3_zHE29W3dO|~|wCHL5gu5W-kP*c^`Fh$71Jni3)Hdr_b`Zg*91NVc*8crQ?3xV{w%h^a zkHEC3_4$zzK?*cPJ7wowfp1&@)_-r4zsfVb-WgYrV zM}@tJ*(=xQcNqx>gb1KbNkhuu2`v*ab~~ryuXF1-W)^y-9Y1V9r9)-AU7!EK-@3LT zyKW%~TWbvPJ)ZISQ0Hks{>G=29ZF&j)5VKTFl9b1ax9uuezgb+YkBQdwz;cq8>lR6 z93)oIaN$RHa%a`PdW!|ledPksNRUw2>SNn{0QmmM@+vf@wQ4w+(K#ijl4IY$mVOlK zcnmS2k9rX;4;?aJPb3zjZ8zzaekipwsVwzU*^{D7o|ioE{+oY$!|P`w0B0@Pd2C7} zs(DUoJ6s&TB}ch4V7V~O`2M1)%vT3fdAO1D`V61wEmneAn&j!Rj`;aykR_8r zi;AB!%0&iSPEh5xT%=~phX=7Vf%C(Bfc%!5vpDme>DHAu z$}tCQ3H*jSL<23J!zZ9^??4W;EtI`*Dca2VMGzkPe?x-4^GDmkm&IZn)t@fQ;QlagprxMkJbgakW;r3!W08jWStKTpfJ)z9umKHxXkZ}%`!-(7lvzN0@C*R)jcQx9m%g2$w&G2#0Q7S~ zGF)?MX|5oA*J*V9(HHKEiAX#pk~1?l<<$qA@13sslZ^Q5P;yENQo-(k*s1{Lp$sf3 zIQH>K0wQW!PoM6{6HaS!YJubO4=l)6$+d_|d9PETV}JNmgyn4Rr;Jmcd}<&2oeT7q zDLJa=$3_<$$}8zxbDK}(ds#qtZd@=f&=gCq3uwtY$bdC72d(f;486?6^H7MYwGpyO z;tzW83@^lhu-tOdz}g8xLx+mk{t8Fx)C%i7=r$s;Tsm4 z+W8L89eT|2eV2FYt~2w$h%a!z8`<+s#OBuN|8zbRB^NdE0o}77PhLB41M=o5pq)+& zU^w>#+YRdgv?VuoUCh{K;UVOPt+iQl7qlWpXgQXzd{>t*a+bv9i!d4(pPidNkH`s zeprR_Z9b@8;{Q4uqSm~Gr`=n0*vzcEI|2cxkYEL?44Rm$EKbjT{`^<(K^OsTto78i zE@VU-YoMSMSdf-{6tc)@57DvtJDii|_*0b3bH3ghqkC`5f%1eKYEYGh>C>Uo{Ux6N zBM0wlJ8_#V1 zoj#i|VaUIW^s75*2KUbC0FcD6!{Z=zexkj{rQ3r4iPi}tPP2!wQrkdZuunRig#5V3 
z_z+p_TXR**tN%o-gprd_J({?BH)LBXErA{%!i5!ktx8+vA-rEyYIb_Wkj4(Vpa%7luh_Px-n9Ft%R}ZPxQEWN67xJ?qw!p_T&wUyA3LC%NK?&o`#UN^^+uAqR%-| za2AbDd^(uSSmmtneg6B_GZQi^q(-Ao>TWSB+caaue}xXy zDG3n!%nYoX|7p7z0IJ9Kq`>zAi=T6POI%>N3%P&*Q}UMs`luTPW$b^%TTHL%bKdCi zdP9mgdrfBmxWN!B_|f*e(135rGQn{zsK&OG@%m~;L_R) zU0$<7j8cy;2&4I@YH0-F-l}5P(yo7pPZcRSA-P8bX#)jRi{&?cIFbsM%kAI3?QoNnG&f{ASoao58kEC<$03!~oLIJwg-jO!G(mmZ}e^3Z#Vi>98g$ z{CPD8HwT37KygO;3od?z7Lc464j<8Y>dSvmyK@_ew{~U^z4s`;H<7|}0#dSDEN!U1 zyG?^-IQ?&IrU^jUO9zd&p!BCH7iWe=bAT{a|A9*j-We*F$KlVsa9#gzNkAmIzDs&n zx}h`NJ$j&R`M_43Fp8+{3{TN`7ggXBlKJc;|6B)1>1j`$23vIZY%LohMh?n6ypfbU zZdBlsZ4_1cPr>P~DDg+s{APiNXa51a^D@8a?Wu8BH#5+n zTMbz%SF;+Ha@K(P5`+9}OYd@tOMf~o-AKLk!n_EkMHNY&$vU7IfJyqysHDn_z)QiI z${XHL#TA(tE+OFwjLbLccZu$Z;dP22(XQd%v_sC29PW6<=rb}kI3UIt0JQ>HOF`Sn zcUNQodH^5{>XTa>h0j{rF0?p+)R#jX?!E-)GGlEvX^=XS9zc>6kKQoFw?*amR4B6^ zu2zdst+LKJDF&Uv07Yw+@mYfTKHF*U-qrt(HK?aWf}}Lh$1|k_b>b@$+J%#8{CM$) zf76-1Q?k>d-V@=2V2DDD_#tma#z+#*EdjI=fb8L9!!&*hn!X+I$x-vE$i0OB%w3;8 z*$WD$QGefkzof{(?viO9b%kCuR7OJ=QH2KCn#V5vQS!t*;tP8EEJ^2!_vnDOp&cfRdJy`k+ubWY#kTAzB@ zXG!YSlV*iAcOy|`O_jzbZ98$wry54QB#;m!6J5F|2h8`~*}Sn3DPs9I(_6W%Wz6*rl4_a;iKic&9ZRL8BqJ0i? 
zMhx8@ybfa2MaG^$s&A$|=+v`$N0reecY#X>Fn{!$Pi@+wgW9rDY^V5oeI8Js$ElAO zr07BQ1evJ**`-!t$t(_NIS$VOue^U1blH}O{aE_nIhYYql8;($mA;Us*Qk_6wHvY> zQZ!*xFSQ3OdqV6j{@glxTUE%oh?~m`?mlP7_qxC#jK?6%24ld+`3Q034w@HZz%kP%$XJMD zyGL){QTS3wGQqJ}_=L>iB-7hy4BGUzTL~I1C#P`r_J^@wk?AVuTv|qUyY4tgsbzI{ z*Qu=MFNgQchR$pFe*BmkwD427ki>?EkebG1MLY4qrx<^;&aGYCh!J!yn;?Ztgr?AB z@UbvNSYxjz2JaID$(lH^0cjEMPMBOf)BHjPIAE_@?+Z||1 zhD$?w!}>+LGK3)*tzUQ<>1p@TjdW+?EvX16;L1Xi`(|tNnI7>#D$A3-U0$=t2hIQc z;zy+5-Ie`j56r{re{JvWrk_J$3I47}iThGnyd3lOrz9E>~_sWW$n0M(nb=G&<0}Q3uc;Z znBH=Cor+knW>$r2n+5itBSYif;>MQnkYmR}i4}72;G*mXGH_3i3(|)Wt5W2*6z8Cg zv%lSIp0YuSeBE*QEy~yPBGgqlf`5x+FoaQdhjuQLW;qkkKy^92WoG?P7$z-Oes(rg=tU}x(`<>TH#e_qq3&)=k+IKsT1`yF3gyi%IMg%ViiYV!-m6j z;SanaaV28nC$zBHa+~|8$5x~=x$I)rpGAX@AN zi0``eS&5G2+pm;e&(nvPhNQC^eBN>_%9trg$DnzY11wo#v}!sS=t5dxS;>M?$N0@T zGn#$L5HI02_{SsVB`M}#ZTtGf>g6)1%hW55=&^WV);E)CdHT0d%M2UT?xjs($ZBvo zRIle!OA}PC?UW((v9y7Sx?L|gLhdsZv@|(M?E2re_>mhIQajIl*>IZ;601Gq+2YX0 zBQYm;o-3Ee6K|)olr_1kiMkUrnc6%=Ac8+z zNKwMA7mYESyGo*RNOj|55Ug~Dyqh5_N))-a#yn`=mvHtXO9ogtVI>B{=t8Sy>bs5n z_-c~$EvpD-BynS~^~#U#z^_;y_OEt*7wEal2`9V};LF~K2CWL!Z2l zox8uVr|0k+b@A9k?9C)Y8?byc?0%*sSBVx%N!;QA$b=a!4EbPV_9V|uK1&VCnCLJ_ z=JwkUKcgGywp67R$((swqS9He4V4}*OY#OaSfx0V4>e>OB z7<#VUeMfYeVKlhmiX98uW_pQnvYlTzpEM~U(D;2RH@gFi3$1uacxDNW9AWBVCTMaj zrc#g($>UxvvuX{RQ`YN1&Xv$0#vKXN%p5d$$l zbN75Aci!FXhQ-$-eag%}y2pV*`D#K8-hDC_TwTKPIL6bE4CGztsuI9& zW@5Ibzr;vPX08!CeqFzve()XTpw(7tix5WP$GeZhh+_iG%Jw)LmHzJp1?iRNK5s9{ z*CO}%m}WB*JlPjhTLU(Qq-NGJ-~IHGuVeH9EfLFjSWTtw&dO&W`gyT)tQYGKp4UbG zv;tSEWtmbTMTx6_x_A73ETb46@@^Y^bBqV`%$G=?I&je-V;Yi8O; zXW%%bN!Ts-c--eeU2?LgVJQy{I4=n9k{rS+^iiw1RaC_D)ez^S>K|E6+JTAKcbT|1 z>2=?RR_i3z%8Vr60?YptaV0>=*ZSn^KeId4pSbs>VZO^l$=q!EPz^DTF%J3`C}Z|P zAmm|*l_=85^V|y>!bi46SBi{8(}JMcvgk`)NI+f$j5S__XV&->v)|RWxiafKG4tbu z^tJTMz>w2O`aLSSCrc6~?FM|&!`5Hb*pDbwNasDGrQE;f>eTlll|}Y3e1Y}*$y|2e zTV7EV&PHgN|P++3lv7->&OYr|V#ZTsZX~HeiOUUmxLhEBwkpi7K z<5Ry|UwK5FnlBJN&u5I#>cZn=OfLTg9G(@=bo@Fb}S%ur7Sd`mnnq_z1wv? 
zxchiJ9-4kKxBUt@Y8UKubSlfE)JQp2ZeVilkOGaAhb5%Dsx6ebI6CLghMFoK;GW2c zU6|rg4j4)`4*BMDei1SYw|x}r;O*r*!-do|BA+*#GSjS>UDjj6C``70^ap>D%!)@f zy2lXt%h(H?%{1$~_MY<}!5bh5#_S>XeVlb$aV-8FrrapMCU?0Mb0e4;aga+5i&OX=cVeWH7u|a_w4%yZ_lqM{HmWx zRJlJ-@;*VHI3mD?*KbCvr;|7+Lmp$TY4NRYv}Mj0*r2P`rpLdfWwG$ruq~z&M@xSf zw$p2X%f7@O^6c2olsz8gi&%~1!Btw(CVrMUK? zJjQ}eg*YgFS#`f>(RlmptATl4$9QciCB7Vd&xaU30KJ(@VgB$ z|8UoK4R~bH$)gO2S6)~m9zZ?Bqq81jy$67T+^D~EJHV-4k zkF%Tw- zPlwS2aR**0nlry{P)tXABTMCT1~In`S+}t0n-*AFV$n6SXR&%oS^k0;jMV3^_*$sz zG1=u2PW&D{Dj1SjqGtZv2c|=QJwgoL&R_IZV7>8sgcGAM*bWn(0;`gQ(&BNwrSGY5 zsu0Ebh zpoB_BnR#Z5U7uy-HySrC$7=b=5}^vo#RcW1Z#ks2ma`jq6+K0f0&EYN!RRg>Gcc5N zcZl(2MdkDj%_u=jo12al$V04Lvg*3?qbNi6D*l|4ZF?bm- z=S`7YS(S>;G?p3{I}H+HU(CjWh3b7V)f#5xY}Hh6^N>e8DL%|4VSOwvmvvxr?f{(9IMqAU$Jjiv6&i`29D&x9hU!sk-!B@6K*!CUz&3juIG zdnJx9E$YJDXjZZaq-H4B4U>tvfG|eo(0J9Z`Ew7mN)AlMbOD>I=UM+MJ2#$!R({hB=SidxO&*{(bHpY=H#DiFj zJypxJPfxq4WPzTR(J8*!gMCdgZ z-lkkWNbU(KC8msC=paluhCk?AZBGlPw5K&U|K1-C)+T&CYIwm~=j1Glmv+FwJQz=4 z=fFxJ6Efjm(JOo!d+2jwG0`kxHe$Lwc2u*9ZPE8*PxB4Z`rhH|>8ul}`NQEH5tIv7 zYt60Um3xHLi=DwM`NKLh;l%~9(WCmaL^EL}5f};4&>+r=TmR-%KXH4P;qY|0qqzFL zqDZ#c3%wl3X41ulW@i$mZf{g>V>^F#A+mVvP;=`l#hF5q44XTrx%MLX;fP_Xpv=%p zt{!P&A@F5hoS(WN2=|oEAkubOPj|c@y8>KCivC-7>_M`L;=Q8SHXn)4PhVX#0X^oNHmt;_z?!x+cw^li z^x0mrl#QaE*X)ewpv4Fie=m~B9~-EAlwD2G05%X}CkRIq@nc7QL1>$zbNi0gc2A1| z7R@Dw$4)5|(r*E^vkL+Rh~g%YoiG_Ipi}KR{t6Eoy{pp|CZW z=@^t>a)5u^1KHlbdI%j@i#<)$k*#+`lvB_!VS&P24d`RTS3@vqc00NX?Gsg)9q|YYn>u-e*9_46Y3h96KKheaN67_pa~+3oD9%p*o0t(MR~*n!Obkn zXT@lB{@rX^ShZf5u+rDH6sHr2AeI~&RroLfhQ~+Zlw$2Ixg*-S6T#;l$;68w=avY6 zsMjPpW^#R{x&+A%-Z(fi-@9{wxF5xYL@O)%Js7>ea*)J^G)T98>l*ozG@i?u2xQ{2 zir4N!vxeTva~`%HjcmA5+0^Ny6@Gp6$3UHt?}gj>g9oxXvc*XJv`7L=r9QEwGpQk) z;dS*V6L@?F^R(EGWccT1jkFDyzU7_Qel&$CW2@MCN19|-$;c|;K%3$|85sJ>{plcv z?3~Gr^fRwET%|`C!EwfX2@W(;F|USn(GlZ8%H=zPXZOl+x9TyK@JXk$>sZ-HwNhM9Bb2-)H4mEF5OJmTs znp>Y~8uR&uLHp&;vC%BYLgh`*e8O9CBi@Wt{Ld?_l06$6Pc(#WyzE@m@|yGgHfg7Q ze09~sb(`~0%T$m(u`r57?S%lT7#k%(YACozIFDX2=`NUVeP8C3;&2DE$=112HKl^r 
z9$87fZDfZ2EkPVY6t-3bvn9m?=nOF*i2{;J96#hU!wlUr~pf7A*;^q4U*GE0{uiP@PYfaSY^m@4#^vK(t8U5>7tgy{gmlFr&~ ztufbbk-E8=rOy(`gaFQXv~uw(p&k`0LGp@t%J4V?#_2Ck*P2^m3@Pr(-rJ##laV8; z?TwvX$B%MyMCdS>(l3SwYGK8RW5@xa?w-@udwHTMZ7A&yGu{~t-0|LF8FwB(F8%lP zUZANWe22HhOfg5adC-r}W*aA%9NW8_Tkg>df@p+4aAL&uS!N&bXQ0-*N0(illCYFP zm?BCV?NO$f6*@wv6q>2&Rc8sz9)!#UV}{$zI>$GH>N_ha)}Sx$&qVb-hRbQhx0ayG zcJbm9Mb+j~+yi(B11u_ADzsXiA+p*D5mA6U8neb9f53@w-c1M3=>@bu??)&n#jywV zj&jPtT!20O)9-ZUqKchp#fD}SvrZG?DZ^cC5Mw)P(Wz1vtF)M-6x~AWE@8<5Z!FSh zx^eVlXJweePDbNLt(yjhFfI9`$cS5VWX``jUa?az?PI$<=tzs8GG~_qZ+Df_}Y}=rH*spUOR~} z?x^sf3)QgKv!zi__VOdzM67-Ahm7fukjo0Z)0tqUxnxowy>k#NzGz?EI-&2hL1JM< zs0WYU=QIWey5GnlXx%Ojpufe2^1%>}g2F$WrI+g&?h+jtF*T&!oN|(8v%mzeWg7-s zG1fes1DzhCK`x@tmL+%rftC_iiRYcSqca#$F}^x8RtQT6@01W5NfwmHD(OlamgrKV z2;sk6XCoN+DX*&>!3@n5AkCIam1IW69%mNvddcYX!L@^cIDH2yObUq{pZipD9Jd6Y zE{HMMB)WcGOsBSD9P!Z%S6{(W-zG-fK>{W|hPI%x@gRWlyD}Dw5~F!k`^lf`0^;d? z3@UfCBEDU_3!e?*s+2w7GR6nww2z$E6SHs^!JDFpV_(`@Ru+r0%)>*FN*3&`(Bd9f zc(uwW2}Vc&jQ|R#L5R}eGhXl&LlV2ISR)AzJEpw@T*O|`pX>KZ=m~VN6?Ep;W3(gt zqFFooys#^TiR~74tR7#7`h(z=pc8*}5`12jAyjyt4&%3I0%G0@ zCl=pJ2FBk)6_)x&l++3xMhEcs4PWKlB``=~33`NYdZyA+BLuW`w*AFZ;Bkyc`#NhR zZXA=o5Q)i27~XQuq`7$zb?6|mANAwSa<MWb@rNUKqC(i(kIk;MU)geh z8Y%9^0IlG~m4c9V2Iqe_#3_*nkDRt1H-Q#&JQE+Tz!1qd`VnuOQh(m}@09%)IZk6n zq<&Wim7Sn8(fBy_(4)q#OQX|o>2--m>P_lbdzLIG4GdH+(&g|# zlX!p;%7&Tne&=|qX6hl7pcqjSdC3v3b>3;k!MWW?P@sHW|1t0hWx6HyijHu7H!rGp zJ8-Qe$TxKOawfy&L=eNU%R+iLXwvFgHDX6S+T7kLM(-V681-p1pD8liy!i6uNhejE zCksXMrB%xR4r@#ZN80xVDi*K^IZrUeq`_bxOy`FIqfwFTFF`ooh|)G)c@7&dF_bn3 z?v>bcw2c)>#JOEYG`8bu92bcH8dy2r5U?>ZA}u%2_ECGUUzH$nBO;6#twmN)G0F6> zu22%Gcxizm=4>)V;#L<6sSH5ay=^4t%Y#Gk=_alXTA~!7qA*n2R6k^a; zK2<}jTY416;2Le0OfpX2j&h>sdldYOcuvv-;tIwUJVy`TY_B(cDv6MY@ES|c&)3dg z2AT*-0orhX3V$lx3&$b2m&LiP(I$~0e@~|9Eo$w%rey4li+dc^~w#ZD8bpcs_chVC_Xh}W^M1Lx#3M) zF^tfX`0f#w&2!OM!A&~dQr(p9^-6oX!gstQj)p(0-@(woz+j0ElBe+`0zx_g(pT>mj%sh>8>w>+uI2 z%(9zBT-W)lGxqj*ISP+81^P8>iojY(&R&v8Vcz-QOGV_F>(*!Y#l`QUw-WMpz?M?r~`@aA1*{pQ-_D 
zx+dS0#(Fabe9zr;b1_sB2hMi10ND{HvNsEeaK7Tzg%zIR>gW?4leJ>r;7#8F{E&oq zZrk3=v1leqjJy_`Kx?AzJ;F!QxzG#nSub4w_Cfjqu#B$w^c#;R#H>za9aIl9-P=v81wn#z0W`klPSo*WBxu=0#o%w1 zKF#8zYNbp|?rnoZC4z}_k%O6OT0Q&578gjubf@9R*&k?&@0~rKqsttNaEl|QD4PpA z%z+R8d#BGW_|8nS7*e{=^2SCRnX!8hYZ?%bqi(RYJDW>^Cy3iUb?S4TzsW6DFB0H= z-z9{g4;v6^TDePSzdQgo_f0A{N?1uLrKJfEcPiI!$;a$1ASR09{SrCPu0r=ZeUa-s zT!+9pvE~{);{0x7l3bqK`EMmMS_TyyB}g;*iqJ)!FhP+@P}!=WdDHRrAfV?+>-ph4`tV^fpfjk=nH+c8w? zxVvE%DqEY-uJpNIzjz5o?Ooyx28;@c1q6V~1J@+PtYREF(7j0d{`yU|uBwLcLwiQY zPB3ga~;pHSM26dS%WvyAfC=gn;`#o!*!hVhx*LNNo3Txp5YMAB8S>9vc>;(O02$<3EvGav2s2|0E7 zUzWIw)kB9nJt1P037V*UuY7)x(<5r7b7LF7dbaZ=-N8KyWDfW)t+|+&Kx%U`}5iz7I_36R^n(lpqiGOL87$TVxsH``tvTPS2 zqWjNghb6IjkV_f#h;_P)J8b7pi=A3&DZ;eqY`D^m;wPH8XF45)@$wJO9d2W9BTtJuOER7Ev;gNox&bbYfo}Slb|L$N1opQ)W zh@e~5ucQVmd5gDRP_Stlz}b#y1J&dn#~=}bp>umHDjO5bBeHHAAqlxxcJevldT;4` zjuA4FW)I3IRE+5QXf8ErWp> zj*W1~)~Z>caiPv@n3=FR*gy}SYV+%d=+*rbo(e&t~F9MKyoP?Y%P} zwt|Fpo9lf73Zv*I}@K|`KG9_$=!Cod$;<%g{t#q7l_2QopAJ3idvj7Z_Y6_w%~nG)m zV6jNn2vJ}pxC4Kh#j_V&?8g3GBJ|}%U_vYUq40gPz!MJh+GFD2V+CG=GQ@%#`IP?| z{Lg52Gl@Sj1#<@c{)jy%SOxpoNzqe5Gx7h~LW0DC@G%E}roDqbk0x4!%#NgO&p+)v zUpW51KSzT*0?y4w_?eIMDU~pH;5_>**fJ!!D)KO)TW|ko5gr3VQ^eLJ(-14BIrL&z z5+`2$8!e=RG$?Z@oETwMQY6jmzeFj9Qs7Dv>|+f4DnPb7Lnkc{DyJOLBq<R zuM+Gf`dD-&Gll`{=-i&Hw6>jG%*72x2XuU;M(DE*&w%g$g~J}sBZUAzhEUb~QlbM7KSFPen|`S zjZyY?1kw%v?Zx&W){$T$$jd*efE`5P&I2{%r^HNo(NfHRW+8H+9T1;;N)vOHuRcld z-S^9#@d12NCEcs=|F!{}8C@n$!`W))hA>iM6YeQ1ww8+ z;q8ooaRo<2B%_abmhu%QX^YHQK@}I$Oi4pCT;?iN4z}!PC%>lM*(6Vn{cyxLeT-tB z`w>aYJljyVOW9snqdxiZ`fNTbyzDf~*&W-Z1s-H4Kw;R9rYn$(}NFN1oTQsEOI66 z^-q%3BR9OQ{9!VKSj`ov8)Z_*GtGyx&>PK^CVoc}wBe`8EW8(gNc|{&d#+;5f}+z< zN%DO#`7zU=;9`yH>+>aECqRQ^*lO?T5{KhQyaokWg^rY7!i~kLHPZ=u;HZxBLl7f( zA8(+Ijkk-hLk6v&bE%_=9aRzQsIGbdwq7y(F(Y^{WrZ%^ySP{tr{URuq~<4{y1TyC zXe?%gj=`p4j*x8+}0luf&) zuy(EJ-IF0+C;~D3yc8qf_3Q+5NGP@}dqNcQ`qYWScD@GU{i@~^wbovXUudk&(!y;_ z@%6bItk`ukMWnuo{55l6+q+@+eOS$;F!j?t?ztzBft%hRiby{96%U{Z)arWcX65m& 
z*ZODhOi>ZWl6(hlTgR98QLYMN&yKUK@<&<^feT=Yk+hhC%TMFsAU!iL*->(C#;r_! z+gRzbnESE4&_K;o3ulV9eJ|dXpLmIY%05?M{nMlT>+Jfxa0u&QR2-;1oqZ~ zR`HZJ2^h%KF@2=e>ln+A<2@$`!y7E$#?mOa8M!@VcxbUF7#yk%?I_<0-*%r;ttJ@J zcn-Wj(e^~XNbILyk6GJDRq+29-0tviZ!wBQoO8SPY4-Q_C_)uQ@!J6YdrwmF_Vt*o z(ZH2K>S80i(v3gHwQK#?HvSfrbRX@yZKQutFPi85wI6JA0~Hk`6<$LZa-tYJ+q#EL zfV!&qe2Q;?LRh${b36a(w6h9`k6++g!|CsIGvO3&(npJyy6a=jztkWXypx%_@x9fE zP=jhWG#()wU;9a;Ia^Bee!xp}=EDEBX1P(gXp&{&6WcLspnsE(Oq(;|!mt>tmK zbwuPCeYDQOkg&TC@yZ;E~cF4k1o3pjuQ1 zJe(6)q|m=J?i>h#q+NX7*l|Ua4}bZzkaM6Ehpp@#fcq>uBcy`1G(*pR>|_b$9oXqS zzP_(Het$vK$OG3~n#lHSc>*swRtyRUSj(7Xi05l-kI#4>DW?%~&b;3}Z@ibNk0SOg zB)Pg>qhn^2;TYSE%0;pV^_OebyX!l_V*$R}Roq>^yPe3jRAP+srF({RRj*aV2|x>>$GT!|7q>dB-r`gyU-hhH*l13f zge7Qi$!KqoXzX0h%O6JzD13H~xEL_h%O`QES@+$46r4(Kz&0k9PJpjzLt+mpl-g|a z0`m^f(}jFdPStm|(T8id1>5lO`bbIV9X@pGP-p8$-;~Or>{m1c<~;&v{0FZar8(Ht zASZI{Cr)sIZ)K)S85=G)H8bX*g7-wD;}x1XpFO`RP8fM2 zHs4nyyf%Wz3;hkh@_(}cd*$NXYm9(#kpNVmnk?I9kFqO|#4ANVKu%Q1povaSy$$j) z@AddKEC_ZKxweoVGquq#2eZ)JJFo5mMvO9vK*L77cQEs+3Hyw3cIBiVDqG!4u+{&8Y&%mDaK zb~F`L47+!3H*8J_=glQu|H6&ScbZ4Kc^aZLIFB8QIjausxYrO*wZD`a2)mxC&?O}D4`eI`;}pXuJBv$=WB zi|6$@LwE_wdRx?722%z{B5Z0SFHJp`0PYCH1DxQ`rWH$=QHhgv!1}WXQqs$0ZQ1}ri{*D(igxi4PQah%j=02#xrOV@JnosKdydau=>#L;Qq~(&$93xz zp^e^4GH^3Cs|#=Kt)Fc*`4wIpl=4|03K)A9qwpqZx7R?72_=8wEfF>s9#)J6@>#Hu zWX4M{w0|BS5xg2(_4@9$(~tKqO&+X+pA{+QH{;UKUbRbx_OQc9Ii8lA2bT~+H4DHB zjn2#P7cxJ-dbaTW(>GCyF@u1Y?PeF`ZO@?XeZL;H3Gqx~&ri8F2VSh^ zD{fcXu#-*#BSB}T!|i8c!BnE`<^Vg<|BC1n4H@Bx6ES?h1gJIh)y(9Hud;SJ)nyEF zW&{ZWFKw!JiYCH^;5iP%-Vn+V+6ubZ}v&Nk^})+Ru*z0qQuSS6n^r%3QNStc4~J- zY=mv)m>PpmL6#jQblWu^UYbu7P(k;VIEotWzxg;!yS!ftywM;(h)ZkuNk49Ivm|sO zr2=|Rwg%x2;zyVLX?T;%pX6)5BN|9T(*8`smfN2w0TmAX0W2r+R3yK>PlOrY<0fe& zvB@^Uubqr&(UNSO+eYFOmn?AT3)v*T&$%2E1<)J4qe|(+8q=Qf_t04<@ z>s`-{*kgr`9uB&7IfCJ=9wGOZ-FpwWfC7zx2^VMor)aGIM0Kp>xSa!pjoG1FG6rCf zk%R&7$W>+9kK_;qifHH}Qe63)NsGEe-k7Txd3e;gE(V(kncV*Oo-sN4cP{O&{^%qh zrs_x|BzYH0 zE~8!H{0R~sz1P`ipvoUB+n}&I5I&NWKt)I@ug{Gu>uu-3J^?uqb0f!f*V|JYa_ePV5cq*?j0HnY5H%xoEr)( 
z^q2dFyZ4BpUZ6#XQv>G?*W^%ij-i{?kS?CNZvW2xTwz(W&{|CI zjC)_98}&~^k5$Hq;x8M6dxCsH-#_XI27NFv+4((^XOR^@f_HtU@Jk4>e_XA^dcAGR;&^WtG!pjO5P!c$gc=X+dTqb8N2>|J_FKNg3f#K zv((K_{Orf)?=PYJRGkm^!=`9=vq`Pvep7>W(FOmd66>O;UoBfYOKO)}U!8v5JmOW0 z`+%|}oZI;_$P71f2%Un@`P~VdgMF|9zoT~Ek9C)o#&VXz1T^qENlrZ7no|tP&brOO z7>dp=P?{JzLyJ`d=fTx|IoGSteL~l7&ub3CY%?`fQ^7~&EgeU7?;(Q9(dadc;8Zn_ zpB&eUt6uIt6C*wARNIvi=BHNqPDWA$d1%s0O6s?mdS03}=4FMuCtP#+`ZPBqavYn- z;5_{oV!?gqUcc~_JcANz&8lo4x#k{^h2wE`xlBNsdhF~deqP&hFgScPiodJe)xw7# zJBLSC+APM^2azYPetSl%`s*^cN9hMTn}DI4a~~aR{O3ONZ-;LBe#qm%MM@ibTB2*I zlx=H{_7tVYd68GU380ORER$`Ltl+OE?P-P*^Hg~{6=*X(hd8%SpV(tP1<0LD`kqM? zwQpbSy(YEnMAW>}|I-Sad8@BIH?qVePE=bDUvo# zhORkQkenxwqJI6^O-y^R=G@yJpYf#g{TUfYHffB(<=rpwp*!4-p+kwu%Sh6z=^`3WZvh%K^ zooBdwv~8?IbHQU532Yx5nD1E5G-D(jp?sItkoMZ8wh8#2`Jf4NO(7kL78Kp2WKBsa zl>25fj|5F-RL)n=5HkpJ@9sx6K*r*yXT?xgnX}PMT~=Qde!6OW36Ql)TgRYbGi=3) z8H#+>uVEk@7Qr(T&YC_qxs@#n3TXV5o8`!%fmNEoX-B87RrQ9O!5doD;hj|1%(BVs z!nNdF!^wI``|RJIuPM2>Q?)<+r7uZu;^70-AS-V8&I3uKOUr*wS>gF_giPwRVa)H7 zGW`3$NV_f9odg@&x6J#g#|ZkdN@Gv14BbZ)m7FN}463GT$n;1^ZLPI^ z1=2dnVP0g>fWhlmyRvm%hBwX|B%KpUwIIP_@&2?J&utdP_EVZu*|$7ilbYj%Lln^m zz?`M?@9 zSi9F|w!c}jSIAm4Z>28ued^{;cg<34*m+|YeZqsaVs(1-YX!wQ{umv-XlUtr14+{G zZTo;y4{33MC+yKl($r_;=SkSVn?+xSsM4`WaO1gEq#lUjpSj2BmP!*Q{(vQGru~?=TE?w@Vds~VvS5w$KH@iZDD~%F@1yRc zKyeG-c4Z5X_TTUS(+T&HUwY}ZY&@|&Rb}o02s<&Q_HaTH5#eSPR7S&azIZ7=BPK1E zdhq~ad06*i!#sUYlgCOx({U0XBG(h3_^s2%e9@=eU-u zTPxG5-=~Xf=1&a`=jitU(%ev@RX1G$Ms=2!gA<}QvR=U*IP9- zZf;-G)G8jZYtC0bKmTu;AXLUa`kq@3P*^p(g2FNQ($l;e{ew}y%rjP1wb8S!BkxVM z->I7G{G@Gq^#!KkkH}P04F4G9eRZjn-4FY?-MA3g(|v?6lcg_yF&Cx-Wh;mNlSagr zSS37T$yOUDyosG{lU6X*j4uBz%uFf$YbPYcPp!T=^Lr0=duLwQ8A|Ded9^c|@#P^6 z!lCC?%tSridY+hvz-MeyJlDF5Gm$o8ihgm@f212lxK15Iju;<8dquXB4=@5d*6 zU`{}h0cTcnClxg&STO{Pu-&LUYn%ub5=87?RpWex4U{c6fith0iIRkl49zi!(UEfV zmpRw-?sdf1zj*ir7&lr?t_VyqRMsd>++vbV<^u<5D%r66^X@!A#!~uLs%MHDA){9- zDqn#cc<;#^?7C1!Uv64ZWsLj#J>NfE+B}_M9=kbgvp+FaW!n2pE>llhofB;hxE&kuWFGe~VVGQk<8S5;<&2T1#|53vy=cxv51^MupI7(RIqp05fu 
zn7VrdFsA1=Pe$B?aKIz>n12w8WCetgF7Ll+*X`diitinM!5;Ty*ZuCAVn~0-A78VX zX(L&wTTe@_Z?ml+>hqOj7zp5ZcF3I$?9tC2@gA|zS~3uh_3A&~$bw$Gpjg2h-dg4)&Y3MLzvJVfbRU%r2Ran(D@ny9_6u3qQIfY{asvaq3 z{Il1bd0MX|UySat(Aj^jr0#urVA zf`|SSU3 zAHsVIcVKkuukjeg`m4K0bAoF=hmLRpvJvW4G30N~hD8y-xSdQpZMw9XqvSG^OG{)2 zeZ~V!Caw&gT_)#XO%V!_G7{cBopZZlrHjGZTe) zcDGi2RfXXAXPAx}Tm0Q5P}Qdfhak(H2oNRGE{YH?unDCzKH6~6Tc9q~JEy$Fd*0da zUs~4Z2kfsTo!b#6$Q3^Z0eK3QBor*(EzLgPA+%^Vr1}2pbYqpcF;G`K;C6KCAov=m z)FX=~t^tR$Xsj~%D~Ifa9DZv$udeVDthkgj9zoc6R~dpi^68V`x4cxx7>RTE_HnNP=)Q<*0T6%Ht0iBzww*-PLX}Ro0jm)TPYosy7cm*!Ps20U*!7VRA!oX?3NR*Vc~F$wN=cmKgls~wy*n3B_-gpt)0P1TCwga@kVOH4 z0{BzMORgoy-QQf!3YkB)vnCvKbh9^bzEfa{RrM!PvGI!MH*^X=VN-;%yMPbbZ12JQso9DO26eTo}aHy5UU#JW%PpKgo zK|;i`jc<<(vvnng%vZJ4e8Tp=K39ISLI!8wYAcQ;7G>q2joL0_M+BYwl;BqrWV|#u z=XDZi7_YT#thMxsdjn=i?y0&Bh3Tcv&$JrUbNSDfIh!7>thYkE&>8$4zsy*Ece_{M z-HoLt{#z#^?cKOQal3)^EKUlm&qR<08{CB)#FWV}DD3UgV0-B3)1dG4(6@Qn;!2dA zx2M&qf-8w2p=b(hwX1Xnss%H7PpJG1C4bE*yNDvD=6{Y9=K)_2f&ModiLIbTXVUa# zcafc%Vq?C*shj<7d2ZV}l$PdHksubVcks(nmnYaPq5WW!rREQV3_|MR{!>@P4BqB| zpESfrtq2G8w)X0GE7gi>Z#Dw&yIyv!MXV>ah5A#yS27F~H&B92)78HnsyVFU0y*?| znl^Vt!vn8~#?4U=fJ^#C@n8a_{uo6n#juvL-Ea<O(N<<@s`3+=CNstQci5%| zDir(Qk54?f@Px6JvNfa1bvq{60;bg9%H>ws>&=3$>dx=c1Qn|XwE8l@{3V!rP=NX= zN_=fA@Xo~f1^@Ey`Uv|LYUBG}V{OOt(#?wR!Pz$Dh&?$R7R#1f?AmuB|B`=QM~G}) z8hjuD^V+)T@9l3~z}!^dIu@s#?Ry9WBb5fWU@U7LJ}qEcpn=!kRoZc@7!H=*tHj@5 z=l=ahX}_>3cg1-BBh;qMWx>||Qm4k{4~Iw{a3GY}ZDewQcp3Y{5)1;8DI?)b5TF0? 
zMvDJ{r&{d3V{b~CxhX49PV^dCTZ073ZTZvd#T+CuQ8I^JXQzwz(ej&Hf^vtzd#Kyi zYCF!e!+GGe()k87!bC`u0ntGHf*WBhOnpz}*JnS@D*MW$?(wr!d5vqgD4^f%AmqDm{^i;I8UsH-z;br^ zoh(Dk0Fh@PkQW=$i^x22zW!s$bDBz7pw==Ye}WWcKa>?$KV?WH!hwTAP&xO0p!PM} z=OR24nEzDO1&d)yRQpdXvX~C%xyidk8up)#+Ky1m)Z^p%A^)g;-}O(NHlFW#@7F{W zpE`iNM9+^j=+sjwA>Pd`etkQFT2MVoFSSJx@40=x&nd@nBoals2{f12TF}}dagO~L4gni}ViZhWALTAL(=idg@c_S} zAybst@V&gA@B0G=M|*{>#+u{ylO!nboygJ-!*~s!z1YTWPr%48jSI9X0s+`S7_ zP|D@Qd%V}T{bN^2%fn!iCFR@^Sw013-~Dj# zPpg2b>Etu35jHNIx~%VvN?DRcTrbNZ;Qsy9gUVlA;GTu<6P67t5XzWc<_~^nY(G!X z7J_{uI-IgO8K|j{QvsXHe)%5Aza{zbUwmqaYQOe@>^JYe`5i(~rvL@0@}>R13uS|p zR!K0gtCCj`4=$xsXxP*Nlo->`yAj}wNuZs6*z-8m`wt#hFGqkL0PnA<5+?C5%({iJ zh0d;@EP&H4ai8X3^S^f@7$z1xV8>A21$D97?*p@9EzbE>n48~|OY@PC%G|v4W;XU7 zarRfar&9)0rJBAw9~r`wohxyuOPBFDfN*?G|EsXM_hDt$xffZN4-K7uZg?WN^TtT5 z_bF4<*@p1l8v(i7A;tt$nPWq^gRKH2qWr!orSR(Y?nuHRj;{1g8L-io(E=1alBJJ$ z{`IGY<0Gd+wBFKK3h7+Ds1C=_Kc=3z5rxJ|3Ebqqv-cI6svsB0UFzwLg0GcHx0N~% zKZl4st#*CyTUED5;>iC`f0%&6IuRb2hPit~J+J>e{1~U%;yk}O)Z=_y9@v-76xUq( zbeGei*8&b1kv&Dglhfz8BMvaOXX%LYI|PdF0k9IH3`^*P{mvwx!2Q%t z)Cv&|nJvSu9_d9HPvu;%wCh9RGVVGxMj!JSA2HJ$i&gPBObTB8d%&v#js0zvcz}jt zN?Ujd%V=7Y&_p=Piix0adZeJUH6>{uzBwKUW#r>!8|*5bnhRR8;4EwDhFcFOc4?NY zcDeHs))Ck^HW7YjFp0Jy6>!KxZOD)N{V$iaK&O1QNygfxW?_Hvu<6|7^opA&X{w$# z-5|;qy*=~W{~Ep6(xJ}}5&RPx=wP`1&G&vS`vXW2-&tg@|E{+8u3PnieC%^rVVZ`p z=X0MBtt^g54cCP@L&P>5Tjt*TdhKVVqrwqJ!0#fiqG}AJj`i+tueZdh4c{$kplF;_ zVfq^au}u?XtjZuz=JU(YP7g;x%~5n~Z7v2jA%QACVOoT$7xbzBTSks=2*s zlLwRd%`*d%!DsHr+I+OI1U%pYC8(>42nTJ?&VsJ%p=L9Sr$-J^WA<(QIr_u?*8EV7 zbJ4rL8$_X05)-1`w!&^(>60TXea^i_I?dw}E!cnKBE<9x?RqXA*+4)SSv*4M0M;Gt zUHb*5K0lD0OQWyL3v=JO^2PD#&Un5wN(vLUx%Uos5|>^Fu<| zq;ZYk?KDXQaoBT|esx`fLx2anLY6LiLH*`EQ?{%)p&yY6SGalQG+Ll!API%j0qiD# zhqmlR3{)AM^^p-MGCrK=UT&noI<(>Pwm3u&?cN=?#H;fb)FS$~@$^qL~#c0hBnd zPpkFpuazP`FM!COes3}+oSI?YM0rcYuGRJAy?=gb5}Y~fv|y6EnCPoHZH#nDCDW6* zLF=3lxls99KUB{DT$BPzp7om(ePp{)f|AY=wwo=!c7l);?{`lykfm zhR7f;=<{7(GIAsjykaKsx=8`9l%RUuvcy(M`~K_V3(n?W@@*gsWW$QtG-7-{_#`2x 
zKuf;?*108!i35+)MZdYR<&K#AA6x-k<_Ji4x-rtfwKQ0FR zI_&=FphbL1&)wil()wP{-+O$ql6oh{Ng}sec0rb^xyGqH{=^A0=H%F9;Nzxx#=FhK z*>s2r!SWBwpXHFDoD@~MgznIok&6{pQ=oL1%$7mrd`*CGGJ=QIQo^)T!F5e1n7VD< z7MUVg-#Qc%w)U&#&|ld5nHfEebJ|Zh*_>i_7!4i;2AoyS-k41P)f2DrSEhDnxy`0F zh5Y-x^BJ+`Giq16{txaoKV{2?B}8YUBg1Yb>|u3cWRejmp0hs*q*e_s7dr;e*1N?{ zTZ)6|XxfO!EHV+UkU}#DWBHI2nnVXH7MWO|of)c8gBkK{f1KYtH8d>pQBXbn(Kf}a z=kog*G$H%7!m?8N5 zsdC1RVA*jhjhY-yw-2b@w zVvYZa)xW*_rR950iI+n46+_M{@uu$6w4yFE5Q60FAxU2R)!2P*v*{(GI#C9%mmX_P zwvm6Q?;nbT+Nee}+y47dmicnsB-vAhTj*O^Yi_%{L!{r&DX7fw?+vC*(U6?SuJiKa zqZNy*CZ2Y%>Y|8MhfWsg`l${K36QO$z*Qa>n!=<$&KhlnCstQ(5<9iG+izMWyNPyM zr*aeYnsfggcKGPmb=9Tl^8C(+Tl678YZfo$E;W(yhDha4*qx^*o$dcr(Owj*^hg#B ztg4i_S+A5mL=IpkP4j}`I8(@st=Z+8x%X+j2%L#*IVD^47@W9itAHw83;2ESV5Byv zS2cXS_{tqtWWO5>&))SeU}uB)t2a;*wR#>nYDK%LQINIUZ^kkDR!D4t#0AzM9L$ z_WK)Qp*6dE>u4Z`&Sf<0VS$sxWBU1n-wnWt&Ex0|D@pyUX`kGn4B6<}$MGv;foF!N zynI32-orz~(he*1_sP~;uM13FTBk)Ifq0J=6|@lf9DfVI)M+rxR|5chX&>`Bl7xaX zh1>!h>>*(|F#vi!c+`HPZ8F%JAY}&vs9r6^61eHEA~zPrSr^Fw>Mw660c2fAt$e-@w&R3e6qF9M!)w-maRC8?ZZ{fxXVxQTIJM&*LSuV+5 zvpQjHHwbF;R4*kF_yf&p{zx9;i@`_rL2fE}*JR&>%-vA`5l+Vn+h{y@%jw#ei{4{( z<%2YMHX>rcb=(gEpw$m2duXKxXE5tq>QStB|5v7SOx11EJFEimgG@W){7i(}s+at; zYq^@2)c*rH~sktc2$=I5SN<)K66 zS{FsU^@c8$DFjt>u*uU^4{NzqL=M$^HU0%x6d;q+56ZI zv^tG|RlKyfH(wzWcm}~c^vZFp)y@e>+(7NlibA{MY-L9!+L^^lvBy2rG1Oq9HTo?$ z@Ide?>e=BdBmfHnz;FK=f{+0@rAuJe(sV#5Bg95nH^XsPl6DIP+y?1rob>S{gn0Gp zDcp@61!4fV?r)G}=O(zxq!x|3JF$)AQ{_DE;9{*ax<$HVhzL$$T;L+b%&2w8Ggj@BQ731=BnPE3!jf!zHeC_*{pA*m0b z^PpinFB60$De0WM-rj6c=s^&$<>!3F+1fQ>f7YJOIck!1l>&6!Q+=j5g)AuTx=LSP zKwB0XP>VfM+Q=LGkT7fEske@^H;kE9_%_hgT?>cE#n(GiX%>fC!cx_XkeG0>mKqMN z*+R*)9exv_quK`}7gU=pDIhv}Zcm!?2JE!urGjsz{pXpm-4u;B)fMNffz$_rZucFl zYdn~+YLJ7}X@bUd@tPh#eb85r%&%52eJ;QJM9YD`k`};?a(1vkPn6~T{=rz%mE7XF zrsdY)A3sj8x&NSopo!4rdVveN-CL{Y48c61=3f56`XewxlEC&SR3%jFK{`A!wA8Zg z)WkyIUp#s&0SgY7(bn59a%^?fx)xGdliiG`)hAYrRUDH8sQ}!ALIH&Gvbs~wiOn2N zD4SdB85MA0!`jGkLm^PHsJ`38sVwVA8S^}E-P+wmMgL@cv7J1Tc$4E4NYr^mEZdko 
zW2hi~{|N{^v){Ss&sFf>K$DpS!qAE&$*y2uChS_!7e~c7A4rSeP1;5pvSwJ3##);) zEwmNAR~!Rsf&R9m`%c;Zc81v%`QkSKK8q{=x@0z*A!Kp-x3z*(^Jweh#MLgvVYMGK z1FiCcinzxhH3cv@J8>yB5M0^y0_$x~!x^!B`r2;Y&T^58YRbNxh;20|PVD|%O>_3m z0ao%_m$JVx5y-4BeoGc^$$4xKZtWV)F`$i#QnsqsN?23&tg7s57mf7`OR;rXix=f* z&g*USX2>WE7$5EHF=N7p=w-4eC9q>32)W(2`Zt*MiS7Y3@NH*{3Q~~AV5)yu7o
XB9 z1@gD4G=&o#h!g4^d9M2z!m_7_TOz}9)r!tZ#KwA{9vFzY;i)Vp%Td>%Sje z;7NPoH_f!t&d3k`_4>}MUE1(M#-3m(6mD7a$^4p~ z0J1-B9#lKL?@^z~%Xy;M_5OndO%?qu3ifig%@~#dceZNEaM`m7H(TOVt(l++MJ^8= zt}HO*{RZ@q?!IS+o}X`@|7i+^>qfs4bbbh{tWotm=cEmhEIAS6Mt=x%&E|B5QBxKl z5%AAdC>V_(mF5h3JAt{$_A+c60C>q;i(`>|zZ`q-xx#P&-DRQWLeYsNfX!Ore>=ae z0kWY6K%4mKF)a!1j!QyO1-f4TI2IzXDj3Z4_<1A@IspsP!TMg{|I~7?B$)3!LM=cq zxx*pjbY2tejCi;6o(?aq`lgw6JK@j09CU2!HKd0%I$&GPF~r_I$h?Azw^7{eZ zVH;TOD$QFi={+E_mvvxmcLvu^W1+|f03hAB3Y`DG36^d1HImyXZaI*8OBqJ|E-`!a zZ5p%?nB?+k0{{BJA_QV%7RX*+JSB3y+XxpJ9fF+N3gi_sV+pM3IX-Mq2@q!QZ?(@y z%rTF8#JNwLEztSetId~K<<@>8Z+kH$v4clNe_6@C%nvOex0cyzF({!ZC zNlT#Gi&EO0RLh%L{nb83j|El&5k-rvfNFX+=Q(8lwP;_X=YK^=z}h zg$_+@HTLwW#VGBBklc09zxHc(+VE2jVRHlY)SY(p!ePs2SrFn3~}zrDjgAfLy6Q}I$=tusmcB(m(n74;X7 zf;EGu&sYutS~oHrpho2ov5qDbKl&P*-vy&%z<^-hG@tGnFj88*%X9`2lnyE=0bH|v zj+zD(P}VUJ%Hr&;dbgM`UKZERk)y+5%Jc{L$YA#f_v9Qq33S?veDWW!coA+lTxt6g ztc^P$y4!~K=;WF2p;e;UwYR>+FDh$p=P5^!2Y35C#BCPVBm z#1`fSEc5E4lqgZZW-hfN#u7xC3fAj=2HWnm40NcSouQcBcK`%}Gn*I z4yx<;g1}rS_6(koF8_sjT@PSCb{fR02Y}aF_8PxmuBbywI{=VXvUQ}%K6Ag6+0;8b zkTUrI#-rjptgw{z)u_N3sA0;;>Jh8!<+#{Ci>;M&ZoHoW$l(g$zfq6@pU-Cijw6*` zCvi^Vu#6;K*Xyg)12)q&be3!cHz@SHBeX558K5`;9t|x!+VgtiJC4cDRt_k%dECJz zLBCEe1!BVD=sRvgqH6E22w~q5=)J67eAV1or-p{3*2SK zK6TN<9@lKY(fsQ9OE+@tb*6q;vg{2S0Gw-J4!R-^!aGf2B|Pcs0tlH}vmC4Qfwzmq z5F2!dQ&pW*)t1%q2-Xp)$^onxu;Wa&bwVK?x^q-F8GVkB4AyC7tB3+%|2L1A{n=M} zX1bQR6`KL{?HQg;es1MQ$;+v_e21Vc-klTB*-pExpfr&6s5vrMkerr?4ptEkC&mrOJ!<6tpHSaJKzWc7s@~R|+4p_j=@amWVkhcB z`_*DLdg>!yo6zk6Vmm#(sJ%PH%=C=J^;X`?$Eu22G6$Z=mPZD^z{GIs#bPQs3 zyDbjM&y3xpwX=j!IU587JS$scf%2!Z+UL_W%>XME2g)4ZPw({YO;$)?yLBn&II@1m z_Qn545EX@>_h~$WAR?3ZHbe_g6$KTRk=VwfeF(C}_zmW9nxl3}4@V!l880+X(nr?4lYfbKsW z(&vNZ4-91M%^!p%U{A7ZB0!{;W`T_yOp{A&*r|CkskgsylO{st>b^_`WCE!j24Ka=XRMclAMD<@MRVN5CtK|ocn0d$A{@5D42-)P$?+5hDn7QS+8 zFE`f!&m`;X+JAocLuJ|LD$jT)&{UL^JhBWozD$;0}!Zd7iAXK-Xi~|#;8O-&^t%0 zBCY0`qqOo<)+b=S1@L0Vn+$C4faKhhh57kCLN$_BWh<0RCeF!QW93$vglEtSFw`)|qzs z&lrHhI-|Hm2(9$)VLehc4U=98JHP39hq?oWle8eD#j}9pM 
z>=T#(#;ZlH?`qk;Wfq1S%g#EjUHSB{@Se&7c(O-!8V>q)?P^0X@!*+di4No`rN zkB(44r6iaDc#ym7omqP{a-}C0Y|L;V0#gMbmppQ)+N98D@M&ll?s>70dcezd3YRDy zF!O?W&aa(@^1PCEU!=R77Ka)}!o;yPS6A=)X1XT5+Ki`>ys9qm5Ul(v5Ur+VWxWr* z@7AoKcuyasVT+*pU=O4fzBQz|X1rwamMar>K)`c~eFD~yO$lquO6_$u%T(SU{4O~CW~7zKHNzG@*a8=!i< zH9Uz*fk7?D+L~?u*cNd?B*h>_?D^lUwtv!h!pJn)_hcVPO5(1s&*GkBNW&dwvwJg* zit4^ymLDp=WD`h6S%9tkL4WYNcED@)&M&Kt1N>_BL*l1Ey}BbLjd-GlNwi8H*rcE^ zlZ61583=G2Koddqmd?2nD46M^nzk3TI)l@hudp(@j_*``%?14~%y|=O4iCy#onAV8 zinjy+xlbANjx)ax;XpH0z7PweD~R|btFtxgc9m-*Sg`yHAWT$grLPO18k3fF(0m3e zt8w<%Y2jXmat9KP7tm`v~vyRT+?HX!~ERYnb(TpV&n9q>U`p%vd|rd7xxG_ zM3(su(2ITq)ldtc2_Umo0b~Tq^e*9YS@|81{2u^V4j)L)J?i)87At}&?$U;EF@dFm zjlJX8A8lDwL!UrR1mTU#QT^?_^M$5yMw*vo8~NTNvJF!7;DODncNEo0KxX>XJtTnR zCQ~G>4k@<@AHBad-1xxx%v@Q90$6G8yV~R06O_Q;yP$3|{_vtgJY-8xcnm4QD(%wS z{y5DYq7el&g82AlY&#qjO3Bfbfht{v_O%Gxj7%0PK$PNuct>v9^WzD=ew=f7I7Z2U6Ya*&0z!;LK@5g zDs<}ZA974U235^m(f?RQ0C*B6A3296=+Nn$_+lOv`zMr=vhovA=0se?>UkdsT zMHNx0P(BU^HJ~!EhQl`ah5)-$g}I(NkOaf5gucNF*fw@30@ugEQj7Z2^e-Q{)|)v)~^9Gm*}JZ-*`YI`Z)TRv#+AZ_?Ewuje7+`KgY9Xs;V3Q&qJj8 zw$Y=D(5)s&dm2vn3aX6kFb<(ZJ-Y=JF=br~0`4Jo2Ms=|!pKZJVa)IZ22Yug64hPy zmd*L0=WcwP3Cc=44zs&;7UKE_gdn}wAvAM}nPy(i7pTVh2mv=I#kIX=rNol})yH4T z5~(zIvYtD6h!X{TUC;BV{1SV>)S#x1j|1hOZ!_eb_T~ZL{}z-9zeUsBUQ)}6bvpR0 zYZZW19p=VQMn=pJUVo5?u3rH)!~XjjeIWfOoA#bR5>pW%1|xn6c{FGnu|I(%ine71 zc78qgm0rJrg>cBg20mJzck^?Lk(e`l<3J&j$QQ;a*E+KVo#$QO!JfeUe0O^*@whRJ z06_xRX}=M0J9+2zlQy0=4&(BIc&~!!nAOF6^GK4q(hjNBHPSh7bGO(24Z1xki>A;0 z5xJ~e!4Lr?AV0y0w$bU?o-ESt_ogjtrpw<8*oS~QymcLRIWyAo4IXJ|fc;F*!#vLR zY*gUc86XO)@FqE3-z>_y=Qn<{RsLn#TM>Dj;CWNEsSAo+@DN+qj)^$nOhr}nO*Ddn z5WsbogLoNg%D86urtyE4NINFPXlO>r%_-?UNrRrY5m_FRV=i2j$!AakdQ!j_lN)M%?EQ}_Z|$aeSR%y4ohGyArN9@ zsdSy&)L<-Y{m)%V6^>S0;2@xrRl6bnONoxqtwWeg5E)kcz`yv9G2-82;}V2Cs&2BX z=m&p$E7~x23%@ObcHtj%=l@vzeTF~}kJ?A02;9G4?9@LokfvR!*l8ChEZtQp4x6hk z((X6^>y8-+3d`DL8~ zh8h-Kn(y$bLu0X%AXrw?KM8r!X@CkLRlS%$rhj`c#soLW;=;+sa+j0RXW8}Xcg2u( z5CB=ure(2h%Kp#HN)?sGjuCY`GkFc?h=LSQQQ@Jg#NP@%O_li7=+wB+kUKD 
ziM%-kdAGMg2cdpBpTmEg|2LAI`b1gCYg+D?xoX?WVX@ECw;-`NP`%3AAdhuvREU$~ z%bRt3-21Qho%$GONLgxbr+zBSfeTOr`Td$tA_WNZ8UI@ZDLipaFb8(xCB7dNrndqx z&L#LuX6!Y1mdJAhtD55f8-x?^31}8UbaP`3hBQ9saY2ufg(r#UAOD{e7D$yO=F!gDaGuJO zOBzHU=$Ko++_nE6{LHy?ynaQLk%({0qW?U8i2`SclmO1sh>%hHzsS94mE0op~Nno8OCYboZhe6TsbOE;=CH8&?>iSj92ykys{gw% zUp+eTmvy*?H=E1J5YmXy`B>(+7({aV z&(_jGVnrMKEBLkl!K1(|u{3vG8#=IYL+lv~F%)8i*?#nS@tJe!_+*B` z?5FtZr$3bapmDl;j&PM26F>1K}(DI?q>Em zuYW@fvFt}ZqWcWqjC-IT1?qOSL@K8bwQeF$&!KXbXb24qkZ&WmCLyfoXs_@tA= zK!~FRHp=c+c~Q;O#6q zSJxX>uEK?K*1|!Diyph`AIVN&e)f<{metshUicnY)nkz z?=>T1OaBZ_+<=<{itjuITRN_UP@EQ0qYs}?{=tdz8gUuuvp-i(`;gPA_a7Rj77CtL zK5hVBo5casCOaz4P2CM|j|!+Fl?^LFTS0ZwCd$iKk1v~jF8s}*VNV;q!Wo3A7P)b9 zVdB&WHr>GG7r8dCp-&gFEsM<$&Et3x!kn}ZhT;@al|>V$ug>AWbxgF`D1%Yf>0QlD zWp9^F3Do^NaDH*vG;%ne>W{kOw3wd53CY5RQ7bc!^ri157cWG%yNdpO?2HkyPU-5x zctA|JuVKT0^X6&bY-xm5qi;1IaCH5!nY zYOx=nxHJ2?Cb>z{dR2~EnZo}#NL5l4MXA@{D3gVyltH10kU2~|a_MV=sG&Tapf+tu zoiQLBq@q(c^fGtKzy6AqlI~%`s-jlBQk}m14*lVu@b=yFzaMJJ5tqC>73=DohHt7g zp-q6efWV5jV@~_xyg;8V9E=zQ=jIH0iXW6BUo(tyyRlxTU1CRjFog$ZE)@0PPXyo1 ze-kPN771EHhPr;qXsDJpNA4!=CJ_;3H=!n4myVtyqMM8>xeraQCx0cYO&@|xRp~4! zys%yr0rvMjn_N4!IFiV^C@K{0x8+US0EAG-FQr~un{L(|exdpp4cZ_iFgc9+Z?~EZ zWTSrGK%W&wViza?>#A-ZE|~L>H%y|+qDENqp_CUJcAgVf&9G`yjcG|;#3+H9dd8t)jec9^@=16 zL0~@lIDIk?GUtwRx8^iNtURK%sG1)u~I<2wij^8uOvPm`vIgr@$D2p~jtOy+yj>Aq<&%;1N{js!$F2|##spVn3aE#DR z0aQOVP+Rd9_|FtX7O9k<8HoknLCDcd1BtdwSrC6Im{d+%ow(0a)N>~Dc3TsnjABq- zkN2R!OT-yu!XRxv6$&5=N*MICQ6isVW!O)rY3;mBCLHeQJAHTES;QDY_|uRa1@O3UXAX_1Plb)mLSA&6{sZJ7eBVHkPN_2nMUl^B$&A`1>y#uIvI*&yJC*+bAAq-#KQYB=_OpIxUkSlF2o}YiR95F>m(FRy(f1kywET@#9&@Rch|B2bA9hP+@vFr$nZ??sx3-o4vZ zR{0|Slo&KUE?bhWd&GhuYJAl|+>nr$9ef>J6_to)eC+Mw^O7xUhXVLtO3`_1z%j7= z^h3l)sZNLIAaKqS>&D^W*e0YP$K~d@OiE4mRMM!VCU%%gY+LScfv7|dcp z$3D!lzr2x+wg+3ecTD*7$hQL?ZGdCOu$g$6z|S&Os{nN;>iwc5RL zEK5)TBe>)4Ve`oW=^L6R%lGc~Z9Tv8x}F|ZdF!B3iG~d!QBt=w`0CeEGv7H9?C%L| ze~3t?Y^G994#XdtG*c9qV7!Ea%>LZoV9_`=@G!rA{$>YXW$}Gt9q9QHK8VlK~tgMceAcO7}M$;XXm~%4oW3Rsh_CqBS{$rV1Et$EhE8>L*Ddg@! 
zB=&a|e!jPar<5&J{Bf!A8;-{}bZ2>Tz~br7!T#EBnTRbY-ZuImP}2$<$-NJO-*9mX z)^19_is7T~HKpch204@9(|2-Mz*a~ED9?HuRT z9Tyu-5`gX)-AlV0ghv?X2GhiIOn>Mn13 zsp8n81N^%DPeL2b+rehU=cyEpBE(WhhJK%R^0YzWS$Gjw>rBWO3yaWDbOvc(my;7B zFX5W(ZA`Dbze4C(48%yX3mUh7K7*_^Al_dR|7~wl>Unp9TI;e^S#AiiKBJn-Hkm}c zDjqS8NH5N37>deah9QSe6D-KDCc~Z=hlMfq=<%Y2=GAJ<6M-mrVZ^HQmz0S7oce$= zHbxkO9&ksp=3!e%72CzibB?H=R;|KxhX@~33ws3vFUx-X8K-o3<4!<_zj7Ud;oHNb zLh$#JM5)-Bwb;bs*hFOf{uNf*m|;Zft_PdRKFHYE3t(E>b>|Qc_Zo;oZs4$)B=Qth zxqu`wIZ08^<_)+9!3%Fxoin|H65nUYll19hKJyL%h75;j!|esmdU5-FDF#PHTI!$O z>m@ug%vxeLbWY>z(*rtS|Jfn})~sSP3rHUV=2vr3KMm2sbTU|oKb-NtIzD|e{q238 z@H@p}#ew=pyspe($`Ekp#ii@bzxJg3laPV1pbKDGSQ^L#NW;fVwT-V%6N)6sdM662 z&D(^L3U{!$J#^mB=uERW5{DM(21KT8t zDNlZ`p|}2%1yyIWk3Ya!DIS_Nv*>;-Qo^&nd_Q&T(MGic+h`bq#PyZ;&nDhzBFE{VSx3U zI|o)Dt*3W5iNSS?tb8M7SM`tD62_l0jb<-j5>TA_6Rk86DQ`jcPiCSF2A1KMtB>C% z9CYkiVl;^`-xeiwN$I9oy-#LGb_d+X%EZX5?4}f-f5Tj4Q#n3iIx_Ly9HHM7jaFhf zDAqxLC+7TsS(CeH(+zq!=d%Q?+jd3TMn!hpwh-#d#f!YI-9unP&r6i8)0L#>t0k`( zd>Sk{&2hb*3mj-^Lu5^hhKK>jmj)k&VhJQLAy-j#8rE4_sP!`#2 zN(+QGaEN8Re6aSl$VqCrIVn?hb-V25-moA-e|7Y0X*N^d6Qjc3f#6CW*9ef z5^mwvUaV&GgL|=!B1h7jB%nJ3=XHI&nBz`Q4-;yvcq)(er%@gYoJEbMa8vDeKP(?< zQP@;Jh8Xr#fMfhgIph%LIOnCj!~%u6ex>G5=*T84=t-6`%wmnII5()ds$C4)ZtX5> z)`Ayy1~}%OE8*ee)6!3U^iT55)agP!2WwR zD3&yARNf}UrLuQjgzti_HP-nL0gEU4$r{11na^E2; z7ehhF$1EIQ<5CuGs7dvFl#?MIY0mu3Mdg!<{)kswwfQm6a-0KJi6NGcD}jNR)U_V? z7uJ zMp*@83ZqSHMhuB{3LTLTiz|LpC?8Q*26J9@#*3?ngHZ8Sp@x4b!c?|bayEi4lsDw+ zV;T2522U70_R))hX+Czzpo7P@{$Ty&N)|H4=-5R! zwg}3m5zizmc!Tr{dEww|ZpHev3U`K?7&c%y7!esKAXp{O!gxq9yd-^5&7N?HZi}z- z7f^pkh`naySEiF9noyfkTW3t8QhVpDTXa^P`A}4K%UthJBk-;R*E|)Urdx&1nr2Z_ zi)E7iIl?bu`t1Ea+<(DD5|fFET#ps0In&>=+4y1EP*>?=q`4uffq|~-aSVvY_Vyc0S*8qb8Z%i!g z)}1yL<7P|#fVE*zV91M_ZOwMWeu-<`(tpjjUA%nnyTx9?Mnw>;AZY~szy$v-Ca_Hf zp|9rn{m$c(bjKsChJ=Fsn9d1IAi2AJIlVb%fXeF< z$(omg6oW4RIqTOb6laxC#F*zyQ09}a?!(^3(#NszR5|uG8wK)PS_AGKI{M~kS>)_&^ z$1*vwU7B2R`h(&%34#`K+ysIVjJ-4TJ;kHK?+8R(Bg7zh5l#Y?hexv&Z@%T=P@ao? 
z7uKeWk5rzxzD-VF9;ugjXF(O_^ajySVWUzzQ4w{xq~Y-An}cBlJ%p3^$}(7Gi|NYN zOnaJ1q>w<1X!P6t-#C%X<=;`I#LXs|sCOHCUt?=8zXjvV!qB^T=cJe#SA-Gq+#DKX zK(wddH@;eTf(5R9z@C%n&@lRH(|x%#(qgT*awbe>^2(NO{ETC2<93BO-Gr<5ohG(; zaD!_^8<|Dwi^&TtWy;gBYt|mFbbDOHroc?PI{I_gtNOsJ@(G>KkvGC7Meq+|oMtxO zWJk4I>^k*syIUBwyk4CiWCPLA(V?$dTB7h0p4bOlpA>FyNmPohG_eToF7wjqwae4T zB*&Z*HopHlOwJLnbNvBc=j)jKMAde|N=w-C$hOFf(+S^qle}JiV+Z%Hc64%ynOMsG zJG^w}rc+h;Gpyb<$+6NCvP`~xRS^hUi@ZIpEV#*|(Tu|0On9lh7I{imYa zT|4H<_SBNe1UFerc*Apm!vO)8=hBB%RKzL`;n}5xdkG6RD}_~=MmN+1zcIdTWsopX z+3UR@6E@#YJs!|AJ+W+pUzR^mAwc#gO|((#2KvmOx>ZERe{)ovi_bpWx4TZ>_b_kZ zMzOd50tYGY8O#8clXqN!7MUj@-6=rFf7V0m z4v_W(xUH_)=GFX69(lQaYq9iNWdHs|v(7Tpcf7XSj|Y<9Q=GO7c66)u!`MM+REnU8 Y(zm8O_bb_vgTOx%BXh%7ConhuAJXPCjsO4v From 14a6c1f4963892c163821765efcc10c5c4578454 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Tue, 4 Mar 2025 01:30:22 -0800 Subject: [PATCH 187/623] refac: favicon --- src/app.html | 39 ++++++++++++++---- .../{favicon => static}/apple-touch-icon.png | Bin static/{favicon => static}/favicon-96x96.png | Bin static/{favicon => static}/favicon.ico | Bin static/{favicon => static}/favicon.svg | 0 static/{favicon => static}/site.webmanifest | 0 .../web-app-manifest-192x192.png | Bin .../web-app-manifest-512x512.png | Bin 8 files changed, 31 insertions(+), 8 deletions(-) rename static/{favicon => static}/apple-touch-icon.png (100%) rename static/{favicon => static}/favicon-96x96.png (100%) rename static/{favicon => static}/favicon.ico (100%) rename static/{favicon => static}/favicon.svg (100%) rename static/{favicon => static}/site.webmanifest (100%) rename static/{favicon => static}/web-app-manifest-192x192.png (100%) rename static/{favicon => static}/web-app-manifest-512x512.png (100%) diff --git a/src/app.html b/src/app.html index 363d48c97f8..4a2f1ad1058 100644 --- a/src/app.html +++ b/src/app.html @@ -2,11 +2,13 @@ - - - - + + + + + + { + logo.src = '/static/splash-dark.png'; + logo.style.filter = ''; // Ensure no inversion is 
applied if splash-dark.png exists + }; + + darkImage.onerror = () => { + logo.style.filter = 'invert(1)'; // Invert image if splash-dark.png is missing + }; + } + } + + // Runs after classes are assigned + window.onload = setSplashImage; + + })(); @@ -176,10 +203,6 @@ background: #000; } - html.dark #splash-screen img { - filter: invert(1); - } - html.her #splash-screen { background: #983724; } diff --git a/static/favicon/apple-touch-icon.png b/static/static/apple-touch-icon.png similarity index 100% rename from static/favicon/apple-touch-icon.png rename to static/static/apple-touch-icon.png diff --git a/static/favicon/favicon-96x96.png b/static/static/favicon-96x96.png similarity index 100% rename from static/favicon/favicon-96x96.png rename to static/static/favicon-96x96.png diff --git a/static/favicon/favicon.ico b/static/static/favicon.ico similarity index 100% rename from static/favicon/favicon.ico rename to static/static/favicon.ico diff --git a/static/favicon/favicon.svg b/static/static/favicon.svg similarity index 100% rename from static/favicon/favicon.svg rename to static/static/favicon.svg diff --git a/static/favicon/site.webmanifest b/static/static/site.webmanifest similarity index 100% rename from static/favicon/site.webmanifest rename to static/static/site.webmanifest diff --git a/static/favicon/web-app-manifest-192x192.png b/static/static/web-app-manifest-192x192.png similarity index 100% rename from static/favicon/web-app-manifest-192x192.png rename to static/static/web-app-manifest-192x192.png diff --git a/static/favicon/web-app-manifest-512x512.png b/static/static/web-app-manifest-512x512.png similarity index 100% rename from static/favicon/web-app-manifest-512x512.png rename to static/static/web-app-manifest-512x512.png From 039a1e1f1487942c5417c6c506260d660d312df2 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Tue, 4 Mar 2025 01:47:17 -0800 Subject: [PATCH 188/623] refac: assets --- backend/open_webui/config.py | 8 +++++++ 
.../open_webui/static/apple-touch-icon.png | Bin 0 -> 7512 bytes backend/open_webui/static/favicon-96x96.png | Bin 0 -> 3826 bytes backend/open_webui/static/favicon-dark.png | Bin 0 -> 15919 bytes backend/open_webui/static/favicon.ico | Bin 0 -> 15086 bytes backend/open_webui/static/favicon.svg | 3 +++ backend/open_webui/static/site.webmanifest | 21 ++++++++++++++++++ backend/open_webui/static/splash-dark.png | Bin 0 -> 5419 bytes .../static/web-app-manifest-192x192.png | Bin 0 -> 8349 bytes .../static/web-app-manifest-512x512.png | Bin 0 -> 30105 bytes static/static/favicon-dark.png | Bin 0 -> 15919 bytes 11 files changed, 32 insertions(+) create mode 100644 backend/open_webui/static/apple-touch-icon.png create mode 100644 backend/open_webui/static/favicon-96x96.png create mode 100644 backend/open_webui/static/favicon-dark.png create mode 100644 backend/open_webui/static/favicon.ico create mode 100644 backend/open_webui/static/favicon.svg create mode 100644 backend/open_webui/static/site.webmanifest create mode 100644 backend/open_webui/static/splash-dark.png create mode 100644 backend/open_webui/static/web-app-manifest-192x192.png create mode 100644 backend/open_webui/static/web-app-manifest-512x512.png create mode 100644 static/static/favicon-dark.png diff --git a/backend/open_webui/config.py b/backend/open_webui/config.py index cd97f7f6588..ed73e7d83c3 100644 --- a/backend/open_webui/config.py +++ b/backend/open_webui/config.py @@ -587,6 +587,14 @@ def oidc_oauth_register(client): STATIC_DIR = Path(os.getenv("STATIC_DIR", OPEN_WEBUI_DIR / "static")).resolve() +for file_path in (FRONTEND_BUILD_DIR / "static").glob("**/*"): + if file_path.is_file(): + target_path = STATIC_DIR / file_path.relative_to( + (FRONTEND_BUILD_DIR / "static") + ) + target_path.parent.mkdir(parents=True, exist_ok=True) + shutil.copyfile(file_path, target_path) + frontend_favicon = FRONTEND_BUILD_DIR / "static" / "favicon.png" if frontend_favicon.exists(): diff --git 
a/backend/open_webui/static/apple-touch-icon.png b/backend/open_webui/static/apple-touch-icon.png new file mode 100644 index 0000000000000000000000000000000000000000..ece4b85dbc8d7ae1e684d21fed069c25c00d4a84 GIT binary patch literal 7512 zcmV-e9jD@nP)Py75=lfsRCr$Pod>kt)Uk&Dp_^igL+>SkX@(a<$B@ulz}O~WO6U-pL+IELFdf39 zhS0&pKa^ct$^gl1}hz>Ype*YEiH7D-1sM`h1icd-_hwzg*f+GqC6KQksT zu{7YEn^Mv=(cjdPrjsPxDQON#vq_p!QeR2aOPX8~*G>{WoIp=1UG${I8SyCTK=!hM*-MVs; z*x~u_*OLA!=^Fmb?4PAZVTO}$_abHZD#?a&&H>g1B`qpx6-lc}T0&A@yKb@x#=n}~ z{2592O1h8To88>ZFjZa8Tv&y|^N+fn1 zD4!1QKTOi?Rok&EvCz&r-UC}0&&1x6mXkDT%0;isZI~Vp2Ikmlk{*`ymYLy2DYcXy z(2=)d2T5B?S|$o+lzLHGJbBC!{M(lXfuS?a?6Vd>KhwvR7SPT)Oo{b^h}=n%=FU_r z^&RY;z)P~Lq&v&%_A4ErBOtF6xTDyWv6ZXhax2Pb#9{HyAe?`Wq}$Al5PfN+RDgEQ z%_ZrKAc*iYNra}oApx)D zk}`zY3k_)J9G;3b0(RO#l2()i?Apq^1TT0T&!B5$$H6{=h z0S{^BfGvH0z~tM@%;06WRoV_{=N!>Kc)R}K*H$Pr{cy5qHZwEWd~K7q0opl70_k?a zj(lXm*CqAIOZheMVdRy%pk4ITwgB2WNA~n_l78hQe&t7Tr#B$Ncu|nCzJEKy<}C-b zbB^?XB7VDhif84(-0?!#)nv(VSZrx_dP@QAoTCiDi2+@4s0VXp!QAm;@U5?cpKfNb z_*x<@1GICFS{8qj1XHeM&uxho{X@Fb02wm3!c{jj?Cad6CBTZne2S#a8>$H9{&M~S zh0p-Jk3UNq-U?9X4(Q0tI4?-|S=)oT5CCh(BmnhoaM<%UKXL_hbO1azvYB&zRy$V7 zKQoaAFh4Gnv~!*Z#oRy^x#sX@?BNf9i59-f7jsY~LF1sjTza_xI;x35uHVi+lwD z3;HoiqX9f(=Qo?9m<6CIka$H@EU`4&^Ajzc2FOtyYG$A0Xm|Dj*+xUl85+fy#`Wh z;Ciy0l5`VD3TPTulA`X5ji+28e_J9Az{=ak%y2%$lGuQbT0vYH^)-n#xWCA2AOj8D z7*%A7)#^D3plM(UpnXr13|7ex5nBWFd>LkDltk!NVgcGY*Ei}!N?i>vUIVSxz?(ta zxkb#3`guJ{uYissg8N3LyLubX|GU>fUK*fJ`yb4VXkxb#2hj5b4K4p7Y3aOX>c{mO zh@k-zM+Ws)1|bfhDcg0i@6Z}U<^Ga9H9$h`mSzUCGu;jyah=I1pb)tiuYq=J;34Q1 zJ$2#gaff!!K|rE_wQmI9?xpn4M+1~V*uc!Fm)E6q3+Sk}#(g9)R#lfj{P2T5`|LA) z`st_o@WT)F_~Vc3si&UOi!Z*Yx88b7AAInEzWnk_{p(*4z@+J>n@-bDKfUIfYc9<@ z@4Ol?U_kgc|NQf7h8bqij5E%tsirCx`SAJYpVwlGEtYr*`}Xat>#x6F0|yRFd~eP- zJS=HocLmjZ0yN34?9Mcauap4LyYIeRcinYY2;i4repzG3j@7^aoxJtlB$G@Mf*DZv z@84f5tgwQ9{_~$}kwq2}|JH7)AfV|zP7(l{?L^Wgpd;J)c&gh}sq`(o@kuA0q?1oR zS)YIYx!9qd>(L987>|H>iYcbhYOAfLBaS#iKmF-X6OWPQ4HX153r3Cc1G=)My8|>m 
zPiS0DcdAm$_uqf7zyJO3y8QCXb7$PlQ*2YeznO(U*}Lqri`H0U4b3vkEUCvz zdt(IwjVhj$v{qNW$GZbGrEf7eXzWv3!4_thyz|aG_5SN->Z`BT{`>DA9_)%F>#et* zPC4ZiEwI1>nsn0qoktY~G)sD&nQa^g&_o9RQC1StV~;(iqmDXC*Ijp=FgJ=Q3of`| zxLfbK>#mx7@_c^;D-38zOADLX8;xt+)eij|NtD7Zo!oZYZQ5$9twI-E)`z{Ul>v~k ztH+EPqkjGRWj*}(@hS{x-jRkxbRZrt6978mJl`Z~wcIqE!@ysB@r6!5{dA2SIkK?3 za#yd7Escv0ugelkQ0FpxsW_lXrp1Lfo`u&L&}fyz;Jn$ZHlGo{`|dmKzWeUFE3dpVbH8=P0gW9EVTr_(P&xw|dxR1Ng_m!>`9{C}?QeC?Ip>I7x(Y(1@xlu) z)N;!$m)Yx79MI_L6f@)8*Cn7IFAMJ(PsXTGqx9{!e5BP8cIumNzBv{DqAM-b0MJjE z*)kIWI;w_53)@21cZP3q@x>Qw+ikaP8Hx#K6DDtka8SfoJ$m$L{pd%{clD?Npb4_f z(GfKqZ*n;2sLy*=D;BMpXFU4oqZ&MTa7ZS~S^(2*v(2WZmtIcky|%`U8<%EH$KNw#$PgWU^wCZIgvXr#SursF_St72 zz4zXGac}GLx;58aQ#ajoQz{};O#lt){}?kHVa_>{;OP9?-07pKCF}X-b=O@tv{9Q8 zNJ|7QT0z-toF_K$)?07g)W^sn^@ba6&^qg^lUM~c0W^A{)W!|~8mbp{!wN}Adho#q zwerd|+Db1&hYr;lXPl9VUpMJ2Fp=Py?6uck`uO9Ilioi5dOk=@B|I`oq(*?I+{O?9 zJ+~ybzy3)S)rxEJY_O2ca5WZReDQF1pL5PRTQTJ>pTT?Z>#)ZjdkBw68XPuurMf@M zn*cJbK!Bb0V2i(d=bedv#g`fZ`kxd;0?>tZ*qqkH(QaeHOu zmP_*duC67=k!*1C8`B6Gq(*?ob+;mbroBY$3h7Cd+3dBu?Y3JOiAy7duK9^3 zlwBlYhu&^GTB0Wx+>QWr;e{8DeRGWfjZQ)vno_wZ#MV#?FC>S z=bqDs8*ZrAUVAO^5Pam2v*ws1UNeQ70s1lkeU~Ka*cD0!4H~3tucbUlGJ!+Pf~P4^ znCwaEuO{1O*?64RNztY6j?g0W@Wm3Rm)L_St7o6*49WbImo^2$x-v z1+Ft5n4|)?g9L|P{32c*pqc^tDF998EX8FCBSh2ybei-k+@ zcz$3C#*vx<`gH*PVbOvrFTC)AmRf45#D^yA=fe&=EaXiWT_7(}Z#qp0-(!wBMh6^l zKrH`y%>exgfQB4hsLrqi4=^2)E;c9s^Ugc3rTK=Z+^416LV@sNsG0by>H&$LW)ANTsMauDhD;&_4j^Y%6|e zfTUY)xg~UKCG|l=v!nz7{NXalCYx-czx;)cdC3I(`OGuV^nmU{ZG{9h>^PEElLDF@ z8X`Q&0A&yl=Qz;Pk)Y0LG{THknMg7U_(=HU;8-Kr@(6p}_UV=PU!=pqP7B(o;vI^ecD`?d-fQC@eeg=F4HYngiq%TciWy>wM3|)8d zp^GeOQ_DE`LT-vnL9H2}#}+p9J@?#mhs`gO3LFQx4safdEM%XL7%?L86bZ(VqlgzM zj?@g$Vd$GG=!Hv=Wv6A~k_w$(@44rmus&T;g<2gH&EfEyR7hAL1Nbh<>$K6}BTXyRpQe^|5~&fOKa#X^p-X}B z6HTQcN)n_MGGE9sB)K&1K`QODy1Ng%nf~6G8fhYfj7!n;+i$-;u?bZpK#wm4HdACn z7cNnjg~VGiV8DRXKwu2oSWw_4iS3ycs9_93^(0iCwyqr?B9u`GNl8=7Y6NKdlniEv zCPsW74Ggnd#H`1mdMHgPn~_ykStZ=zrkaZC!L1Q~VMu7DNqsYIDrh2f#~pWQ;J|^2 
zCtf2!Q;uW1QWQL=;}nhFX-eOWthCZf;Vy?wowxZx>kQf*6Xd~uZbslu^FgPnZCw*U z|9`=A6z!ZVbV~~ASPUONJS62blVAggF*|UbB|Tgv0VJ*8@D)?+JWXJz@mYkrsR9+Z zuuSK9)&$TKwWRnTNe>s%b=R>#go|-`HM>i9j7L@d0}nhfEPNQZjayHk=0l^m3i*)O z%V~JdoyTWhxE<4wm^#L)384S$vNLF_pUv_H< z_H=E92n9SbVx&<3JkzkZr}>Zyet4`|tq2|v>oIVl`!R)v4I38vuBl_!tkeL|T{o|f zcIn3@6_**`u)4U!nrXt*EKu^gpIpitZ@e-5b;v?$0O%*oY&^GNybT=z8fx3{EEJjd z7`VfzW`E?7N2cn*lK1@5jL*T8j$cVTUG?;=8UXqfGy6@44mtyRNYucv*rhw8*#m0a zT5zAsF4*mqG2?KWrBocy-$)u}W>K=RSv0?GTEE?O#cwDBJ zU^C;MBU63JC6~-}YzC`1pebsyp_x6?9iVAbdXuEpGuLd6148;?cZ0w~ncA`lq3pqf z2kXQWPYlz>@{o!H8bZ?gX7*K=fR5~F?2(i5P;TC1lGa3{&R1W3HFKYE)^mp>MTIAR zS#S&HB^3ws-exvIO49^3bOdP34SK1S!jA3?1iH?0SR6BEOsY(mynxj>E~drEks~!^ z$dKH#X(|lpZ=s2s+1N(iP6X%()YN#5*Tkb02uS!0%#Md1dMJ$i(Y>`OLPQc*BSimM zW|<{-Fjw4$zE096W;Q`N==cI?=;1!AaSM?V#!e^eaJ zo4VSln~Ltx-$jSP+dAW*J3tdPygg`{JRlBmihd1!0SQ)&9z9xw&RZqy$Xjf&MHu-5 z$dvJFwNw<)Pf1$S%xG9YfpiCGT2ve-iJBcMpG=Y>c#YSgGO@s`kemV_d9 zUNX%4_U$XWaY62Hcd?ra0vZzor^Wte1`oP-Ku2~o`98StirHVdNrY3t2JSNCdCK>~ zU&cg9nm_=N2$vg4xCBK=lVUff0ca7Vf`G(KmrY5Vz4{k(%n710UBHS0ZA(+c&NYB zYalNTP!Q>7UD?hZ5A5~8=bRfF5tb&;YnFaouYnjEz*V=UnO)J-wQ&GFN8rAre${d@ z9_lag8pulnPe|Ihb7Zjd!{Pv%YjzL%fuEMwO#QfC12Hr}$=2VS*$++a(A?~tgM~*7 zIes127&G*jcn!qV0I7lf&8*joJ-zPG9S@A^NYnJBl%%DYiu0EhPXoB;hM3v)ai*38 z&~O|tm$X5gf&DdJ0~u)mP7#0}&v|azHUaz)X=wUV|nvoGULC=s9|P?Ib4T#?I$#eI2yU78#5qTI z%wLtg^g$e@`D-%O0OrL7W(J^}mMj51Yv2gKl#GI=XXOW|lLkn3-NMY?&*G&s19W6h zFBNsu@~*rrR;T)7d`;p=WA)HxUpZ4iN1$F`(uI;JR^!EMpgRrFMRhwfqkMB_k~yFw zP-73B=vjH0ty9$of zNXOT_-Y{1{N1&z*!g-P)Gf}w4OZ_xJrZ%N-cL@B=B)sM(xdS=^HI)nDwr=i0o!eR# zbzs0umeX)E!?TgAv;@!%p#GC2>gBcUxw&f358j&wFf(qIv~?@M%w$>$Eax2N2i}jt z{-D?!7E1r`900MI_sQGLXw+0o0o`y=ghNd=Cg`VLs;2?Kd`9HW$n5~wmD*Yk=m^xf z^o}P*-3MPPhh~B%qa_`Xmus#kjkN{P5vZxav|X@E9~t;2i&5^ra@cD53(@mrWW=7M zT4>9qZGi50Q|H`VN+AFpff_zFhsB-RsuBP2q#7V-MkqTIK6BECcfP(bfNn5B0QBKLN3q$Z z&Ha9I0Dos@=j8sR1P>@8pvh5OAp-Rp3EstD+8PZIEV(0EpT9Mm1DN%c3kT?iof;qY zL6Q#k#cZ-(_SEBX@WU&0Tr;HxQa?S$jfDktWLHCGg48rZ(y|`dF;+C6OTGyx3lBwf 
z;5&=9D>uG!p#j~nTa(T;EIKr{zF~GgASuhJMEclJE-|x@iazO50J;I#6tf|ux~7jX z<_;|)eq?0tW@c~Y{jS~BpNLVBv)d!|GkfTca!Ger5YmDAQ?PWk-YZ>#J=JLfnoQtB5r zA6}9EH8w{e5Iq|m_Gn;9gfH`&7`dZ0#sew~=mr~`Q2IzNw^5f*O4D1zwWbD%8yj07DvjE$V-BjhVd>V`Hn# zCA9){CvfBTTOf#lEi7qZ5JXrl=mhhG&A7!R*4MK6xp?A{BNKoO5tS*u6PS z^2csY=zW&xZ~7c}b9UWNBvF{!gP2(>f-y7nGu@y(n$mD-gQc|AkI08xD$5A zRN-&h$R1xP=nXPY1JG$oIzx0Vop!mlqXZf|EN(M`8RH~<76G>dv~Np#JpwPJAg-m) i^_OM_Px@tVu*cRCr$Poe8iM)fI;S%OZ#%;J!vtBoSF;QPDy+2`LniRSS?+!2ni5fS?Ij zr6mbrNepNLhLU|ziHIO#!V*L*fkGe&ilQJQ1`z}mL_nZ3U)vAo&6}R?d2{E@yvLj> z%hKt0yZiio&pG#;|DJ1bJj^j8uRLso~FRUzXU%j7VnGM?{{Yla^V+@Yl zidX}{aU4R(zeXHc8*dS1_`NGDtSpmRtoLZ4Z)88aDDgh=ZNUxZr zwVrR-5_ECU?lEzw|&zIzmbs0PoB15mEdfBLGw_ zp-N5mwaCH_l{72anUE9!p3NuB{B!QhFMts<;a6Um8alC;EC7UuSW#8~L2!QJ& z1@Q%E;3_w~niF5SRZ>S|41OW51OYJ1ODUI>tfno+h8>4+be=KhzN7(g9H*(If933c zSY7*Ghd;Q(7=tbgNZc9Vl@ZrSs*&>Ta|56OY6*?wl{X6j;1$rx^1%|wnI1bI5T@u6 zvc|E6{&)aX^Y}8b(Q;x>0}#SG7-K$;9x@*QUaqH)q`wpc_JTdt6wOGmEe6HriedqP zmA1+wP8H2#Mcw82@#EUFXOH&n+o!{a59`E<6Dn7(oGMqYtV=JwR2N(Q>Kh=yX`iebrxJQE3!Tk*fcf9z|M0e7651@%ZidTb@Jp%EnmJ|^XARd z%9Sg1=+Gfo@hFFXj4>)zs+3U16<1uLyYIeR-MV$t_1CjRNlm!VV~;(iC!c&W?m!hQ zR@A%izN_=kClg~uSdbABou;{*2Ea2vOC`0ka;pS`@Zx-RDE*mdp3$eDewx%R0p#YJ zZ`RPEL)D~76L)3+RIxa0sslPIr|e_ z6XZA!Y-PCe;hsFq+j-XBQQ_QsgE z^8ldEJIVIuh*PIdY3b6Xdf4 zS%d-PuB{63{7pCAq>n$&xA=>D52BdTrAxaZAEu;w_3Elnp@I$^INwQJYv=+UDB z)F8*rSlzpK*Q804L@4Pe83cewni$!$HXHz;mSBFG*wjAe=AL`*(JQY&rA#b*sdwFV zmwNT;rF!-1xp9B2eEs#;TD5AG8$qpJy;@&>`DMaE0dLBbDejsfO95Eznjyz=*m=0l zw*8+mV}|=SIwF=#eWA$9uN;d{IK48&$rIu4s-R@S8KzD z4ZaJh49@`kz%2l9LnKM`Z=0*%y?ghnS+iyd6~yv>$dDl#IdY`VIp>@pz32NK6Lj$4 zLA7bqMsK}E{o7xL4I8F$BtfGX%`gC8o3@mh*-%20LB6U^odSZv@f|4 z59Y@&!-%8s z+O(;!^JO)Ri8}jidn^}1)vK<$%1vR%lUlWEY2m_!y7t;@^ZlF!0IUUo?Os~|D*%id zHA;^@`e@v-NS+ZI5~CL;dzt6~J9g}-mtJ})?j>YPUw-*z-Fj<&VqX>j@DTt&D78j2 zzxUpI^}-7;#8m`2ngt6MxQjE}GHu#4H|+KE1AxO?BU-WmfCB))vN*XsZUXI*pMT?x zH{xDVyLN3YUcA^f7q(^n`t@quxUp}R6@XaiMbEq}0N^+P5RWEb&G6A+{rdIYurQwR z{rhbdC%P+s&b;6Vp 
zZ41#AH3vT@Ocd0*H9Km5kSy>42LXWAUXZb@$e1x>^vENSzzPaA+sTtB7jT9)ZrrwQ zTi?tpWl8{`Sd(P{>;M3=S^1wV1&xp#yZ!dt#R@f^kTzh3*j$K_{Hs>2ns3FePMtdL z8X>kolLa$i9RR!thdl+I=lj!7KKaCrMqoVn%>u$yTWbN7NbU(6?i`E?LgO)8@s(0_+ZpJbm#SqpsBwkY4EV>ezH^zo7M_sx@hYr3VWf*|d zMR^i&X`!X?wX!-5DxT+=3uXk$XQ9T|OYsJ33wa*Ok0>nBjLOqAtO_!tq#qK6yBP-H zds+$)?KnfSL$>EN$dm%m&d44CZ9_0)zzxvja_q(Dsr`XhR(=83Z6dIoF#7 zX{V>-3!tZyuE0s#k}6fIs7H?;di?RnMcf}&G6+C^db$SydxBZfq;-SF=bwMBS+i!j zse`Rsw}x`6ehS4&Q-OnW(yCRfP|UjMu`&bz`SM!E7@Gd&+n~Wh?A1sI@t=kLf(m;b zbVZypyiw>RVWOnyd%>z}(V~UBMTrXBdFK@_o1P&6{u!Zywl1(m$(seVCs<*VrWgh! z_?xU3UVH5|v4shrk;f^G-lRuXleoT9r%t-;GAOO72>@hZlip9G+ZM#zrb;=>e#In& zt_WLMJCMp?p#=yq+pzIC)>LSW@ot}iZK{x6g_0FS%*CZCmJbyvkq?-&=VlnuEriX;oH^Z-P1e$f{4f(Y0KXG z6swWgiO!bPxLlo}mSvoy!r3p@iZ#Vbb*Vorrf04(hD|VWB?tgo($lmr_0AZnzf5r) zflX{Q?W1{Lp#b1K7{XF=*Y*hg#Zi^XKQp`1>8d_8(YZ5$_C43QHF?cbVzJV=HOe8= zuVqoLb#Z7gAiK25TEZAQnZKoM1jCe00bH_oT)wmn-8&hM$kC{g<6OOxkRti;+@_>ZfDxjCaiL4 z@w_x62f0OqlRL^V=u`FoJ*g-t(QUkQW@vkMCLHEDFwFTdVPLTaKvaZ8?ug@Tp}CZl zEGl6o3oMSa<&^F0JI*#HLPGh!mBi)`t00H@sw`haUWY@B=&AZaRv+jb`{Ldy92|37 zy%^4hyd%wj?sxiy4WQHhb$X|9z`o`j6(S{TeJN}xAkG-D7ueb}&oejs(sUnp*t7%S z`3CO8_V^Z(`gt9CS)9N^5reey zrC%y3_796v>CumqF#uw~V1EMsp|(c|yV2|7#op3XO%=}cf048aEg3dTqL&W4Lf@x( zc4}Cr{?}OnKnxf-X?j^wgNE#1-vfq5&@>l^v&8@Bn!GMA^ukA(^s3qGDg47oK^!(3 o3a8C3HQFP_7<(HNQrEx!516HhVy*7bO8@`>07*qoM6N<$g2H<{%>V!Z literal 0 HcmV?d00001 diff --git a/backend/open_webui/static/favicon-dark.png b/backend/open_webui/static/favicon-dark.png new file mode 100644 index 0000000000000000000000000000000000000000..08627a23f7934510cfcc442d1a6ee943e748af7e GIT binary patch literal 15919 zcmYj&XH=8T^L7%W1W`x;QR!7gks>|R0D@8#M0!=J(j>IdBT|%NL3#-wMM^;FRUV~? 
zv`|9}q96p39zX)fdk25-IsXqHJe#?9XLicY>|PUl+d%v5Y4+0~5a{f!o0>)-5ST*y z2Rj9f^o)Ib2K)j07-_46@co=iAP_I;mL}?MfXzzQb6-{kmZvorD-j?>Su7|9qzIXV zok4^PfcQb$b}ZmakY>aI9Q+8nzt1>4vk}03cw5(`z;My)Ouglm5R*B#TS7~Tnh_wV zC?cR*TFch-2!9k>e=zeT>DAw2^Z+`<>~N+v^iW4+$c1P;>UT6l zWc(Hlf}V!|$yei$27_S-Y<7nVW}$E3y14ByYS8YeK&gp$%9rKyppX^l(d7N2%8QU0 z7%zE7DXJxz9-SvTxC#w2X}dP1Qs>SbLf_Q-w`Fs8^=QE(hsp=f24T5pI?!IHAf=xU ziAK$eh@9MpbrU}aac?5o#uh+T7 z8Wt}?t>~$M9oRfjf=2*+|M#3?&jHrYkLHOdW06b!W~c@&Rn~ zktWea(wnPVvsA^CkY(sS?KRqe_<;T4n;u6CH;Piu_Y4O3V=abnlY>1WaWg^rKTyk1 zIC`1;Q>#8ya8le@hk0XlmhC2rmku01K42a+5b+AB!=gseWhrWT550FI^Bv_qYoI3# z+#UkEmR)WDLRG9n&Wxi#2w|l%xY~4i07S8tS&cx?=mu@tyY9AQ&jdw!e-yl* zZC9xYLeCW2Nbjx^r77CCA#kKs_l6iGF<4*ro z&FR=K2_Fvd!=5mUXNq(aaA6ZbAd^bDZ^c@3o~cHjDvWs$a^bwtIxCD-ARKpIgA}Y0 z*O|c-u>e8`O%Ej3b;{6%Go}cI<5GT-9NgkMEh)$l8)00{4pYzJ3L&CR6i}6UrL3(> z9Z4_k*R5c;kfKhO;UJhEX+8 zs>uj19F<`wCQF@$@=C;oYb}>mG2QPR<^Z^i>|lkIB^u^-HNoq3!+rhu9hkw;5aw1# zTdgk{zRFzUYT@plCwa9WK~Rb0(LHM9i-E4>biPW1a46Ej0S?f@@s2$IXyw#E_HwgO zrc1nJ?&iA#T9M7%K^9RU0Y3-3{}FNngx)e-zBgx)1(b9RS5AzF<7)&Z4Xjwo&F8-$ z(d(Wb{^5tvZ>v+o=nDtLKxmd4JVLvt$7o#JKuN8Q2fXuse``XNO@Lfw=W8q?+|=NG zE=t`_JfhC(TVu){G98>kSS3v;kTmc-AZA}cT=)~v#SO^=`I7T6>g7jx+acthqL{w- zibh;?n+k;4ue`!7D{BY_hPEKI#fjajGY>j-2qN%sH4dnmt>@=~=QjN0jjTRoU@*_H z2JE*0?x&13NP}Jvgkt(V{nObh|MA23#=M}2TKdXX?&%{KMvuvfaUu4l_oY+-I@RwWB!Ws^fSkA zre?5+V(56?YFibo(OURcThofOUwQj=m>Nz9vXB#lnunqMYZiPb~y`y7i))?yFZ-}ApZ>a zA-O1wRYSqBalW=j44*BawkP*?ko!H0i+T+3@NjELN)LmL{B=Jq9zL?vCu{s`AOF4P zxOZF-_&O@wcRTGc5;s7X`q^ROzHT2tZwQH@#)WDS79;i_rFHk)OKzSK(s5gN?|C z5Wue?%|E#oY&~8nqBE}ozlDRKoh*cl>jR!*Weye8n^7?8O|O`}0Iz0xU^+Vnu$W&O zoE=7$5BM#~(-mLG3#{^6pv`7GB(gS`2gSq4D9O_wCr2AT!xvu6lv@+bhT=JOR+6Xl zc@ZBl8Ycvd3P$60>7r71ojP9AR(uRs8=gT=v0+iQ!QWhJ^bH>4D{q-$l+R9qM%5rKNgf(?kuXgZ<0NM_yorExN3uhkw z!%&Hdita{2hFBmmUqSf1N@=GFX1qx6>v9?+s{v~GXBq>5ht*4O11wpPtvx}1O76LytJ5@&A6KO^@AV@rm^Qv@1GSph&aeS}PFIL(d z@EAkfyC+T-%;@qox0TF)-><-(9TPEEvHd&Mek#orR+@n!BQDie)$o!Z8P0t^d%Qew 
zLNC6Q)I5*AJ}ICr9`Xd(VM0#$^=~y<`pdugbRVFE^OYZb89LTwK7C|=aH1%AI$IXG z5Qw1dfEfhor6sWKB~*fY4LvV*cL`2Y@FS?Y%{=oR!ka(1<=a4b1||hXxI;e)b>Cn? zzncBC4#)v~%mewJ5FIZ_&V&+$BLhzXLm(&;qVp=~mC>vECh?chLsGo4O~WpX>)#Bo zpr0}Y&)*;@C>1{VGt5St)C{UVoy(0Pn9C(@|Cc96n?Z*K_C9;n8W z-d$drIhp0VmC7SZ3-oHZQ()r$urdR3zt1gF(VfOmjQ;H90`$`R=Qby^d?uB(IAm!` zt~6|0!8Dqpj$RpI996|C;m2HZGJ6HiG-;z2GyS4-81RuOw8e;qJ2C=0W(*v>qp2q9txAl7p zJYX6$@*F2MYI9A~`2uaDwxH;U=UU5!&(EWrN=WDn_M9}?2~aD&mLNE~a8e`8fJ_?# zpNIAuwpgAAhOFBav}jVn4BLXontXxpln27N$%5X=+?t>r79g-V zRBuJUPLTXtVVC*SI4v-NApCZ&o{d5%VMGEG?0B^R0Xs*}#&gGB6R^dBR98F>0gkk7Q0}xw{4fbql}-FLOl6)VS+u!Y86n?A*i--6-wo}h0w_Qd@z2}K znJ-`05$B_koVBO^8>7tvwT}Kmmbmyo{o+g@Rf$83Y-oe0ZZha4x#LYLV@Z{n3-LfL z_WD8Cj~U=y&uA;lrzo7v;uwY5A6r0pn>C~=erS;uy{af2M>{6~(f#Rr{cNdL*nV;* z{8%ku*rQA9PC84b9`Zi)$o#G8>^zm@&tueQe-{dP`uIDFp%!`5{?E8t{#I9m_L2__ zN!P7O-!OX^Kq24x^;aqQkA!l-y4{BM5%JI(cJHV)VV+oFdn@d2YuHifzDXGI=;+VJ z{f%uZmAca~YnHJz(~}}T+?ALfS8c8mLQOI-FnGisZNk4!UFhoB&%C?KL#XJD8kV*H&YcU+W%<+7(_{Hkd|XLCUN9V&2<>t!g?{j` z@3}_i6x66>6XqRE4qqn^?f6_Zu>E`CF|?%bWu#L02$F@(QIhWl-?~u zeq?f$o55XnJpNN+V&d@k@3}$XZ;LnLXNA@m7Z;z4V4j_Yz#7l0LM!>F*lIJe{r74> zFCR8Aut(JK|Cl4yCKTQ5*9DHABaGNs>LEs$1F5R1mWKBFzl$RGZK>C>B^ z_FBKqC1&11Q3C_j8|=cuUHa$H22pNGkZ5#{%qCy;LLi&{K-W3cK-a5u4|+YFgp4cX z-^#+wL5SNQGG5h;W5yVWznN~0=ctDDi=bI<3XoZ5H-5=NcsxApf@IhRSW(2i^mLEi zkp2a;$IZm&;k8*J-{V)DTD;DM6|dChO63buJfD>d~v5C%vQ=`ZA}B(qIxx(Q_pTBoAMspkKVUCY7a#b zlWnZ5B(5<1aMW7f=ZeLxIkmIo(>J$v8wROTR*vaz4@Kmdo@x@K&u!<&4N?aO5vH{s z-dC~QS7+k49Y?D7FQA+nNvVr&Y55|4@n5^`Dt<;>UvX{X6Ye89#p6<9KSo(zR9aEd zBBXhE%71FaOLffLgFB!8yS+?YB8sn+!o2AsWmcI*G<3UL&-sIW23aDA4g;ZPQI zr6Pn=%O^z`n=RK`yuuyv-|(u&$*iTF&U}NQ9+{K6F0Vn$zmE2}P@Y>-x9f>d3Q5tY zk;5xbTU_2LtXx}kl-A{b=k%QM40RnMX-qfsp8)g%zXk2LYKST@@4L56XrT92f1A) zttVE4bZt>#$wo%%oI>*8cBT3!POCP$05=1idUrqS%m(V*qJOLi&=5Q4vxbx`k(jUdoh5yVI?C%KmCAka-)lv9(%dg|aynuYsGc{@~JAiB)wJ<_p7%v#yoTI5^1 zt4%lyHM6`#F8i4!P;pFzL-}AZjrB6|5TH8HY$-U4%8_y{4muIBCQ{I4tVN#2vq|HV z2JyZ=c(t>??Cz5qTU!kbvy~(rN2Pt>6qkd5AXYGaV`Rwv2|4veznVL}?jPHCq^JJI 
zx>{GSJc40HZpx45h&YNTs`YfrppD`aEv;o}|U+&WEbS-sl}>Tg4c|UUu71~>`K<`b~kt;u&e*+et$IGkkG12Fjr+_V_?oYIyC&4Ut=wJ zB_$BDrRF{qQ z3-r5-13Bj{x5^SuS+e=Z4{bkBCSq|x+nGU4b*tLngvj4K#7tDZzWE|wlpW1>ukgoz zQVsqyYalu=oOPD)wXtbzuqNAMJEc6t+vU(_bg0jNKlt45{d9-vdOCCl&L=eJ1h><1Y~2h3HjPZOMl&!h#>&oQG<)^j+_|vZjd0K?QY2!vF0jY zV`WelK(>Fx`J?+cRCXq@RiDssq@;MXc}{DTXP4U`l>~X& z-)mFeb(P*jP=<0qla_qzfv>eZSC> zhpq~Bx-+Ahe_VpsuDe!i%IjD7^erEL84~x{^nL5Ir!4e^kIWW2DdYZmuy1U$#{`lA z&5D%ol4KS^7<3zHYiswFzhOG7TCVEmuB<+&7pF$1~d+g zjF548^9BJI3k1jo0+~BwG$Np4Fo^NtltO4Nesv(SUVq3LaXD|*J}Cj0`25bySDje{ zhv`P#Yk0~V--fUJas7xbrH#C~(2v1;Kd5>EO!;W2vSG7Pk01m&e7HmX;)@Dnb)|2uCk-s6P-Hy zy*WyEROkwzVA6P@rb7I<^0^+TX3L}Bfr>grV=MNmZQ0=v6GFF_+(LoMLh$}hpS5fi zSrIKBOU3`I}Q$fPP)tYAF`KY~-uM*+CDk==NeybBi<( zF>Jfv`gC+&9@-LoF!VzUa!l5LakW3MWA2{JtJz<4X@xVscgu<#Bo!D@yR4{Ocjcxf z(hq6832O0-zmv*t@);-Jw?BJVUiXdmCM3Hml^0p+C1?n~MF}d98ueSbK$hB+J-gvK#jIV9D`o(lu9E zjh?BIrkXN$D9O=E&UMsb*+Q61Cn98>q2sOAHig~?taBpIQ5rwf95}b&r>Ng_v{w}B zF>#remfCe-+vbvnb`1y*_@X zhe>#4O5?ir}e@^=tCE`J9B(e z_4_s+_p2mML;}@`xJ_zG8xJ!D*!OsXNGE-OnQC3q(?`=;m-@D@GYCp5tEyIfg`+B- z<#evGn8gtkg!*qe2gt9!k($ba4l_zAAN+8(S4Co1RmYxe+wqYznHSqi>FDU5UllJu z&tRN8cG0|U;PX(z(|s`Tr1Nq*Gh{%B94#V5dTJo4*Y= zUtEaBSth)=8vvJf;fILbVUU!5z!5Uu^Dpef#n@G2Kx21Qm29rD_FXTQHlo20wIN0xL@zAesHSI$T7 zRQ|)noXcRho3x>Lx$#fqLkl=RR{TNcAdw(515?%*y|Rp9CmfX7P=P?4CGhuqK* z&D$GaGodO1WS<{>ck8x!IDakhJlCKTc?pC2H?_EA8LkM$-pn~E9MYgp;0pLkO|F?V z&SA774{0*#D$CU2`N(3Qksk&D7JuEH6RTC(zNMbI0ZO;a-hR-VZYhMI&_{HX`oRB) zp@=VUCoU9cA-CX_t#pSgQ*}$fe~TJAz3X5FXI&_I**I$CBaXy+$XrRUVLCF|Z z)D2Y(4-da>>MBo8-^~*s-^C!QKs5ApK;6^m@)~FVU4BX?$c&a5(>miX3S(A(Hx6KP;j|iA1q;wC+hwl z>`5sH*zNx2(IPWqo!`d7kXV)L=zwzIF1-ap$<=>c*?ZeTDd#yzxPy<};1G*h5N;c+ z*pkYg#`>Zb$;56kS<8s+(jWz@ZxDH8ddNu@cb}tHp896lI9_pr`cqVx>y>^Q?m8GV1WcJJG$Pz{SK%+zFPtXd$e6;=+lRN4A+(Z z3{=IBv0QhvA?bd?Eq-BiQ3TS+@-d>MxVZT1LXyaPn6uMUDCzOLw2u*h%f(%CZyIB# zrX&qjU20l;J3LlVIseC0I6|7Bw;cVoSP@xpyXf55Wf>CHZ>szni}KXk&fm=aMCVOS%o;?ec)&)IgsgHE()haA|UJm=^_?(ST>aR-d^ z7QweTjxi4n_I1p5fLqc+jMHy7+9N#rb#r4;biE$IQKrGS>!v 
zz?P4aaHR)|KOWkM=(TA$WWE@gF}OQ^EjPU?)Y_G<$xl7=w;Dm?X=r5s!61F3!3)BO zVuu8nNxSWI&eLuE&mPil>CvAbKr41$R%D$(psRAH^XwaQ$$pXeC!bcbH^%dVYAF-c zSnuRQP5KObsFLVXvG%>fR|tyzM#pd!bG?DcwK0N8J?`PXvArw1n;I#1Fe*7I#oaw* zE76361ieg$Xk1E+-X!_ftd3KVFSh^WHRX4sp}jrGMtSp_RwXOq;wwRWH|{81E6eF& zgpzd8a`r{i!QPc|rxzO(HyTn7$)CC}leBLKvS(R76M>SngXJ84rlFfz&(M-i?CfH{((*=|PFENFx0KCI`t`zLKW-thzjS7r&Yy89(XgL}Jc z-_Dy-EJR?9=M3$lLy4ma{RRY{w@IPc4kj>Dmye&cf^xs3mHGL%zOG{l=W6>0zV4`G z4D$4sJWU4jh>t$?aRNL+>3-+$xv9o+HDK6`Wk&JF-zlTI@1;p%j%Ul9~#f(?*jA6KJPg|^*9_Ev>UhBpT z@X%?vQshVRe?=}De1uzEfi((wO1t+sFi{(ERp&|OdgK=ug}RnXiyU0U-wS&vJlamu zo7mpn+{{+97}TZV)3kX5LsP1DUv(nAp(w?+&#cCicoL%}p~cL3Q<$ zW0vn9`|Ts;pd?Gdr#1fX>43C+qH%%njr;Xnr1>)0!mv&@{+9$4llI%)RFf$3yxS|w z9fqkO%;oOUU;Z5OGPSzVUH`~&dGcVrs&4k0g2#4rjn=Oi%+8B?8K9+r(wU31HT9v=3^dKYz`>CEJYh5{MwW^`?1RPnCYdW^Qx^M*z@>lbhe z0@Z!Bfj+oC&EunfoO#Y;;bCRDRrT#aYk`$@-d}+yI+{|SM|G6DmfENjwmL;?;`!Gt z=)C3ci8RTFNUd^XI~TQvP-5 z@MSqyG6S<{k50htKG)4of77;dan<^229T@ztU1a&`RXKqyF;z>{AOkSyn&%}s?KwG zA}!%c;Uj)r)r7AFZmdrriHS$0UZF2Rh5|z@HZs*Y)vqz5vjj@+AUj0B%c{;%4iBwE zq$IiaO5JX^RHXQ~Y%Mq~=SYd`p4SNDUO%`Np-Asd{iZK~W}bs+#u10-YIBLK&05Rs z|JB$C&RSS(1c>d6eKS`%=kb~Z!z|~t|2hmSQ~F_YRhGW7)zHDp*H&MuZTaHbKBj!x zboPO$O^TsVpNoQ#4l5LP~ zq~*$!?SW*XZPMcdJ!f1(5WP2Wb<;$qW1Vjr zxP9Y;Uo$l)@?Ojh`t#sv^~MancL)@ytxt4d3q9Tl5-nchbmgU8>yj^uZ-fFRp;Z3H z==he>=;}j-o_%q>^20KA!Z$;ag4g;3YwWo`Z+?M3adVHxI!x}2xW24+n-}vY5{GU` z3H~?&E~Yw&_Z;63u-TSa=qlIcVW+Rw172;tnww)b ziI`bEv3_RX759|3<;1^#2UL$lTk)%bDR%w>zoVIW#Qx${nb!<@eI{7naVOG!c{G;@ zs9aAjWFi2k2i$&I9lp=NJ2;C&%JD_J*v}eO2!gmK?Upd5N22yKj0)Vz%SE(}ZTpNi zxSh5B=EM~O3GAu3V!!Yw_1mcE{^fLc#8{tc2o|UP{Py|7JJ&Ncf%`<@1~9*AeZxz@ z+>i!|0ALm8>XD=3Mks{Xc{-OyPS<8zdC$kR8j7rL5nnK=71mL-CffpwzU!I}etVxH;5pXE>p^NxO=1ZBC zbUdUp_Bya{I{$q=)w=)v--hpYXHNhE=(Nz}towd!m6D z&otLD{2E4f)+JTeR#DEO3yQ7>PiafF?fT9%?YuMQl}HX}o=t+CZ{Z~mDnBX-<5*L* zt*flS`KUIEeCu~R6Ew2;lcQnBIjGMcc#lZr>|UVOZv5SPkHznXiV=tVwppKSvZ#%0 zNyXdT*L4uHVF@b$)z!1~t;-l|ayOb4b8x$5_aPj5k@k z$KjS|`WWP364DG{=AR$z&RE7W&)NXbV)Z_)HJ@NDGKr?kYcngZ_o3EPM6_#Ws}VbY 
zc(L64Z?LtyP(}y`Gc-i4a-GZy392iNN1$6i@6?ERcpr%6t&RsurE(t>MBwdGClC&i08 zoSR4WcC5Lz2vadf&ssR8NhHNcl%`N>tPA*YpRNE&wQ=zUgTs;q$No3j&DPS4BTAN;p}=0#~~ z>C;rHj@EBsFlwlll4N?@?9vYKU{$^EtlANFXL-rObdmj@YU^0BmHF?rZy$H>qaW%b zC|?<4rVfgicHUwCUN4|XiWy^M--OrJxCC+JZ@&{<46$9Ty!t$=X45_s$U~b08s#GH z06YS)1yay{(F$EnE%2JCbE)~g1fjaIkSNnE=+F(Jy8@j51RgRGCTJwLt^7Yw|9mUjH1$sps|FIz1GFcu{%P z#h}|K=>t@7e)3tN@8d#ZQI$5KNa}4=5Dl-RWtaxM9GY*vzG_0>lMDS4#iR3Ra?3*4 zbckbOc~~Cb8joANe@mYMf^4}pyppfWhziRv!bnX*Y#ybDhhBCK$@2NWNPi+Sm>XYm z4waMWo-dO_qF7>(jq`By&fCP!f#+NRUbd#Q|Ly@L)y_2p`B_DVmmG3=qZb6p-^s2R z%h4d1@W@r3U1v}95LVlq{zF##(pAN=bqT8y#lH54PXe>H$B8v~%;}x}COoo*qoyh` zM9#upahNJUHSi&7cIGRb(h?JZQ;ir58d!v*M{hm03R~Qm2LY(Yxw*NgZ33$w|GU-v z5dcl;KZfBBjI*fd12ONJxyCo&Z95}W3Lmz0{CtCnwaLqkJb z;XYb(KOHX-k&UG`7#zawHbZi?UL~4 zK1p@cEWs{?gewDu{%))6Ep7~%#CLnk%ga+s=vW21w()o!C<*V$X}+USfKKh1$uwU5 z5IbLQm7SY?3yQoGMx`vN&u({3vbMBawGqC-XaMqbRM!`B< z>0bbS@g!z`>9r>$>&wwL(Y6j!@$z6gKajpwYlcb%?kzh^+o$|RyD?DGN2aU0yq4Fe zt&CSoc1?7atRLQ%W{N;Tk@m#PHeuW5_rDg2t4@)Oq7Z)z%`jnT9uKy=?jB}_|R z=~Gs>?Y%3;l>Q#ZoN)|C0YPNwA54|pQzwi@g`QBr--@gKAGD$q)_Wb9?iRLF%t*>f_c%4$yv~OwmfN{;>0nIql5+c zLC*!{lw_kE`aj|`@Gp-S_wb>;%Ic<5aYJV{nJ*o)sAdA0V0!gVQXL?ciE`#Vp5O{g z>QCM@3vB=8mdDbAO8s_W>9b4eG(aCH z!WH@~u3wj-aq@nW&9%Z~$P<9sXSpnktN@@YLk!w1eKc@hxS9+Ad0CW!X5GQm)aU&} zoLoKzC%?4|y7Br)W2mH#_>%o3_IOrysH5^Z594SZ$A*g90Siq0dY=CA?q5dDUN1o3 z&C4B?bp_xjl%ecG<$piiu&d=5_4CCg<9aTw>WZJUs6KIwU@3t0`hDALWgKCpXp0m8 z21K_XH@|<3)d7Kpp(Szs9fgIs(!)BD0y%@s$h3dIL|8-nlN;|nHF8H0$p>L^L#2PR zwT>0X%LC@(&+qjMDK~OuXha@fDWKqW)?8>1J%IHW0Q^Vefo$3%6yagY*bvjEL;>3| zf8lC(AU6^NNMlj}s!z5y@4fc@e;>RI-^p}a3mkR0K{y%l*|8)F5`PR(Y=c2YYu}Ym zx!xj7bjtF-I>tqUpe4-3wfcP@a6PiM?@i)$*#X!hpan3r#KX8sj6B&{2vvw5`yXr% zTng0_{8awwz4XtSi5u9=(e;jhHcW=y{Z{B05;-&B|I!F4=GJw=~-E3!&OJtZWQ z*GU2^Nq;O8oA7@o$_S4PCQ*Y1|Juh`&K%Bvj-b^xeK&~i3&4xkPN5~k!{0Ql6+sN_ z6YIrYIIYMrkukGM7oB~K#M5f)&EC?tD@!CilnNRye=5C0GfC&f@OR3KHYct9d;)cY zc(^eDtXQq>G zd1L-e7n(PzWkB)CT}_R@T2e%JeT(&4nodi_(X4zHK-g7H?jk5x$iGrN?hltg`u|`t 
zF^J=U>o+250E=&JF%-#KT^6MEtk9OF@UGjZ6^+i_2}%q&kG8HVIpHLI((oS~4M5=H z-t7p>VgI^4I$85=ON{)Z3xip?oEJjd*;e|S?LrI8zI8%a9D|DG#B>yI6piO%gW2uJ z^mUDV#%E&q$qvA?5nAcwskLKJoth8Ssv@HTbM+OjDtl&n@dYF8i&V|F!^Xh7DASK- zhB>TL&9v>sU4-xf1tCKtbq6@P->1EJdyE+5m52(TI$W4Sa=3gAsY(28$ip(zO!FYz zS;%U?%i3IWF(9MQOi&JN_9Bf-cSOC7#~lXZ@)sh(mpYT9sb5RJj_e9=PPF4o8JaS&vKJ`zdq zk<$b~Y2Wj1h1hDYA8)55EVAGE+kD;_w1vf&lxwgM6vuR}h32f42E4&qirBwA5l%fz z{wk-A9TJ`xKGs{~H*?p;J90)LRdFQav@a3>;v-MpPkU~a{wH(z7JxvMk0|W0YIO(vt3xhm0kqd6z)QnkFg$ zm}d2_!A^DYX0VY~~92NAe6#(SEivFM9#z=D1N_9Q4c5w~{a#ry03K)x4f zkZ)yP?-f_on@ThcuH7j}OrT*g3}bjZ1`~E$i3R}WWdXMbgKk1YK%$x<7eupZctME> zkb?>w$dKU|@>Do4c+$N7!cCAQ4@@snfGmLH`!$E#I7S0=L*ZN8FJzUm0J^S@b(=v9 zeUa&f94+LQA!Z-#(DOy$48v25eyFDafDsOwpxPkGoC^#vs%Cf(vB8j*4g%gaDcBaW zaPc4k=M-LF_QPFgNx$`1F)fg(u|QR(t-`1!vDy98x1h@6cACoKhOPi;+JQyBX(>C@ z!{|Ej8X=`-RjU5crM1WQnhedeU^iyB^|^=kZ!4EfUxbEm({UCisffK^Sa6B3pfUGH zaQ$ilI?1SU)1}qA6hYYw{#+miJP?09u^v;&)o6n_525CS`g$m2~+0j>E+XR_wKy>qiz38_2 zavp}^BlI{@I7)U(+(ry&0R^5Gt~K}d0r5Y}BnW_?gS;T7^P1z8d@KN%>sMw1vSF;J z#=}4aXtxrFVxAQHyw?T5)u+L*UR|}RL4%v28Rs_(ClQp*UQ^2l)-7UT766}{%(EAW z%}R8mQ8=bgGCiNgQ-pio<5d`Jt^g77G~BMG88jcF&r9Biz1dS){`F9P81V5IIiTo{ zc_(Cv7Bqyl*4$$P_+PtS5+ZN;z{uV zR}OW+>wu6cf4MWZ2Z0t&ut@JlNw*$6t%0xWg}2K&&1ev^6yg-e4i(Nk=bL>2@=sAj zvtw5YE^%Xr$g@3sv*93O7Se0z5(@%dlwg_Ow$SD=BXQ@^9x8sJBM<3DHR1o0($s1AbnNek-|WQCV}lJ~p^ zHe~r}#4sn6lbQDkDR+1$Ee^LPFu%3WUhc;Jcf4rN48Z>O-c*bJ>|CBXJmVN?mG25| zIko-R{knyCS9&t9T~>I%9RNs7&tiT7bPE#RaWrZUS#g9GrohKFak`k{KIgcvC%Rx* z?(qgU>-+4jT(-l;Ff{Zg#_cj2oD!AU^81&)&JtHjOZ8#^W||eeA{5p~!5FI;!Z4P9 zZC0)=FRxIuv%7@Ub}&}oR#z@!Th9XxVbW*6(Fc(eh7@2>;WlKT7v1g>H+Cl4=)lR$ zHgWAf=0ZwaQPQh^TT?d00(+oc?sNY)f-ij&03+(BS@9#5+n+KbGu@qQ5AXIOp)D6& zP0uP@p5BkRS}Mr~afhpYnOfhP1$xAuyp-97#QNwrWc0HOhri(PzwN#Sr%ZCM52}1x z{cLD<@ij=Dd$?jE=qb>t$KI$Osazejlqm^;HAqIA$6wRb1E!I90%IZi8**z9voRd3!WwwM!#H_kw-6Av<1 zoG#v!wK<8M2>?A<4Hf=vsBl$9%Dm*cR`_dDUf_sY7#w}V0OkszN29oO>nFH{EZWRMNN+0qS+m;zvTCNq zd?qxu{?(-55B8T;muzO}s#AebmrXA?n##xPP2`DHBsQgcKNryuoVHDY;6EDk!m)rE 
ze~L#tFo3nzBBNlzwh3)AS@=Que6Jt;f?%lfTg#;pHVA1lNDjLwk&&9q54Pocy8Mk5 z774U#M*B;0O{3JXp_Nu)M{axm;d=tOmvx1NZ4>o`@ck`5HYZ-7r!)NKIeVD%GaE<+ zB3O7m?uUDWsYA;Zpn2AqTpJuI^9BgkZ`y{-I3Uavj}jaHZ}GoK=(hK{r9Vje`2sTu zT+f!afOfIPpSmlO^k{*=_ywVLl5@5cxofY9WtFFAuuh9aRc#OAG@Nlw0%2#0=azOv zp~&2|RvohGk!P}2*4;_^>UN7yq*(b+EA0`fE;%5q$2V=W3(?_P6y`r3>_^`>*GNZ! zT;Kikrh$IvCdEK~+l%g9vG*|!z=R$nlBuJj5hmWmZ%ttgF`?kw=K3Sc*-PQ&M?vSm y=&MgXjHw7Ugp?137y?b$Gi6m|d7vlzk7`=RDqAbY251Qffo|O}(8R0TVgDZj7nc$M literal 0 HcmV?d00001 diff --git a/backend/open_webui/static/favicon.ico b/backend/open_webui/static/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..14c5f9c6d437ed109a8579031cf181ee52bdf30e GIT binary patch literal 15086 zcmdU#S!h(*7KTrZG2)mu&SMnCE4H+@a(!vJR}o)CRD4h{Vyig3L=?n_esG@K2~;E< z<4EM9_@Fr4t0)m?X(z;aNEB4Wc^>1s>)X~oT_<&_QdK!xU5otLr_R}HuYa#S4~~=J zWIEluI~wLW|LNp7|8yKDFE8{v$8m1!yBrPqKB=bTEYJx5&^W5%{Hoynw-6D@R5Vny zQ4CZpR~%H_RJ>GFgupZJ__jq)1fIL62RR_Pr{j(y~0tdLn zRSp`D`cAo}((h{CBXEIJmF+>}&~8#u-_>kPfm5$obxFU|N7DBHt^&8HeXD+>@BJQq zWU~H&+i!klpzj%1zvbAJEa%F4aP*AR`a46x_?<3NqD0;Kl0GwkDpMpRZ{NO^YuB#H z@#Dv3TNkY15_|%$_}4?%ur{4~H_1 z^b_?+L*~HD0TY^wUAlBhCQh6vZQ8VvtgI{{DT^94YDi8_jtm<%Otx;_DxW|54(&BK%Q;$DuUyIi?sJu@>?YS*qUwQAM!%@bu+xg4s=AI@+esEA7r zA3iLtTD9`&a0QKLp?d_XAx{&25| zNg(tQyWw^D_3PL1sF^aFIdi6~OB>j+V~5cVfIr;Roqt^QhyF0!|7B%maryt%t5-5) z$PibTay)qOU{L;tdtB{zK#pwU; z+_~fFKijr#E9cIgi>{zwDk>_ZYuBzWck+!NtOQX1;m#OPr7(W=@891ue)aC%+th`R zA3u8Z;eGY$RT(^Zu!lQyi(|+BTHXb~pRqTt`2&4D^|nQe79LJbnlv#!U9ez*Y~Q|J zii(P4{P^+GxpQYvUhzHi>u?ExKiu0W9)+hH?V0)8vSrJB=WjJ@)->Z3{bP7Lx6h+S zjS}?3rJDTV40qj>6Dtmjyh+I_JKeFpR+cCF2N)vlXZdsOTGjC^q% zc9-P6WtBb z{QNt~EXaiME9JNHE+-7jPv!htKUlxm(E82wqxGxxvqr@4+|$Za{96&kJuR&d%@z5I zBE_GI$BNGi7Qa06j&H>{+@SyBhE7gIzJ~%LUn&o{I1}D ziG2Fm)Yra3Ty)SSjUN)>q4DX1E-B~(6S1q&J%TQ2kd`@Dtcr)m>!Y}@fPYXzTBO-0 zmVNzBzKKUU1}&sX+P;3!_mnq&3NqiCoJU9-8xi$E-%(G37&kSUn1YSp!^=T`)5fT) z)v0T$9+zMPTW;IbXWD8^zdw8SOm5t`Av<^Olx^F#$;FEo(D{5i*@9Jo`FfPHLY+qdjF1Lh1K`=zC&5qAVE;memVo4t4LdQ6xw 
zAyNCZ1K)O6**<3ve!@N{d(MeQ)G{l8|-8smTJ;Fc|063suh z|5QZOKkfi^@7~?yi+Zqc-#&A$Ykff5#7AAabTMT_$XR{@)b_{yBW%}>_kWoIQJ19zA*#(Jm7mf3$rw2BmcT4RJ`>{3B#PrSq>4hm_4f zv5#n7=%Yx<{QD~d!vQ~}Wc?Awf%=e=^_S&9s2uG2lm3D}i+GE(MsS z+Xui&P^?~QD4lXmleDpcEo@?&eTp1Ko+6Qb3eDp$ibBOT1#23>bD`oFP0m;JTdv{{ z#Y+YLqs*+>^5YwEa>Enhx8i?__X@u{ps$VajX1=0)6i$qQ4PndZG_a7KX&zw19?r4x^NYKHj z*A&KAoMUtEi8;sl^XJW3e7A1h%>6O=a2JI6b4buZ_k(@GJrM3b*}DhKGscY@C;j^M zGx@2cj~coj{h>pLy86t;NV9L>zOnS-;Niby$r5v)yQHK<;Lkmk82k%W-}ukY`|%(5 zjoRy7%*Bfrn>APr|3#D^KBla=3u))q(aJAW2a>KoA^mFA->@{2YCq^(QD53RNx4wA bfagL*MEiZNd%>mb_i9fBsuCLy9d!Q>dBe1} literal 0 HcmV?d00001 diff --git a/backend/open_webui/static/favicon.svg b/backend/open_webui/static/favicon.svg new file mode 100644 index 00000000000..0aa909745ac --- /dev/null +++ b/backend/open_webui/static/favicon.svg @@ -0,0 +1,3 @@ + \ No newline at end of file diff --git a/backend/open_webui/static/site.webmanifest b/backend/open_webui/static/site.webmanifest new file mode 100644 index 00000000000..0e59bbb2823 --- /dev/null +++ b/backend/open_webui/static/site.webmanifest @@ -0,0 +1,21 @@ +{ + "name": "Open WebUI", + "short_name": "WebUI", + "icons": [ + { + "src": "/favicon/web-app-manifest-192x192.png", + "sizes": "192x192", + "type": "image/png", + "purpose": "maskable" + }, + { + "src": "/favicon/web-app-manifest-512x512.png", + "sizes": "512x512", + "type": "image/png", + "purpose": "maskable" + } + ], + "theme_color": "#ffffff", + "background_color": "#ffffff", + "display": "standalone" +} \ No newline at end of file diff --git a/backend/open_webui/static/splash-dark.png b/backend/open_webui/static/splash-dark.png new file mode 100644 index 0000000000000000000000000000000000000000..202c03f8e46e189025b204b5bedc0552aec4ac82 GIT binary patch literal 5419 zcmeHLhgXx!w+%5=rK*4dM6jTsAc9ndKqv}?B28*QM2Z3t2rUT`xHJKgt4Ixmi}VtD z4^dIz0zxnldVotw=qSCvcz^G`_5O(Wtu<@DIkV2pS!edFefGqf8tZd&fw=$x0QbWO zI*$MVmd)RdgN9fBZ3m`pHm}zA3d!(-oDDM?rU=BcTS}-jD 
zpfd6Fp))H0aPHGX9W9F>mX(Qwm*@2B*`{*sc*5j>y!^tF>uSImjuss!QLHW~isPmv zKyqCGi@dy!=C>DR2^<}JUdH-B`^Rd}LTFZTdD1Jc2&=3>p4(2ZZ|NTz}c2nMk zy+QA;JEzL>=Km~oB$W?M2yY*6O}~aOV#K~R1m1eJM%dq)e1-A#^}Xotvp-^Ik}@fo zwBTl~cjcEQB*GIx$o~m@5fLmj#rsd_@`)|f{s?s=MPPAlX^XTKqA?*i$^Pj+ALC=- zjh61(!z~Q!4kp-wP;x|4J)#*_BvOKu6B0ZxHr&D*tKI#S{Z;8xH_~JJ=<3Bmvt!Y@ zzPgfsCZodGTf-nykvAAtl(~apwn@GfXH~_=};2 zZI@ZXmo>^WFyVsSIb=2e%pZ1K)_|KPh5etkd&?F7_QYB59E zbp@X&AI+^%f&{-DUbCX?mZ2x}&B_yjj1Leur95!99_PBiY=OjApAz&Wm9p!yA9`$7 z5P4jZ(P_4NM^5rFDV}KLAZc8k+G64 zOCOA|b}f$whrhRuC9TY%_Sm+Tr~4YY@rNUOvviry9B$J!ZtNXE@F9M~W^2AI0*7-3 z6ISe^T?H8EN4O`i#MFlO9*@)?H{S?)EQswT4af$VFWbAFAG@(D5vI$>ILxJG!YcjD zp^Wh8Z_J^WkKrgE!mn92Xle1b0=9Q`#oaZ1B1E+wX?q|Wmd3H!JJ_|T`)T5S$-&@# z;V)-EAswEn*=*z46QNrYVYvbfoV3p%mvgW4J+2%ceJQfKMH^ZSp#tSVsz_sL7o2y=*&Byz<9#!*bv+;V-}V#!q8uF*kaK za&mE*7lTvO#wV<`LXcgLhSpaSP&0%O3~8rN7KQLYi;WpnqrG6e0bR`leI~}nj;sh7 zAdx$d)0?B*Tzel4KCvUX#+%2qB%&-th*85)2lZRU4}XTp<*!XNFKh^(+I<4N-`M1> z0m`mUYi|y@Pg+|)e!oDfL_H3wK0w`_?cUX>!|(u)UFJ}as1$Mb^>4=EHJ%R+`IpBm zr(GAfzsmy;l@>p1E-^0}v|{bsX5*1)LJ>x`$oTIJ4fYF3F}yNG`+vp>%tK9I`T+=mZIdvi$~zr^7~$%<>qIIrpxZM7}ffe{7k7N?xp;cv`ZDDtTpyrDJ_r_ zH=gl8H}MmcNx=#t=nL2^Q(XDV-f)k=XkWok1lJe0G7I0l3opQ~QCbx~;#OkZr{NGh zePC&p91*_lCdL1$&=8+SQE^wKu^_nbo(H z=JMwG9ld-$_F>h~E9<~}ziN4K)tUO&AC0*Q)8K94Yx1o7+5N|(!JGx+puO*duBnc1 zjxqKo(}sU*`18ObvmiwNGyQe2!YJ62f+BofC-J>w1ZY))f8GNAXoLLb#!D>n=6jwrc1o2)x1;N63Y_yl5Wo3rCr| zsT#D|kJipVFZ?mB0NSsrwZ$b?a&K5c-K7^epjNR4)-o{lM}#jGu4 z&4tBc|6LHa)&7XFPguO*tOqQR~UHhgkUZTg%OnnNjXJ@g1h=suMY&*C_ zrp+=?phoGa+#vcs5R+!xWR@V&Ba~xIsvn4HC?;NmaBO}nqQpRJX4suuujd_L;sZ)g z?PfsWS>m~3nq%p8H@D8$Tph4eHCvVPc@l98+j=R%%Mzs#*%9aTaxwmI-u@_o`6U2o z|4V5uH@&txJ+fHt^PcMHItBp5INPRri|67N2_8r`*Ry-c^9}kexU-DY{iX`zelSE) z&9A6Bso$3Fer;svysJ#(Ks2EsY0~6hTjlHVcK3~#{&`r=@w29u*zt=hs&8nVo2;qc zI<8HKm~o?8I`zvyvqxalHFAmsDy6(=dv@`7b6Mf$E!;E!ZiJU>llD~@q=m6H(O}J# zyI75Cwx%(7x~Gr9cC-Rz)!z``C%3`vo*6}%0zcx-9MN&)Nz1VVVnQeN90?Y5SeJbs zFYS{a0kj{#m>{*pX+5LZ8$PdSnwJq!U~o8`_gzQbC@=C&>Q~jD&dEM 
zoXh7j&dp#~oVTVqE>Plm^6{r`TSgKj`=NuTxTMrUuexc;W@$l38Bpyqnf|Uee7LZ; z`PUua{cCsyAV#C4bHS4Qu9SYJ^nd3;6NPRqK9bp0;zwE#eb7GIDyh$ovf>aTqUsT! zD+9!^#i}E!NeUTbs&SbhH3xpihOlG-jEwtASq0B9UPH3=+q<*^QIj-X4Qc--NisdD z(zi1RNW2d#~nSl21{eZDjMbt23IvV*C3sDfws*mT} zu3e&6#St0)P`vmg-`=4QAyT0IpWI2^!iZ_YZ(0(E*WD$`Pla=d2{P#qCqJcK*XH`= zxy3>xbFCN3DJN|9!k5Ld#zIWD#>TfJ!k;8L{mT{DW|ctxkl(R1eza4VL$qiMeK5rP ziD7x(hC$fF*f{(F?*He9t_B5E1ObnB{S)4OaItW1WUl6V+Fu#5?Rprz&Ic0$sSWWn zmPI80dnecC#;2$Wndm(2=tu!ZN6O0luk5hX&?|nvzLMmH@ZX^kE+1W+`oh=9;nyTV zPLvzte#X)8){9dQPvORfq8Gexi))~Tfpt-xu4n$ui~O(Pa}~)eZqtM?V~))kk;~bZ zWTHp@v-5lLEI2+!hq9gq6w;fPE{{J8JnA$yHg4~vkA-DwveSQ0ZwOybVF%B|WS(!5 z4PM6 z8Y*qSq51U8rojAm?#}b`dxhi$COkTXRc-Xw$chT)8awgT3P4;r$L58Bdd-BSzC^9W zJI3`wF8|_=i67oSvjp334YtN2XEz+9)rm~3{T))hca`yw>MH1L9LPkcQWC|W9%Wr6 zTm3YXB|@E_p-@D@+L%;^2r3dTUaNkcslGPHp^)eMg_m1wTaV zdD1OQHjuM5oQ$`SZ0q@UF}vZ>4~={BOA&X*VrS_-33&Wq-=}69SK_?lg(8e?mEB zljbQdS-;$EjgSIjSiq?Pa$QPF=4+gTBgvG`vO~=)`N zh*32_CLKjtfM=@+Z2@^nMcaQxvOj4!i*4nrZE~N^-*o4Sg-3iVkvhWZh4Q?^@-Yl4 zUErze8#l8+ah551wjNI{a8Qm--6G5OYoqHKTbsHvOY_j0>6-v-E2J#b-;zpsCA8RW zo}v@w-Zrv%=^oo9vJm6uFXfJvGnAke_u;uh7~+;Lcwm(9A)%Vea2{w#CUKw?S%Z8;12Z}KS|Z5;b&WT4N8B(9I+SB={)?43UQFVjW7 zU)a&~Cy5(Ja3c;Z=)ZaMbH4QKz0rve=1r3)6(&ca$ z<$}kP8P|WJdh}gJgTUlE0(~WyOzhr=T=|o{a-ElH7pP$Ej^~!^y7e8Nn zSqJqgQGy14z(}&U-U}7K_iUl(zU;m^F!P~!W0d#L#rtfO9k||=zqI>GI*N9>tzK5P z9~)8^c5M?hOL#q+-_b%e=`;&>b?tvb9f|Mnz))2y%4X&E2 z;Un8RtdQ?BJZ!SA`w&nVWqzt10Nw+ADJc6d`85f-uZ#1)GW#h4Quz$TExA z635B4cFWII^^TeYX28Rx{jFR+TD{qqLq%|&Hl2)qoRI1*IOjf`4fK`vd4v+?KKpM; zs%LaEnatJ*gJ{esww|6c28Gylh3pHz?KWSUM+5?i5?lGZ(BW+3i+w2-r&w$+kwp|H zL=9br`dGSms^+e#s3>7+_>gnO$`+MR>9uR88l-?kv)W(57b@<#yhw=(LtP<@{zFwX6|x2)#(s!UZx5Rpi)>7j zRJnwuapv^KhYHRkf`G)!+PKRy$$ONh1Ob)XOmUObo3xxi(g#%G!qOyida*VN(HEHN zX3+GNXkE=qQoF}JrXLzeba6EtQ#2?faWyo{zko%IaQGWk5ZXr8#$iRB;-~#m6Cre; zt>r);j~S&dJIci&XgpJR`5((qyoG-aaKM~tNXsVP<^G_B-;k?Kd&quoXareB;&9>z zsfpiM!c&5T9Z6jUE7WdfmYY!!diA2kB4yah#dMk4-2o8P#rx%j{769}nL3c#?VQ6T 
z#?g%M`r0y+jN9LM;-tupl})u}pabDeyv1Bz+=R0{lW^&Ah1BNCSTD=+II%zZ7~`)! zi8guCCt8*~drW2OMX;4)mc(xzM)G$N`$m(Xw{IZyUQa2sYJ9t)qwlEBic=t3W|pal zg=%f#9RXm+3i|DKrA`v%GtVBU|LUM)g}O#SW5C1`FRN+|6z|7(^?E5m&Y1I8mHKqf zD$<>q5PaAEzGH?zeDTs`gB`y=+TN|lv4p{JNSh-n#O@}(#KanKz236J2Ys z6_#>I(>k)=ZQsV>Kd(1*Q@tzywEcC10;{%IPF#Haq?4%|R!-WLzV;FGYj}Y;=!SED zq?#p3v$cGP9W~S(0`GX-0Y|}4ul92K^Lz)ZITSjpIc}{DEQY?acWc>PYO`z~_ErG> j&%4t9_sk5aM4hnr#y*)uRjtMRPRK)DW1Vtsr||y*--4>? literal 0 HcmV?d00001 diff --git a/backend/open_webui/static/web-app-manifest-192x192.png b/backend/open_webui/static/web-app-manifest-192x192.png new file mode 100644 index 0000000000000000000000000000000000000000..fbd2eab6e2b8bcd7c510fca905f432c93a4f2ebc GIT binary patch literal 8349 zcmW++Wk6I-7ru0(l)zHbEsZDwO9@L#OUF`6D@ey8(o1)TOGrydhom3~EZrcjbf2zbldgchmVW?YcD$Cp&JZ$Z6!IN>KFYk z06YaC3bMN1X8SpK$;-V{VJijD7Lu^BXmz5TJjMw%*Wyr(E*iD)u^xi+4C(?pT z2OScOzzMnM>>gH#82JG>3`4k+QRr_)T7v;-cY-q@)9+UKE6f3+)BKRDsx1IHRmoIB(I>X3W*VZ-4lb<|TOh&*Tl%QyiGlHF4ajP?fM>fD zm!}=bAQfL#6b)ph@j66o*9-;ybdT6B3Y@DvsZA!=T!Q_UO+ zF8E7y!}NXQK^KLW`1*xK8~3g$SK|8>?16& z*Lb-4aAAC2yz>6E#C{cDJrq`AZt~eTp{){O;#ve0AF!{J4gA-w$s7r3&qg}4pSzpi zmHmm-v#3W(vM+J%`qm0QKbLKp2Hu6vu#?0rwi&GJ3HJlBsY$8W z@P<^2NrHqKsL1xWiq?#E$#+6=((Yi1X`0j6!_XmLv!6hhk*a9w(ks6TpqkXgKTNcx`)*s!ZvfdkQ6UFI9vz^oePBhBe|^6Ecu# z|28F;Yuf0<@gmx62)I12j;0eC|LMFcyNBnNWk$aF($pUDU9AkMOAbsMiPt@>n*qB7 zvY92pX{>^K8WtloA(9Q++T1|XBK_S}XIx==Ytv+?3U`|#-p?pEn+-RK0UrcUlAkUu z&{Qp*wGd#6YkRbpOr)-ML#Fk5&zvpCfT|J34p>yOSLdozf3HwMG@H2Bjg8l61-=@R zTNCE~F-H=vwCDGs@F8pR6kN1o2u2^{L=k!ogicc0ONdSB0Umu!pivsB^-PFecG2B}^=mFQCgHVjF45>yX1lHP5 z^T~zEt$?=#-?)b89VCG4MmzP?Zx*Kp?ytoyCy#T%sG8S);b?cZy*)7HqxWOWsav${ zg5NUVLi5O*bsakf7x<*BaPJ$A{N^y)&w#va3H02AMyd*<$i_)Gng zXmTO5PAwP(2?koxZOJw+z-m-%_D{=kU6Se>g|} zjksK8h60*|my5uvgZK4`K-Q&B_+K27`U$`$<^H5h=ob#~>md9QZf)6{9=a_@RxvOx zH=D`r)>oz8GLL6o%LW5)Pcz|8?Omw7&)04mW*~S< z=Cu0lRk~aU@OE4X2#@9SY43N-;REH%dOLkQG8Do3m$4(%xM!V`w@4jEcmj} z>ZT#dvq2LJ;AyiD&$!C2b>Kpj7m0!0_V}R0IEJPwg@aC&rphSCf*(by0by7}1;MU` 
z#Zg3@+{K6Fz_XQba8K-}_cy_){9px8(_+Fwtw4Gy8%CG5R_#Ja!4Pi(sz-Knyo)o| zjar{T259>9+$fomQk!w;)V4>N90sS16k%t8Aqpajbuu;xEh_s~G%ug%2OC8;=&$&n z#z^0v>BKNdivPR5`CGjnbh{SLbe8vUS26LWIFsjJhGiJ-@4SPhR>xFB+wozMQ9fvk ziA7e`M0LlFihLOHF=!xJN8^ihz>R(Ei>;%Vo$w^Hu*?A(dH*gVV4+pv^A|!IrwP~N z^*9-ajUNfVTN!p|SL<Av)shc8ZZ%ed9@-_UNB}j=z~UTFqYrSx!v61xrc~%=P(4}wMO%p=*KrO7rI%VW1zj3=F zKAu%MCOdo9xcENWrS15&)cx7SS5xzPHK9IABad;#hB@0TSQi|_$W3-0DW>EA0F-{v zWTtvkE8wl;cB%qir^|>Be@ye=C9joA*0@*NuI=agPNUC_fo`X|k>Y^MB?CN0@BHGt zz?<%HT?*ha&0l8(?S$6%oGP@l4%pdRBCOJMy7pT3QBq8Ol3nmLcQf@dlYn1D$k9u4#tTK%Stq~)I{=o?L{+D`^_++P@*`t3J>MD5?~jSseb zah{Y|sERhQiIcjBFbHYhwk30Eo5TjVPRs`QUw*eeq~?Y5dX_*8Pl+e!mv6(F0!u?k zUn+;-J#vZ4IlRU1rA&Qm73m#^DmPfa#&y7k2dT_2(#Shmw5b8W3@Rc!uTy{SR>Wbl zbar3DX|@Ixbk2#J{G}xK<%LFN$ITzB>M^b_WWW!Cn;azFyaD0^HZTq%U-7h0?xyoG z-MdFCnD*^nU7VTkZqe)IvUtbKopJe~n~A*ozg4aM<6EzlV*vO~Rmfm3&8d9WGf7OK z`q4ks5LLUyip<%V{8thwtKNv$!zslk6p!zYLy?>D{0QLz$2)*I)Bm`QT4%4cm=z_#LuKrec z2JWr6>Ses^r^UhdoQIW|j1+({SKCV07?X1;FuRJRp_0QoMUdW&70(6db?k5YX+B#S zN(SZu4byL-F^h~1VL+F{+x!?+k2V79O4HuCht1Ko57l{mBTJqO>WxdD#%s&Du;Pqj z?s4mraOJg~xjMUNPfMABA8yu6;NqU3OSELYrQZeV%N<>rc+T0pT!mwSgvpLuYA|v> zOdB>_%sUPYI$FO!q2f5~LWBBdJ~mR9i)=robVw4~?)CHy*>m5j@;B(3O!akM6Po|~HWjQ(tk2t@llI2V zeLKg?A;YG&y*3kbH8f(yQyn^-<_we)Z!#B;qWuf?_RkGwXB^%EM!yO)=hegJlfU}0 zX&C+Xg!a*{=4L?V9~t%H2D9Wr)90$TlOZ)R95Eb#J&}$MYUYduM7pMf1!dBVmi|Es29G~qN^Jina?lb9MNMc(@?UKDANNs1G%_}X(rqu7yv#YO->738 z(Td9Sr{B&4HEF6r&+OL9vT)NMm*fSuTp8)4=jw53Z5UC0pw{csV8En%`_afLxsGrP zWXAkH+#gvCSe($?BbvJ;Pz&OJD3}v8#EV_q$oy*BLI-0B6fM5ceuQ}3pU(_% zQ1@LJhQ176`IoBEuv=6NfVD~>FA0I+r@Ia(4RbQFBbT zc>x!EX@i@K-$?Gsl;{(w} zRL3IHAJitOsAtTs^{7gFSO~mJC)$JjJbm4?9;-ScqYDKPv1Psbn1F8JxcQ(fb;<_6 z-K#KxO|*%WWVv?C-KwNZqNU}REovYh9Bu!Tzi~KuSn1U&I-pVL-n2>p zTmghi&``51DRDIxz4X=jL|hA$LB<7z*Tn zo5BD;lmfUsyC_2Q15?krz6Gf8cD8%J30T4RC(j1S8tT*cZUe!P(xISUM*7&=Ek9BHV-;6^T(cmM>bqMvK@FPA6KL9Q^(JQqH~a%}JBMJ{oAoWR)u(ZEY&o%}X` zZFaKkcNo)d11Z`9n&^NrvyS@y_=Bdk}**Z&_U27tkEi?`gN 
zBYzJA@$$5MnHQ2iiC|jjk-Z#u`r7xzcOyZmEllkd{ADDvff&s;7jV>!^cbh8HdyyU zK{#_LDHd+<>(>nzqogl~eM!x#(e@~<;e13kB+iVlOn8Id3oif|?s!NKJ^P&<@<@7C z(vjiX7{i3j4lR~<>gESp5R#PYxVd7QJ9 zZ!hMqdW>W>#^i|jlkMrn@Zd%uU=}rFiiZJiU<2}-F7sRSO>t*4shwN_f?xRpSjGc%L-_Tl2{1SD ziV*_kAWAk}V?9N#IX0?~k-Mx&9dmaQO3gv!7U11}Ht4p|3pP9dj>p92_n9G9MUuJA zC2<{chSP&x2<}&!6(D=1#v_IY&L;Mv7JUby(m#6_zapo5dv`kOx0ESr53(DJZ`teS z!BZ!kjO)OpJ$(7MBI)WZ6Uzc0XwpT&dqKQcVARK53^3$#Tvx9DAbXlO=j8R`#3lU= z!x-0s*Sf|D)4t`zo{s}5yb}`+{Y>qdeW>bq=F@~}=_I}OiK30=Gr|>ZQV1+77%c_p z-ie_)=%9JKpORbpK(ne3H=Pj&hUP`$3%TMkW#ky{V47@<0LzOLm|BQv5?35l=Hc#C zA|QCBLvD+Yfqg=G1miJv-(`X*UkoZrI+aB59(Wx~zZ>9~3F~?mobg6Hcqh$!Kxlw| zT}sqD?&@Y@j5TJG9thx(M~GQ;!P8YI42IgM;iGn0&fku=q~tg~8Z5E0>q+y~lez%q6`tod@A2-C&vJ#UCv>knXVqEdc@)1wh&E;V=I`Z@w$6&XY z;z;k+^k`EAP0;|*E0_9&_2mJ^p<5D4Hx_P8|IONOikJ#c5lI};VkhGr5C8lu8%2Bq z$XDk3#b-?1`^_~g5Fml$D5ph%M( zw@35pN21?l)vlfQ;y*w4Zj(!G`S>IIfqcm!rg6>yT~4x~zI=7Pk<{KCG#`hN>=1B|ik0?^@5X8^9U zH_szF7{VRP;Umy~mN-M?8bod$WUr8ph$&Vda0)ra3yW?&XyL7!>v4a);(hcTuVL~1 zzjk#8LLhTvobzgVr~zWb@-?z(`MChISMF~=mS0P)+bdj2B+p<=v$ws%wR@KHG9n&- z(D{yM@l<&+aHW`~oBr9t%1FE#GWFxHV8QIw+T}0RS?V|Hifk$WWtOZuqZS<3Xo4pq zZDgyyXrn#>IruYRno*AM)&5|~s4~_8h|}s*g>0P`2)kSpf86tE3b$i+CMh9k3O>x3 zKKJ^~dx3d$RyTLB(CB)%az?Gpb9FBCfveBjUF8mQdizzVH?AM^Z#={2$LQ=sjUXND z`HCsN!P5ZJB+tw2Z&{!ke3u$yv)tXwQWgc7EH|iblH$F)y;}U*4w32#fPCZm0umh4 z>J#(7a?_gXn~{k#>(7y*79*uL_y5}R;cx2oS4Tqlzy5TI%6uu{tt#^nKR-l&pm78n zc@=W;#SjycVW%Za-KExZRv`XyGWHcR41nV~3PIHtom!*-jx^E2;q&h~?QSP4bC1_a zU^bQ~mj_FdE|d|5scDmsfzNFG7Km#YCU7J(J@3xGpO*GMx`otK>)W!gqtr2WC8?24 zseS98vhv^Uk=3J}A|q&;ZCYr*7i)Ic;upDrACc;=i_w%x(2&N$H>2Q6w`Fa4;7q6= z!5RGrAMn)M!`d1#9#=GpQyWD?q@~utrU;?pc09gOyX<#i&&AXXF3V|we4zUu)2PH4 z8Gw|lk*q>U)~^%w<1}9``wg7IeQ>f{G8JNOB-KWfA*eMD&AO6%Z%15UF|QQ_RSu0P zVg;@q_SsCS*neEJr-?dh&XuM7E)eA{`N1#+4egP3@TJOpKMfY` znfI>fj!oqf=MeFI9h((a%99I>UP5IM9U89n6xJCgD@XmL_t0fPdrkRe7#&66jDFT+TL{jg3v@)?~jI5M&sX(}KC2Q$^?IyAg!Y}h6p zO#|JDcp0oFy8rz@XiL8IEx*}t);awOjSON4tk=dbH|a-&E%yBXWHrt^l&2q-;u{B9 
ztydBNE~5Rbi`SuQguollHWpYdpFnHHb|M(hj%#mxVR z<=R~u{O|o-$?4vvb127!rC|Ki>NrDcOw7RF_aXPAG7EB_;B|GHuKx8k2gYE|HRuCw zpwP!qV~;WcUen?KICV9qO~3PYPQROcbQD@*mamnt&}NP~DrWuTarMuWy?~oDZjml- z`a?TJhJ9W|l|Hcd@Q~u6%-f{UzWiR|OBtx^{95EZQZ+!Mfxty_)`$p7Km4vvUys6a zv0<`nAaPo#UoBA@`SXu}BmWCoYRG(Lssr&OQ#}&!sr+v`v;0VvG+K~E=6wV}gZY?^7f01@Ql zVsT=hFeI5Q7Dhnw=B&0#2sgS&6@mr-f<8Bb#GUVkb4J0>#K=iYYjJBXHEnW1&U4 zkD$9XREku}29@>y-Z3D>%n=0DPI)N8g=2j$BWvDy2yF*rREsULMB>)m8;JD3%Gp@G z7`VcOZ%C9mu2NL`PrWrN3SCz%!q8^45T2W%NosM7qDD_$qmazzRgMf*Es7OPZ-K2k zLySH7JT=Qc@d2wpJFAg(@@{-zT6*5SaD?YmyIi|-rW}cX@$<#%s>Do}3raf6%4Xs- zd1n7K5;tE0Io`*18UVrt@ILymN%2K0zl+ZgE))flUL*xsr_$LPVa&TYRW8lz#g4?= z2(B&W2Y&)xsi4qNYJ?yzZ^zOH!@>v>6nR6mhbmJ%Go)hpHL8w<-CX5imN%@P>W%>y z3Ehg$+#3(@+g)T7GvTsdO#^w$qMjJ4=Y9LR_205KY$P%IA#>eXL?C-OZs`fj#A#n! zRSqvek1y`NSZE!GO91+zibdIcwAm?HP4qtJH2}Zw8vi9j2aexfC_8M3IMR;)kSN4a zx3ppw)BGQoQtYPSPgNa1DMg;Px~>Jy%)iT8^aMfHj$-YEGAA_25&1zZ8_%CYB^2Ohd(SUE{30 z*{S_8x62P+r@83v9k9u6=^7w$RTAD~yrGl=yn(A{{6goQSs|tOL15L#_XJ#vt{fs>!UzWCewip;Pue1D} z7!|O8fsItZODQ#L@MDL5w)8Aa^ba=i5?T(?T01DK=8OdS5@}jjm1@ZQ=?Im+klEpr?n)TYU0-RKR?OLb9F} z`Ee^d-3L1#7?oriQrue-W-tk>`(rDu$&In9kL7@fi)oqakszDp$b6xCcSz#b!=KQ2 zeAVXQeIJgx&M3kGOD5S<%$&i!W#j+Y!<~M~)U5F*4NhnU6yc(~BBoYBX-1}zLI!rZ zThiJejmNeREfBORw^QyN7B(D7jx#NJ>#R0n_!W~cG{X>SAw?P0(sysDpJa?C*(|1@ zH9Ru?Oms_Pj7s&BnT_FcVBmK52!SD$!eF%+Z`t2di4@HLRKZamEK-#qV1be`wVO z0l)arw^x@#3@mgWc4@}C*V{u6dfV?6#haX*T;1MfKF`b;NE)DM7*_Z?t!)gxot8JX zR8%hc0S&vm1g+FGf2jQW`nH~R$V=kdq|yeBh8d=fXt-j%20x@jzC$;jn;-4@KZDgE zp$MVFThrn;J2c44hV=#lab&h+hQ;ZS0!;on6Qo?UNkC@d3MJK-&h#Q8)mmfm?b1{n zaBGaPd5{CCeuBpvg|!Z+Sw2mV+Jr{=$A4KaZ`(!HOR_{l>y{ceF~+o{rj5833C3Oj zI6V5}!f3e_UrhYK@j|lNCQEzjWveG(-zdB)9y*j`!m1*?T{%7kr=w_*9nNlLTe3vZ z>QsKo8nt&Mf_s9~rK7A8jbtsHNjc)cwot= z`9$Ksnl#g*^M*kkvm#wC;2@Txf@#z$N&o;k&WfaSxG;mRo^85@J~bU z)n;nuMrajfA){M(^Ora|X|ULci-h0*UQ?gfSr)a$+mao&5UT}FJg24z7x-teMwGYL zhYPSx=#(fm&!VjC(GLSu|9cCtBSO(}{|MOKhsy?iGF%$vbD2v2m0`FvpP`@5qhRz- zKG7h=^w;5Np-IUjlN*er+kLLXNQ+6Y#G@&!utS7U1t9xM^yq2MK6MFcDeNEJi`qzFnc 
zQIOumiXcQl>Akn)9sK_OZ|1!jXK*Gr_nfn5_p_hajQ_dmRJuftkEA1j!V--7O@P_I=xw5H`yFt7` ztjgQ^vesE%-Sf+L%KDvsPQLpuS2I=&>-;kUhn_Jw9HZ5aC3hPkHnb!*@RY`kj&ypV zdj-5r%rU?Jk@{VtJJJLzPeT$|SgCK+qViMKSxHRbG1z7xi-T?DK_yNSF0{kVDs5## zk(DJJM{k4p<{7fGV1PNtk!bOi_z`@y82V=zpUUp91Ug4tTcYcWEbB{`!W=*P3{un6 zz3b}dJ_RewPLJha@MeBSJ$(+d?L>6EDt{NH7u69Q(xPv*$+6QD z^hC@#77D7F(XW`d*d8u|J}}0-r6ra7);(YHd+3;7SF{;0cu!MzpM%~jHu~+l$%e3z zHE*y@PcF3)4W=iv11;=&OGm}N=fa4v87;yFE~9z+#QV@9+Zw}*YT|w-%lRLNmZZc( zWV^pVn^T&*lrG~G<{@(%lygfktiPMw8Um+i3Ff43o2Z-XTdl;S)HdC3$MSZB%~&va z^rq*;@N4$FnRnTbCH(!4CGD&;4=V@O{aK=_^jRCPZ`-L&bzJ=s8ii%Rg#PVX+Gz%L z_pKREAnRxyJ+aC4j4lDUC@6@DqZ2**$QYj03mLmqFNvCfZ!|p^f)u#l%&t-Y?W?Zi zg2IOaY$3gY3J6M!I#+j#hG(r4v2$L1`FBz1pnDlFTG86&)aBv_oxex`&irJGuAlp zidXB`0hVxysC8P?=9+gS_^?j@d+E&!>%SX|%D2$XxAYd3h6EwPanfEIOgU49HmRFC z%FzsG`HM{y8d0#?_{5lzU{vPn!Y#)m6Q8BhkNfc0+%cj&n_s$})HRid!(e$7;#dzb zRw~ZRYDHPZb(vV_Q}hW4K}5(BmnkvayCVsX;)?Nrw(oqSy2-kFUXIw#nHB%m{S7h^ zd3PhV0Qm}*;ifVOzDiUwI+w*Sf#Bi3Ptw=q$sBS~IGo`kR`XmygCq%z2u^ zFO7^X8K)ojKcky*8dsr8GDz3N`iPFUk?yOXcOtT3<0ompT8Kc;J@O$Zx!pi;&l{t; z?G=n0S9-VevIgz&d6Yl$vn6Zz3%tCYkRC~S;d?o#!J&>}<}Jf7s`XdoqEQ^$6zPW> zrVl=q*pR&=GfY-r{yWB2A4R`zf_6o8TyMeY4;=JAvdU|wt34DXdvVnOTzcIQa?PDX zFE}};oMHtkI_kBjJIXWF*Ft8F^jR`v(j>@S@43C^g_?N%A86u;C&Ca;){{?A93gYv zG9cJ3)@$|Hg3`9ER&;IANZ>yHE5f7<9S+2S(mxxACwYQ^WL$Vk*3fIj#=asS~hH>5NTTEX*^OBktx_uR+ z^^Cd0wee>3!2-=}PU!OlO!Pw*3688nM-B`gJcxb7a#uVwaNavC-D778<6`mX&XkaF z+%@FlMGV3Dz4<9eH{vOq7sAL9VL^s^a`H!NdoB-m}|~QIIwfcyycA0 zASNT6OyBr$%dYu%V9;Ofhwgg`9^3l_gjqB(4B_I} zZI8~~i``2R6k-X@d74SX2C_4F)|E0PuF|0To2#H$9K8&rx%^8n)xq~W;^nKzGjB@C z_Qa+NM3VO)#PCmDv6**6Or7igfMvu}9;A(YPiz@aG%Hm)PL3Uz+w=pGf7pC9=*jlr zai?^f?$p4lI!w1{HdX_zpcQm7aB>tZihJ3>GNvqSdlvDU8h=K0A0XY3E@Y-7t( z=mmzR9S8fK7BgrV;&cV~ z;|@0FW7JjGnbD!Q`{D{?62M9Gu&$+V^?HT25yRzFj(29pa1$p{a2uF1DkR1QW7)>h z>z0QwX0j!mp%9_o4UYtiH(^4Szd8QaH1idXyN-0ocxo!`ebzVdr2Ghb=VNg4xN`Ho zpc~!Crc)l>U;e5|V|O4oQZQeFN~AiA`jjtRBJ>DM=qqU6{N?6?LvdF86K&}k{=#V zldo#-cxjz^`m4DejIZoJwhwbYdhFHO 
zfD;AC55eq3?ew?iwD%QpFab#DlKF3l;e=1X7Ip z0$lhyV<&>9g4_yEv11_ZW6N*^B6Ncvq$7w&Ep4_%1uBB)R!{QU*&A@b|R9RbI)%B<$@c%Zl@Q50-kW7vA`g!i)syV!6mhd=wPb0J$ z>qzSvA}iEa`jc%we@hgoPsU0_J&ju13<5UhMihSEm{ag+iKmdCreiC(NxsCR*+1we z>UwvPTc$fRolEbND%eBY!%UcRK$&GWxQ#88qfmPP6R15hGMtD}7n5&O+ETm{gG1?l z@#}rsQ@K;p0NfbRd|4Xb^|qRMXau+SFY~NKDzvx*LfkI(3z*byV;vU^5SdCi;q>#kyaKNCBo_rz>_2|^$h{$3_sbUtJhVB z9Es$>8fdf@od20Pp<5obp7FYbfa=poMs#qNgq_fyi6DY3Vx^QiCnw^jVFyEB1}A|W zZ0g69C-*g9xsvEH18r*&t83ny;cvQGT&%CjC$&Cs9BBB#;KZ>SNn$U(M^) zPdt>}g$oRo{>(iz?2rlRrkXzDq$#^FI>bbxcwG>n*;tt4w&&V?0YMX&ca!E9zdq9$ z)!zZH3_u4~Bxa_2zOMGj#@JGIw(Gw=1wm^>4Brj6h$oCww~P>S5t6|Q(w4N0-|W;< zv!90V+AK~E^LLNR9WSKA&8|*4dq)+IR@}*w$YVF9j2+5%{CTJLCv-!IrX9lmhSErA zyVDl++a)}J74lc4_yDQ|e7=!(ZyzQ!vG9k07VH0^?)LZOzK+){ta}H8mDnk- z{Z+0LOOC>$D1FoJ*22OTEOW?I^PW>VikA~&$(uybH(gMx)7?Ivy%9q_cP{pcX zNm`uK;xY{uGyirWR&9EG0p7pIxk!(mu1(7Xgw9s+UsLSH)^B)T>z{?o{`Rg zh9rxoj`ffpF@Frp+}0YF#8F@HWK?{7L1JY0+DXpHGTnMwZUn~RK$R9@AC7b*`Ww#a z^32xOc92N_KmXx`agAG1R{G0KUxt%`N6ai8qNxnwJ-Cr8;3n7N`5(^4(f0_T3z!(C z(rz8a;B)p7oV&Y1Jc+VIZvkKw`4dxOzk5Pm!IQ{mFp$!JgWrv~ty#fV0>)FbH0Vj! 
zf3EFyA82*jeI0!&cuZ>yJWG;(ab380b2mr|xalt27Tg?!M^sak3wnQyh7n zR4ZV2|0X+p#;Y>DUA7vrQ9n#BvHou{%iRFk1DccjmgDTWG66Y_%E_?G-YfsO;9sEG zAp;EScoP2kH5%OR^-bc)pH#ZS%*t|ZjOU;K?~Gr}*IXXwecptJVnp^;tF5O5`@PzrFw{7CA=yewmPi| zgAAzuM3^3sLtK7K5&FdbMKi3j1?SJDWKz5x%aIxmu+SU!cwo^W4CGvK`7a^fC7`%M zTF=NN1^{T<@dU zaRKZ9q4Qn%h9>L}k@5rcPfhJHgY@W+h1fxgGsfw|f5aD__9P=FvP6rWgmWxIg4w_d zaAPL_e!E=u^FQ1YL=-SD8D`(VDkC;=e7MIgx4V*QbG;PC$0#L=>0eyId+cTRSBts2 zm9#e>)X}5a7(ROPqu!-&?Ej^Kf2@zoCwA|iaQNvvjQS&^wf>*UvX6xR^AlmSKqMWE zyAuVTP(Iz(Q1F7NVXpMoG$vJqtvp9QxC+v-f49saOd2I5{AK%f)}1)S?c;n;&cx@# zgUq`zVfIMH9zr!+xpC}2?hj)@0Kd&QhWCHp5o+2-dO-ukcOC;i2B$YK1Ye!|7kL=g zSAv*AWPqNGC~dvGxkAa#Yur6uH&1Ih#wCsee3km&W#d`aS-}BBz;x2a=@F74f|ibq zpTrki$Nz8paXjH#U_ii`aa#p1#l{L2`l*E=64(gei_ACq-&!XVNkxDbsc7#wX#Ml) z3moYg=)x9X%-;Wc^S`s{l0*A~C-*K0V2TCMrIl63iCL?V|CUN)P2^Q; z7&4!*L`89=CLr5>Ow(=LR=kDp|CYH)S{h)GFMSyQx+ZfsoUGn_AGudKvNsx&@n4FB zv{Iblh<^5$EPRv?;<`S zSFct|$y9^8nXkmqEWjt;O}*}ylP33XH!LdQxu6Rp|8@YLhXqhOe47NpS!9kQ>A%kf zk$A{M(9(|S`0G|yR=b(&v{#=CygEW}8$r#6U@Y^CUW*mm8y+CZDD zx!Gp*p&`;`KQ4qne!RgbEKlsi8AoN$R=F^3{y0wcV5jTpjDG1# z9AJ~MJ#+_^MV+oQNj}P~wLOnLX#DI}dgvzJ z;^nFL9Z5>5g5-DGX&1<`S`^P=bV%;iVV54TT`XO@+&dfV`uL!6@pMUhRcrc?1yNYS$|$KJ#WXz^cc zj`>(V6gpK$ovogecOOtu&Xt=ksQa;*UmYuj;Xr17zSCLy+xaQaQcAjf;JVA_mk-zx z4(85hN4$l@zvtc3fnrrk9Ruk!Ew&0FWfQFw^=z+ViiJ?6>sK$|0ccy}6EGrTCU)!=gqYA~IlFLS~OWGuDe7{7NhYD;pt#>$1xQ7}Lh^aKopfuP*IoQujc9Jb8T}RL^y&r8RAt?Nwkq z6&~={{vCaRkXw5-*MUIjx{xMy+q=B6+f`dOWx7(exJz})Y3AMc8=0sSjl%&lgZ9^_ zIxW{uCC{*dD^s5u`Z0zki(i->c(fgQ###FU)ieyF>UpjBJ?#(wkq2*j4W)J}RRtB|Y|6K@FW`aJ zrqE@pfhLhv;?Y8m8Ody*f+G=t>ZWy6+9xHhlNnb7(%aYP>b7LlLRM{S7Tct{uEcf) z*h()m!)^dE`C4SB@(^EsLD@5VnT(UGlX73*P?(q3u z_)FvDDv`UA@ldEUvmNBkec&8hlv7%p)cN{KvXK}lyO&BmT9A9Z$`O}G-(w!@vFR`v zHu_M1fQ|afJWFxwrQ>)D`Qe8HH-2EwR^RxzIaM@gMdM#W$X0~nn`FbWh#HXTuoV)pL0q3;9}x-XerEe%Rl`UKJZ+~@uh#7fSLbJ?t6 zr>f1>Lc3@ZDrz9!q-OA?w#_%z;->c5lWrm2EF_TK`ir3S--o= z=6KfGVxdiXdBqgN33w+@VzSegm65EA&ZMBLA6}lQUw|bl##0E@WC}XRuH)lTM(;VC z<1K()t)XmARm@gR&37nuUt+vTSCGvz{=G*_ 
ze?~N`^Y!>8_9L9Y^^Z2?94;fj`^BYXN1XqO^yZDhFltEo7b*$6x8jnTWGk!7uc{+4 zs*@7Si=f(0<2}#E2P#YZxiW^(GTNvqQ9L?aF;Q594`!=J zhprO!QtuSr1LXI^`Iu!eUq6r|DO}8g1W!35X{;hAD)5oN11-13dJ627`NFmrY&>dL z9EjRbBiS-UGCYY@S%Sy_trH<5lxX!uA@>4G|FMIw=Vw_X_j{rfqn;U{Efb35gebw8kjC`m zVyuSefLHT#iyV^)Hy37mF^riGTBYHLbkNPSpdp=ZC+d_JsxIJ2e=DEVb7OyG#6|gB z3&ckYXbtKuR>No^&GG&=72Cm$vs8SwvVR^%rTp;uY;0a$XoBtTpJD?O)C2RcZqLb! z)+cG0-!=Y74`is^+A4p(D{9PrwRW6&x*fJTA)Y;H91zw$R1>%|dHy!1m8TRYXs))L zk^jY_7F7E7;WN5$SV^yMcF4DokGmG;j%L_lT2|+Dk%BNNxF`jw4plL7f}G|-s?1UW z;d}OdARa`!@;j88sBm#~I-zj3z{}NN(kUnP#`PO+UPv z51g$52RYlWJ_IdSyT!lF+5&#sp8k$SyQea`;59Ru2&TlpsavM-U`!~{f5OI6{f4*Z z>a8!;HwFqvM;$_V4!?4&5B=d*+WA4gs?o|2JmGo8Z+!qm#PPLC*AiFVeYuyYjI7`e zMg>n=+)%gq{^AaLkh;8lSs+5GieH;!At?8}7JikrbDTl^;ZapLY0B5UwT2tuZ2!adg zaO=G4B%rTp90rPo8|3gD05-d`|l&@Mqq4xOgYB!P*T{8AL&e1)}I@!Rd6B!7<&)& z5Wxqv`%Wd-wcDz;bhzi{0;l=(TAyvX#2C4@p6`xf2O?;h5j}YMnJkkp5&B(9Eqh<^ zB5pTp=XMB7V9ZEpPj>b}`FQ-H&gqta=|+92n@MN?p>2=ct>*MG<0Qq8VxErM(li}0+jIRaJO zS0CPF<#0&FhpVOv8*X!-yL;vH%R|{7gQGs@57B=qu>UT`d?9h-!O`KM6M<1ULg1!i z&(mP8W~_fc1ktQ8b;BM>GoTp63TIwE6qFA}JvLFrYf;v-4pN0rFea4n&bY5m_q9da z;Isxm@L<%Yi?JqJJ3g=LR6}Aun~Q?pPqCQt;UEyf&B;|!#R+axnJd<~FROO1dJolm z4AuElch-CdJTz_Y%hjnoJ9;hZ@>AO;W3y=2Zc}JJvwSNV<%v==FdwXTQM}{X3RT>_ z;@RnUnTs$9SI&tWKN~H)7?(X1HpY-@$caf~QX!+*6!xo}{XG*W!L7lF_x;`3)S%Ak zo}1P%L)jrD3y|1tF%cFaz(^eebsOGnlV9_>`C-!SsNe9`b_|@z{#51bdf4?FlZ@S4 zPsjeZS4Qf_sFooTNQGagL74gN;0>Xvg^}h6Vf1+XZn2eMxN%1ylynQvK2X1)ROEm1 zlUaN9A&%3qN@(bFPJaN?ON2f4IX52d36l5U+IaeY8a>+?eGH*kaaAEK>C`YlGJJp0 z3`-Fu;U}Qme1wE{TDau$J5S-U(Jq6!YyD+50e|A(v`OYy7`S}owDw^Stz-}nin=LR zw~nNTtR~&BB$&@bSFI!)Jr*}@@~GdjDx9nhaZgqCE&gWb`nyf4sIh6k$8yP9DXp~Od(f(ulad3 z|NMg^-#pXCjbz@P$&^r8s9*VJ&!Z7|Sd+4TQJkqng0|vg$y8zWaFkS`r@g!oJaAl) z+>x?aI?uc$W~np4fze4Tm1^2a@Z9PW2w8rpc4KAvvuRGw`LAzpc`VKJm-~4nOcj); z8J+n2T!(J{!RO`=GU3WY!R&K2OM3=`<}}8#6x;_YI^5XJqP?WZkg6_~#V+zHvw;n> z-XoU}!Z|`2;*H=z2r9yq%fZ6jrv4TmsdC5=$Rx zCknJ@D$n$F#udtqT|nHNiDCWL4jFqPQ7pjJ&}W3?nO&+-P=C2P;N4w`N~td$) 
zL8&SDd*Rtnklk^6#ZdXLKmSZ_c_{L$jalbN7|c_-T*%7g*9KT@#=zpK>NSPu4<9=l z`12y)nVUd10%tqi2DaCvPXV{~%h*(^9C*{5usD-$LUcGCGK`mWK&Y zZl~V(dC1p@nlf71T@Pu_?;f4KA62JbV`UFG=k^@)Y`n|ru19XztA$%qeUaN^UTT`I zIqTx&(ys!G(0%ugmtUquw>qQ6wvtsb2>`{7^*&>W!V?_#G7|zv0ZL& zwZf8Zw~Lp0lOH*pF4MU?P1exSiZP3^Zfmxh=cpSGf5@`A_;N3sKB*&RL@oOMCv2P&2zFH7NXs+i5Zw)k_K2i2;isqH=6=>KoNl{TaQgE%W zBztL6Yh(2BS&-f{$khIob%3@4GLDorb!+UnTwLUb0-)+Wc%%QC$@pzyn6rE3H&1_# zoC9ty#AwEOXWKaNLos1zP8M`pd|qZcddq_veb{e%p?T!_9ezU^j4Yj!gWO~8g~K}E zo^CBo=%%eYZKE(dw%x0L>xTF2t%uQ8JeC$jE3z&M7J1)Pz0}67)A|=DX`D_4kbMw9 zYvhT1miV_;4dwN80>Lcs{2r3eT1M5?ySu-9gy>*c``FP~?ZNZNfLrdcgvm9~Y*g8! z{_z9hxX^0^cAR}UrYLM|PV;d}9n*5how(j2o>CX-ur6jF+E!jMu#x+z;_1ORvtl)W z(mcdh&hOQTjks1d@v`D~1j zut+?vOaBB&R0R3zY|!_xdfrM}u^VTA!Nh?}QQ*K72OltB6<^v*#qL!{cUCWoj(IYQ z2NHi}Wer_UILqa(!}d0%3x#lZbkDI-7I6k0Zp{8`Zy$R5yttS#r)`D^z7H?T8xtq! z6At1s1qf$W(!*>qaBy~vq6KG=9IsDm<|bCk_(yh>UBQpGW*9zu+{qwms_51oDNHm=>toKL;3eZnFFDRC zM_?Cp!R=E!n=AK2O^$iMYZgDNQB9AO0w(3sK}L3vj@aNy7xdH}%0*A1zfT9uriTsa zNDXw)d@o98T(l^c%`(q_@)+oW$Mm0)W?>Du=eindi*#$OIY^gwDE`s4TrL#@Q73Zt zx9gBnaq^{IVY`hlg~ZJ<@a$=hyT-c|TvN<}DWu-vdaKy`WBx~=vIrr)mht@5RKLTq zE!kY>rNbmx=>9E{FnMqe%(qG1W&Q{VbK^=D!K1)@A?u4^vnH! zy680v>Cy9h9&tMcv+DDC-3rQQ%$=9uPRJ~({JZaU)nuMfQW}GOfbKjb>o;PumWA-S z^KV;7;n>s}%|aMdw+=s3yeM@5qkKe3*}OvvRIGLL0oYXiVMG2@A2eG@=fHM(b105M zsfL+X5eJW5x4OeVsX`kwtlL)nzLe;dET7VwZNxngFqZ1lUsm9E!U*_(7gMAsCsT!p zH-@$ma>c+P)d?8gJ#B-7cLqfiCfNzo)CM|HTy%wO>IC{W^xdF4dYNj3HZiv^-4&7Ps~&mKy!#209pd?`RW%J8G3E_y?l5 zuXBygA*O}?+8{p%s4L4>-ZxytorvL^Kxh^&5l)8RtXTQ@Sq>7Zll;T#TqlA5*8_@w z!*CUieipL(1Y@_Tml?^&K|dyr@diQX_Azpj=vaz(7oND0?s1K}J*MULo`%~AW^Jas zH>!;PBx}*|YQYkl`Q;l2it}>&yYJ`v^RGdv#VOM4Dbo%!X^HsFHxx7P)kT*P5d$KR za191d$uR9Z+#+@opY1dKr3(7R({2`nq? z{X$L;Atj{69av&m6+kJGf`5YNcJK zoR2?Xl+|bQOY43$F;FwXz`JOTe8%`3F7>iR$O0hk8y+ogYEAa2n-~mSzaw``TU@5l zN`9ayb7$k%MVP+vtAjxv@|9=k_k_Q5{eC0-MV0T=-_E0OVteSKdc(H`QIZL9H8>u? 
zCZnjQ8mRC3bgAOb;+4&ay_$C^J6qLYE!;!mB%!J_6Y&VTJ>LTX$Puhp037Mk5@hDVB&64o$^;>5_cd^K%DA4f%_ODNRKA`Z<4- zL2Covif!^Iky_9yf59k#=U7&WL{t26g8SlaQK zp|vi%yCIM$$EejVz-l=tY``bp(#os@??`iL?~4(g zpHzs}oxcXDT=s6P^kE)-c1=lpKbBJl_HWMhOO9|Eqi+tHs4wUS zZ!3sIZk^?E$@7~Vycc^659*r@nhHh?P%91qeWo7dJpw$F>BJ)2l1a2#h zch_wxw!ScI!Kr-PT$viXZqku*?DN!1`DlgaUkp;O<6fn*fqTQ&f9@Y7V0s=RP9j9Q z)BDKsP3=YXlqg81e`af~?6@7hNJ)ptx?4pxpi)xY252*8B1rMZv`vx0wX!~9SJw-y zi2?A}I91}#7t!o?5>qUL-r>LsMlX=G>bTGPJY>gcCO`&TsoZ2?a*T#~p9JYecSwBE zwNq?9({GwDe0gQQG5Dy(SH`}qv36~cks(|Zdc&h-OTQyH9!4iQ2DElWp5%MGBMiyX zkG=8RXRdOLzv#hP*MfHgqxIj<1qEEoiEd9PEl+&*>N!sL8}_kQRX0UNjQ0p=^6i$? zJU3Nyg)wm)0bi5pFbrse1lasj0?xr{!M>%8r^{m9np>|KsqN(o>FvAneGT;x?o6)^ z8q&t{rEV+T^R`?4&Z^PjKW|}u-xR`N|G|mSYyp@M&%8_!`RQYD>9L*V&kL|x4RDo@ zy>K94^kJ9j`P^)usxSR9k{^oc`29&MQ&0wTZ2T2pecPmD;O14B{r^a&79GT+jXn9p zlX!Wao=Ogo08~yO8G#ox9p0DA1*&YNDIQ~*{jc*yE;V!`-Y%EY}+KA@QZtIY1cihlR%4Y!wWQa6E; z>2=>bniA>{hh6(%Kk2OM2=mb&f(qF=iTXUKAxBzQQJ3 zGq0VH^=k~`dv!&*90MkPnbnD_&9$(ScBJXSQkmCou@1$JXD&EHJn*=4hY2eIW}ky~ zIaIa{OQt5S58sy?z@yJ~?gU%3+mKPqPBaVJs}L`le_pTSHV@ z)2uIf@uwbH;6c$H{cjI0YG-gC0{&@k7Yo3;7XLZ_-!uu6PxLV%GEflkMbKk?mc0h( zo>z2yo`65_?n1-1j8EcBU#a-Mqi%_JAHukBvd-RM%>X=+~KPNY>yd#xpk@_A5v{(1(^k=?et7imo}Bs?EG@Y$y(7ln{&bVnpjCV5(jz z(>ry(BkNUxeYr{F2|TJ>oZq_*dqJbkt?Bu*HA^PkXJfn2I<81cHW>es`?%;}TOk{Z zQ`;}8q8hfL(5-9uG^;dyzL4cl#z1u4=>4=<^BuZK}0kVR0{4(2aBY42;3rx)Z{Vgzy@nGrpoRZ$3jYG?IbRu^= z7|Ckx7*A#o13aKMH$ql1wfa3I25Fhk=KxUiTN8LCE&YsimsKQ1XA%nP*2_w5AtSq- zKGf05{(-DXyc`>dcP`ww1VKnX6+wqy;Rq<+Wc<9=GHZ2;OTstyFL3HuCUHQ=tFK zSL@m%0kFx5WN{VNQ#SAYkif*jk$yV>f9--eiG}3;mwWQ*YoMpf9*W12J~ZFLp`nh! z;xJ_f?d`mnK8KJx9PZlaVVGRZIIl>?X2Lsv3wmp-}`czO1#f*zosti9b%>-a`vG#T>H`n_nGQBBqMZZ(*($$ zj5e%-gUV-y>e;|orC!cfI2(WRY1r|1Te9E>&(Bvf=*_AlE%(Jh-rP||c`bqPK$ z+FqaWQ-=kp(wLbg&<4Fp0r_)q%Ju*AF(g-~{g^zzy|$(KW!(zjAZeghS>?l29Ju=b zTgnltJhCN82!`{PF?T{ZX=iO{Zd+q8WbK!tdhOxZ0eFFDi-q+N2YJ&}M50rLapMpJ z4WHTEAr4PMYW#JJxr&iJDPfe2_4Se1+G|(3(=>iX**3=SK=ssg!w}! 
zy^ybclJ@PcSf*8VK-K4DN&=IFKsZd{$ae4&Gc|bK_6d{Ar=3hrpw)cs_Ulb0-X3d^ zSk)NT#t-KLfmf+m0XO6;T zS^9Uc)Mxhf>gzww-w}J7blM(Vp0v>@Bv0Wd60mn#!e`+5U2m74E4hL3}XejfG($U06c# za?*2_0_b$YDGQ8AjPNGz zO>sJou+sRb4o**0&Q2eBJOd|W#IJaB*=bSN(*h@R$3JD=DL>j)JWPKoRh2VjbNM~Q zv+)(4(L)?je;{&IS42J~%fjC(5Wa}C>7fXD4+Clh_^=r_DzEl>8|gc>#0i;zZRp$C zK`!^wb&IChO;p~8jMAB1sK9H@!k2mOL3e}g!&B+1z(DHK+ND47)+QY$|Ci?BxHba= zdK{RNw{0cTOEHi zVv3NagdX{hNPsSg^>pOkfxW&ZCZe}i>*3&mES~WqvhGke80ac~=4ij0QhMi;Ycy;; z@=n0L@(@Cp@+V-_y8G){K8_Lp|C7j(B$vDD=aZ9fiTqxF4orZ(~?l`mZ%^)AW zUYzF6vc*`qhTD(g7sj@ic3}Q#f+hH;B7v1`!+rG8`<3^o|31F3u{9SQK3^(bw?0fZ z?1N1A{zon&*_wO12~P(mJRjjHJxnnp`kM`$XS(0|SbU_{NX~Zw1j`Q4s*J|z3|wD; zlwzthw?xJiiPm~=c_3X0lpKO=0xLcwY6^Px;8AtDr?N&j0S&2(1Gr-Rv7})Go9C*G zaB8M#zk9{|+Oyq>3R%U;Qo%>Moj&h%`fRLR;Mtj1$cG#M(i$&5lt2w&iWdjpY%V^y z?$olC#0CPgdT+;z%kL*~LE=0%4CHhD6c-ewhUUqxdgRXMmaa`hLUrTQk6<~-!Cv0q zUk?fFx_jQVJ$*PFB;uqy;KQ<8<<6C-@`u%-gGX~FSTdXlznPMtUfQkq^WsgxAx3XR z{#ehAeuc3oh9`03E^V#pBiDYuy)u|=@}^ag&@N~C{pBS{`AfDfx3ux{O!HF2vVeF{ zA!0%=YFa=z(YZSnRobQ69;AilS%OGnHa$5CHB`Z+g;4_dpq}8geUF8K9e7penrEH3 zmn?;e^XG0sY1o}uV()ag#Y$p4kD+x@G%Q{I8&ECq>bsoq8-mH6594qyqvx0`YzrQt z54tQW@BR0b-_3}iNuin@7{~z0qhNm%&rGybw>pM-U(1Z>Tvs9KHcW&EcO|G@C1gv`!ct}OTfvl-)Gn%eJf&WN za7jb^I#1#WhyWo(;sGdKn|^$caxi9?y}+N_9c%ji)ZX3eirobK+4cFmPEAkYxXzL{qayP?=15HT))Y3WO@Ma0#y zKM9U|XrC?0w7}mmnl(sBH7PRBpC#Vtfs*E}0WBt2?#wq~G~R9K$8Al9ZNKMClV=#g z$h+>p=6Ky;BmH2XE3Epf)RwwAK6{TTo_686ZGUn(Vxw7$^80Mt6+9~6WY+Y06pzAY zM#%;Z!4K#FQ2`yLiyEgt|6QD!flZg|LFEoj%(=T$ohpBxSa`zdxfxU`881#!Xc!Mj z+16o;6MeT^LpIvLiMc%WimO+jfcZkBODv>6wQ0oFAKM5z=6sH}NB7AGk$KoxJWoO~ zPSdMB3~TNa0lUn9%EjrnT5R(+?H<0na0U-{2fL4Y6w)v8s00h<)DER>dr89r9{l1ff<_HF*WKlZnd z(c(_v=8cHFs{?HSo0t)3nBa8GTIzJ9Fm(mKmB>J zy3r%_5RPxVU3_|4b2n3py~XiY?{L?%?^^MGq)4R zl@92^WMDanfUUqtoALW8z*`1ovufj|AVHPt5gtjv3J2s!%HdDSBfgtz`MMCOG%`xY z{MwN}du(04b`Y^X8m_oc?Q$-z{i=r>vQKHVx@dd0Iz?;sB`2|kn;voxm#hvt_+CtsPwi6Ov%^IBFAlWH|T7}NlAu|@!HpNp8E1CdAnJhRN5qY8}0@)+x{2jBQF0RX<< 
ze+h6>L=%ep?5*1#0IIEd!fThS?XKd#Il6t9*=xe^=J=)7mpb)%J=l~BgUj6yWUlhMI z1K1At`e78y@%u@hNwJIy^~9U8w@5ofD?01L9ELYoQ1?u?+yF4LP;_jlQvga@u6~}i z+px1yJ)tok-pQDgG2f|0W!laI6l8c*MgR3w7)6^twglQn3f zw`bSR*fB}s{l1mkB0S8((5y6V^=qKQA8=M0n<>_!M&Kam{yyoUM?z%LkQ;td9o7sT z*(>6`?W$QQf4d3^l>3YUXK1O7PUueEGX~wf4}=TlPia|nR;;PKI=9z^N?{fUuI()< zsr0zo$V9GW!%vPgF|8>7@$?>zsoZE#6Q^b3<}$wxp-UeqpPq7ra&m?;K7+C{=V{H;yGLoo6fJ|Hoo6z z^N4Zblxo>kIV+y#>c1*g0szF5zNKFWNU)Obj;H5ZhC~~R)&&SlOm*@qC?&nGwOx08 zzprz6o{1%piXef4PsLEXc2kZ&*}?HNY8G(i=ReV8B*?Hlv-a%H{QhB0MNE)pymuz0 zEvnCU<8{xR;OiaM3IKf#oPG2>g9X2yGqkr{=XV-G9vm(cZAU4jIf?W6@aDbD51-e_ zI1CEOxZ@u0G?pr#4>(yOm0maZh@83b6$ZS4=k4^9)Yt;VZ)sgoKW0+)2M>Rgi2JQG z-E(cr7EyR#%ym5@4(i!;_wk(iAqxRebsS$|aSkt-CJbvL0m}ClMK|`%)``)IRtd|N zhXlc^1GPBn4T3qJY>OK$DY{!|%4QGFW#2HMNZ{M5F$uJHOij@*Bfp^tWxt-eB)diyNB|^L4_de24x4+gxqR#~&IJP#1Zk-`GTN+&gurAD5SyslLSG_44?WROuU zG_U7}_fM5jKq0v_CE)uln@6r)N9Iz%_1PC*c-aEb}xE9fBi%&qRk- zevfGdHJ!%t*g%M>`41EAQ57WsYRu-V-r1{=Gl!ID0ToSCR@nLg?MwH4ZR7Z3oB^Yu zQ>X%wC$`P)^l@#m>Pt4fs+WfcBYRl$`s!~I)oe9O?IO!W`~AaXb-8z{7U%M!xOcMB zv_b>6V>z@9Rr}*p-1#2Hvt*xAOj_%-<9)4)eO92#3C!!tnchoeq4e9(i|ipp4^Gh* zsv0`MIl_cp80LE^5&c?toMMdU;3lbWuP-dv)AY?d+Rw6K2fqVnTu)yh<1K@nwjtsw zx13flDoic6zeH-==`EurNqd{V=jnT#<$6#fonknbs~P}y13}Yp=rI#e9Wv3Q9cGxP zkS=)MP$*{x7OOqr+v^83-dsw^tQI!V{I2cx+6D$e>;0p6V{=g??^uSe%!Ax3=4HP% zK_aT0Ydb^XAn#GIDveJ87x8&L4xSb+n~@;S`U6%$fJGlC3h**z5W^==_ufyl(jD>$ zK4t^nYvW5hl->i)Uhwqgl7;rcSZMR+1RdK1^yZznf)$nGH{Y&vJ|5pxHB-Egy^upP zTJ_)&iT<7Yz(kc)m_+-qu2bCUs_td^%VG&0x1FITu>{fA$s;`a74j95o_QFm*vE7eLHK3Z-7UQ9C zoq*Io_$kmT9^J%R*!l7U#o_bq9tM3KNDR7I(F95S+}9%ns$M;0kcq#xP;4%_3-xRt z5MM4+-uXCAi4#jUU7hHO0$%5Q;QcQ9*AyHrQP8vYO9Na8NwMB;#53G&~|iWXr6 zwwJMa)`P#ZV~Mg*`Qb$bN`G5jka%6A^IFsVqbFx}alA+>wa5h^eG2}!>=7@gRY>I% z6Rn+s;q_`1z&U%bw%p~Jg47@C{n^Gy#fmOKVN`cY=n7W`9xoa1LXS;`?lNI+q2E}0 zWz;`P`dtSoR-TuCO9A+TomEmtK?w2rIwfyiBo+-_M+u^_n-S%`hh{_p*_{_=;_DVy z1HB#6h`^2UiB?cR7@xkEVhrlXDs;yX!U3-HhS9B)LA%8-DcrKq0Ii>4?tUfT?_?Ux z5RenF(pkNe$h`p)CJW>p+VckrUcPMwJwwx@%~~vg*m}%z&j~jD(aTbFf@%9(XZPs) 
z&cPNG-!he_v;C1c84cdg_)I$g=B_8lvlc*sQNQ(}E zbT{V^lJR0C5`WzQMZcJ`khZkcYPLDxdCGA+rX)LLjg2$q*X z%7X&d`h&ap>ZK1?Xc|bQrqF=8FFk{Mr{d=wObaX^&Lb9I z25u2^All3!##rqKyfE<+9tRKd5zoZh-oMa)oP1Mllt}F?>J-8WxbT4rf?nSUIjJ^z zCOqrI`b3(QF|aOG9qb9rwS`4g^)L5ZU1~WvZy`Fr!mM3K(^VAMPnT~{tni0{=hXg6 zYCsu7r6w={q1b`G1H4Mpm^RRd`Yz0B0|=W|FU=yW?q=S5>NKP$PWqE*vm1oE7pD z#oHx5a#=r@0^LLHfx5TCJz*;bZ4mAaMU&4b@*nbvX^Gq(1NPa^YiD)9y*fGdj~1q@ zJV6n1=>jb%_O@iux&|FjEcQMQ@xX(%PBvEw=niE4J|(XY0y}_n9RiNLKL|Fy18|I| zAO^JEEsQj(Q8|=yppdHYq9A?y!YCi;w()u>C6`LHiGbCr*Asr&+Q39Zq64gM`VD#& zWDvA)kF79`988Lwszpvqzv(z~;4WY@MXiqn2Or#N=pohnpZ!!;$|+P@tO#2lNDcdd zK!dV?Cqx69^{}#s*rBBZL|w>|miQS0nu(cE9ulyjc_IMV$*UVtzfDDnPOPS`LQ~gMiPznHDm!t29)HAp<|~3e$}&42B~iY1hS@2fri3L zbgv%%HnM?X5iDb)xFMAuL%f5B_HR=A3_zHE29W3dO|~|wCHL5gu5W-kP*c^`Fh$71Jni3)Hdr_b`Zg*91NVc*8crQ?3xV{w%h^a zkHEC3_4$zzK?*cPJ7wowfp1&@)_-r4zsfVb-WgYrV zM}@tJ*(=xQcNqx>gb1KbNkhuu2`v*ab~~ryuXF1-W)^y-9Y1V9r9)-AU7!EK-@3LT zyKW%~TWbvPJ)ZISQ0Hks{>G=29ZF&j)5VKTFl9b1ax9uuezgb+YkBQdwz;cq8>lR6 z93)oIaN$RHa%a`PdW!|ledPksNRUw2>SNn{0QmmM@+vf@wQ4w+(K#ijl4IY$mVOlK zcnmS2k9rX;4;?aJPb3zjZ8zzaekipwsVwzU*^{D7o|ioE{+oY$!|P`w0B0@Pd2C7} zs(DUoJ6s&TB}ch4V7V~O`2M1)%vT3fdAO1D`V61wEmneAn&j!Rj`;aykR_8r zi;AB!%0&iSPEh5xT%=~phX=7Vf%C(Bfc%!5vpDme>DHAu z$}tCQ3H*jSL<23J!zZ9^??4W;EtI`*Dca2VMGzkPe?x-4^GDmkm&IZn)t@fQ;QlagprxMkJbgakW;r3!W08jWStKTpfJ)z9umKHxXkZ}%`!-(7lvzN0@C*R)jcQx9m%g2$w&G2#0Q7S~ zGF)?MX|5oA*J*V9(HHKEiAX#pk~1?l<<$qA@13sslZ^Q5P;yENQo-(k*s1{Lp$sf3 zIQH>K0wQW!PoM6{6HaS!YJubO4=l)6$+d_|d9PETV}JNmgyn4Rr;Jmcd}<&2oeT7q zDLJa=$3_<$$}8zxbDK}(ds#qtZd@=f&=gCq3uwtY$bdC72d(f;486?6^H7MYwGpyO z;tzW83@^lhu-tOdz}g8xLx+mk{t8Fx)C%i7=r$s;Tsm4 z+W8L89eT|2eV2FYt~2w$h%a!z8`<+s#OBuN|8zbRB^NdE0o}77PhLB41M=o5pq)+& zU^w>#+YRdgv?VuoUCh{K;UVOPt+iQl7qlWpXgQXzd{>t*a+bv9i!d4(pPidNkH`s zeprR_Z9b@8;{Q4uqSm~Gr`=n0*vzcEI|2cxkYEL?44Rm$EKbjT{`^<(K^OsTto78i zE@VU-YoMSMSdf-{6tc)@57DvtJDii|_*0b3bH3ghqkC`5f%1eKYEYGh>C>Uo{Ux6N zBM0wlJ8_#V1 zoj#i|VaUIW^s75*2KUbC0FcD6!{Z=zexkj{rQ3r4iPi}tPP2!wQrkdZuunRig#5V3 
z_z+p_TXR**tN%o-gprd_J({?BH)LBXErA{%!i5!ktx8+vA-rEyYIb_Wkj4(Vpa%7luh_Px-n9Ft%R}ZPxQEWN67xJ?qw!p_T&wUyA3LC%NK?&o`#UN^^+uAqR%-| za2AbDd^(uSSmmtneg6B_GZQi^q(-Ao>TWSB+caaue}xXy zDG3n!%nYoX|7p7z0IJ9Kq`>zAi=T6POI%>N3%P&*Q}UMs`luTPW$b^%TTHL%bKdCi zdP9mgdrfBmxWN!B_|f*e(135rGQn{zsK&OG@%m~;L_R) zU0$<7j8cy;2&4I@YH0-F-l}5P(yo7pPZcRSA-P8bX#)jRi{&?cIFbsM%kAI3?QoNnG&f{ASoao58kEC<$03!~oLIJwg-jO!G(mmZ}e^3Z#Vi>98g$ z{CPD8HwT37KygO;3od?z7Lc464j<8Y>dSvmyK@_ew{~U^z4s`;H<7|}0#dSDEN!U1 zyG?^-IQ?&IrU^jUO9zd&p!BCH7iWe=bAT{a|A9*j-We*F$KlVsa9#gzNkAmIzDs&n zx}h`NJ$j&R`M_43Fp8+{3{TN`7ggXBlKJc;|6B)1>1j`$23vIZY%LohMh?n6ypfbU zZdBlsZ4_1cPr>P~DDg+s{APiNXa51a^D@8a?Wu8BH#5+n zTMbz%SF;+Ha@K(P5`+9}OYd@tOMf~o-AKLk!n_EkMHNY&$vU7IfJyqysHDn_z)QiI z${XHL#TA(tE+OFwjLbLccZu$Z;dP22(XQd%v_sC29PW6<=rb}kI3UIt0JQ>HOF`Sn zcUNQodH^5{>XTa>h0j{rF0?p+)R#jX?!E-)GGlEvX^=XS9zc>6kKQoFw?*amR4B6^ zu2zdst+LKJDF&Uv07Yw+@mYfTKHF*U-qrt(HK?aWf}}Lh$1|k_b>b@$+J%#8{CM$) zf76-1Q?k>d-V@=2V2DDD_#tma#z+#*EdjI=fb8L9!!&*hn!X+I$x-vE$i0OB%w3;8 z*$WD$QGefkzof{(?viO9b%kCuR7OJ=QH2KCn#V5vQS!t*;tP8EEJ^2!_vnDOp&cfRdJy`k+ubWY#kTAzB@ zXG!YSlV*iAcOy|`O_jzbZ98$wry54QB#;m!6J5F|2h8`~*}Sn3DPs9I(_6W%Wz6*rl4_a;iKic&9ZRL8BqJ0i? 
zMhx8@ybfa2MaG^$s&A$|=+v`$N0reecY#X>Fn{!$Pi@+wgW9rDY^V5oeI8Js$ElAO zr07BQ1evJ**`-!t$t(_NIS$VOue^U1blH}O{aE_nIhYYql8;($mA;Us*Qk_6wHvY> zQZ!*xFSQ3OdqV6j{@glxTUE%oh?~m`?mlP7_qxC#jK?6%24ld+`3Q034w@HZz%kP%$XJMD zyGL){QTS3wGQqJ}_=L>iB-7hy4BGUzTL~I1C#P`r_J^@wk?AVuTv|qUyY4tgsbzI{ z*Qu=MFNgQchR$pFe*BmkwD427ki>?EkebG1MLY4qrx<^;&aGYCh!J!yn;?Ztgr?AB z@UbvNSYxjz2JaID$(lH^0cjEMPMBOf)BHjPIAE_@?+Z||1 zhD$?w!}>+LGK3)*tzUQ<>1p@TjdW+?EvX16;L1Xi`(|tNnI7>#D$A3-U0$=t2hIQc z;zy+5-Ie`j56r{re{JvWrk_J$3I47}iThGnyd3lOrz9E>~_sWW$n0M(nb=G&<0}Q3uc;Z znBH=Cor+knW>$r2n+5itBSYif;>MQnkYmR}i4}72;G*mXGH_3i3(|)Wt5W2*6z8Cg zv%lSIp0YuSeBE*QEy~yPBGgqlf`5x+FoaQdhjuQLW;qkkKy^92WoG?P7$z-Oes(rg=tU}x(`<>TH#e_qq3&)=k+IKsT1`yF3gyi%IMg%ViiYV!-m6j z;SanaaV28nC$zBHa+~|8$5x~=x$I)rpGAX@AN zi0``eS&5G2+pm;e&(nvPhNQC^eBN>_%9trg$DnzY11wo#v}!sS=t5dxS;>M?$N0@T zGn#$L5HI02_{SsVB`M}#ZTtGf>g6)1%hW55=&^WV);E)CdHT0d%M2UT?xjs($ZBvo zRIle!OA}PC?UW((v9y7Sx?L|gLhdsZv@|(M?E2re_>mhIQajIl*>IZ;601Gq+2YX0 zBQYm;o-3Ee6K|)olr_1kiMkUrnc6%=Ac8+z zNKwMA7mYESyGo*RNOj|55Ug~Dyqh5_N))-a#yn`=mvHtXO9ogtVI>B{=t8Sy>bs5n z_-c~$EvpD-BynS~^~#U#z^_;y_OEt*7wEal2`9V};LF~K2CWL!Z2l zox8uVr|0k+b@A9k?9C)Y8?byc?0%*sSBVx%N!;QA$b=a!4EbPV_9V|uK1&VCnCLJ_ z=JwkUKcgGywp67R$((swqS9He4V4}*OY#OaSfx0V4>e>OB z7<#VUeMfYeVKlhmiX98uW_pQnvYlTzpEM~U(D;2RH@gFi3$1uacxDNW9AWBVCTMaj zrc#g($>UxvvuX{RQ`YN1&Xv$0#vKXN%p5d$$l zbN75Aci!FXhQ-$-eag%}y2pV*`D#K8-hDC_TwTKPIL6bE4CGztsuI9& zW@5Ibzr;vPX08!CeqFzve()XTpw(7tix5WP$GeZhh+_iG%Jw)LmHzJp1?iRNK5s9{ z*CO}%m}WB*JlPjhTLU(Qq-NGJ-~IHGuVeH9EfLFjSWTtw&dO&W`gyT)tQYGKp4UbG zv;tSEWtmbTMTx6_x_A73ETb46@@^Y^bBqV`%$G=?I&je-V;Yi8O; zXW%%bN!Ts-c--eeU2?LgVJQy{I4=n9k{rS+^iiw1RaC_D)ez^S>K|E6+JTAKcbT|1 z>2=?RR_i3z%8Vr60?YptaV0>=*ZSn^KeId4pSbs>VZO^l$=q!EPz^DTF%J3`C}Z|P zAmm|*l_=85^V|y>!bi46SBi{8(}JMcvgk`)NI+f$j5S__XV&->v)|RWxiafKG4tbu z^tJTMz>w2O`aLSSCrc6~?FM|&!`5Hb*pDbwNasDGrQE;f>eTlll|}Y3e1Y}*$y|2e zTV7EV&PHgN|P++3lv7->&OYr|V#ZTsZX~HeiOUUmxLhEBwkpi7K z<5Ry|UwK5FnlBJN&u5I#>cZn=OfLTg9G(@=bo@Fb}S%ur7Sd`mnnq_z1wv? 
zxchiJ9-4kKxBUt@Y8UKubSlfE)JQp2ZeVilkOGaAhb5%Dsx6ebI6CLghMFoK;GW2c zU6|rg4j4)`4*BMDei1SYw|x}r;O*r*!-do|BA+*#GSjS>UDjj6C``70^ap>D%!)@f zy2lXt%h(H?%{1$~_MY<}!5bh5#_S>XeVlb$aV-8FrrapMCU?0Mb0e4;aga+5i&OX=cVeWH7u|a_w4%yZ_lqM{HmWx zRJlJ-@;*VHI3mD?*KbCvr;|7+Lmp$TY4NRYv}Mj0*r2P`rpLdfWwG$ruq~z&M@xSf zw$p2X%f7@O^6c2olsz8gi&%~1!Btw(CVrMUK? zJjQ}eg*YgFS#`f>(RlmptATl4$9QciCB7Vd&xaU30KJ(@VgB$ z|8UoK4R~bH$)gO2S6)~m9zZ?Bqq81jy$67T+^D~EJHV-4k zkF%Tw- zPlwS2aR**0nlry{P)tXABTMCT1~In`S+}t0n-*AFV$n6SXR&%oS^k0;jMV3^_*$sz zG1=u2PW&D{Dj1SjqGtZv2c|=QJwgoL&R_IZV7>8sgcGAM*bWn(0;`gQ(&BNwrSGY5 zsu0Ebh zpoB_BnR#Z5U7uy-HySrC$7=b=5}^vo#RcW1Z#ks2ma`jq6+K0f0&EYN!RRg>Gcc5N zcZl(2MdkDj%_u=jo12al$V04Lvg*3?qbNi6D*l|4ZF?bm- z=S`7YS(S>;G?p3{I}H+HU(CjWh3b7V)f#5xY}Hh6^N>e8DL%|4VSOwvmvvxr?f{(9IMqAU$Jjiv6&i`29D&x9hU!sk-!B@6K*!CUz&3juIG zdnJx9E$YJDXjZZaq-H4B4U>tvfG|eo(0J9Z`Ew7mN)AlMbOD>I=UM+MJ2#$!R({hB=SidxO&*{(bHpY=H#DiFj zJypxJPfxq4WPzTR(J8*!gMCdgZ z-lkkWNbU(KC8msC=paluhCk?AZBGlPw5K&U|K1-C)+T&CYIwm~=j1Glmv+FwJQz=4 z=fFxJ6Efjm(JOo!d+2jwG0`kxHe$Lwc2u*9ZPE8*PxB4Z`rhH|>8ul}`NQEH5tIv7 zYt60Um3xHLi=DwM`NKLh;l%~9(WCmaL^EL}5f};4&>+r=TmR-%KXH4P;qY|0qqzFL zqDZ#c3%wl3X41ulW@i$mZf{g>V>^F#A+mVvP;=`l#hF5q44XTrx%MLX;fP_Xpv=%p zt{!P&A@F5hoS(WN2=|oEAkubOPj|c@y8>KCivC-7>_M`L;=Q8SHXn)4PhVX#0X|R0D@8#M0!=J(j>IdBT|%NL3#-wMM^;FRUV~? 
zv`|9}q96p39zX)fdk25-IsXqHJe#?9XLicY>|PUl+d%v5Y4+0~5a{f!o0>)-5ST*y z2Rj9f^o)Ib2K)j07-_46@co=iAP_I;mL}?MfXzzQb6-{kmZvorD-j?>Su7|9qzIXV zok4^PfcQb$b}ZmakY>aI9Q+8nzt1>4vk}03cw5(`z;My)Ouglm5R*B#TS7~Tnh_wV zC?cR*TFch-2!9k>e=zeT>DAw2^Z+`<>~N+v^iW4+$c1P;>UT6l zWc(Hlf}V!|$yei$27_S-Y<7nVW}$E3y14ByYS8YeK&gp$%9rKyppX^l(d7N2%8QU0 z7%zE7DXJxz9-SvTxC#w2X}dP1Qs>SbLf_Q-w`Fs8^=QE(hsp=f24T5pI?!IHAf=xU ziAK$eh@9MpbrU}aac?5o#uh+T7 z8Wt}?t>~$M9oRfjf=2*+|M#3?&jHrYkLHOdW06b!W~c@&Rn~ zktWea(wnPVvsA^CkY(sS?KRqe_<;T4n;u6CH;Piu_Y4O3V=abnlY>1WaWg^rKTyk1 zIC`1;Q>#8ya8le@hk0XlmhC2rmku01K42a+5b+AB!=gseWhrWT550FI^Bv_qYoI3# z+#UkEmR)WDLRG9n&Wxi#2w|l%xY~4i07S8tS&cx?=mu@tyY9AQ&jdw!e-yl* zZC9xYLeCW2Nbjx^r77CCA#kKs_l6iGF<4*ro z&FR=K2_Fvd!=5mUXNq(aaA6ZbAd^bDZ^c@3o~cHjDvWs$a^bwtIxCD-ARKpIgA}Y0 z*O|c-u>e8`O%Ej3b;{6%Go}cI<5GT-9NgkMEh)$l8)00{4pYzJ3L&CR6i}6UrL3(> z9Z4_k*R5c;kfKhO;UJhEX+8 zs>uj19F<`wCQF@$@=C;oYb}>mG2QPR<^Z^i>|lkIB^u^-HNoq3!+rhu9hkw;5aw1# zTdgk{zRFzUYT@plCwa9WK~Rb0(LHM9i-E4>biPW1a46Ej0S?f@@s2$IXyw#E_HwgO zrc1nJ?&iA#T9M7%K^9RU0Y3-3{}FNngx)e-zBgx)1(b9RS5AzF<7)&Z4Xjwo&F8-$ z(d(Wb{^5tvZ>v+o=nDtLKxmd4JVLvt$7o#JKuN8Q2fXuse``XNO@Lfw=W8q?+|=NG zE=t`_JfhC(TVu){G98>kSS3v;kTmc-AZA}cT=)~v#SO^=`I7T6>g7jx+acthqL{w- zibh;?n+k;4ue`!7D{BY_hPEKI#fjajGY>j-2qN%sH4dnmt>@=~=QjN0jjTRoU@*_H z2JE*0?x&13NP}Jvgkt(V{nObh|MA23#=M}2TKdXX?&%{KMvuvfaUu4l_oY+-I@RwWB!Ws^fSkA zre?5+V(56?YFibo(OURcThofOUwQj=m>Nz9vXB#lnunqMYZiPb~y`y7i))?yFZ-}ApZ>a zA-O1wRYSqBalW=j44*BawkP*?ko!H0i+T+3@NjELN)LmL{B=Jq9zL?vCu{s`AOF4P zxOZF-_&O@wcRTGc5;s7X`q^ROzHT2tZwQH@#)WDS79;i_rFHk)OKzSK(s5gN?|C z5Wue?%|E#oY&~8nqBE}ozlDRKoh*cl>jR!*Weye8n^7?8O|O`}0Iz0xU^+Vnu$W&O zoE=7$5BM#~(-mLG3#{^6pv`7GB(gS`2gSq4D9O_wCr2AT!xvu6lv@+bhT=JOR+6Xl zc@ZBl8Ycvd3P$60>7r71ojP9AR(uRs8=gT=v0+iQ!QWhJ^bH>4D{q-$l+R9qM%5rKNgf(?kuXgZ<0NM_yorExN3uhkw z!%&Hdita{2hFBmmUqSf1N@=GFX1qx6>v9?+s{v~GXBq>5ht*4O11wpPtvx}1O76LytJ5@&A6KO^@AV@rm^Qv@1GSph&aeS}PFIL(d z@EAkfyC+T-%;@qox0TF)-><-(9TPEEvHd&Mek#orR+@n!BQDie)$o!Z8P0t^d%Qew 
zLNC6Q)I5*AJ}ICr9`Xd(VM0#$^=~y<`pdugbRVFE^OYZb89LTwK7C|=aH1%AI$IXG z5Qw1dfEfhor6sWKB~*fY4LvV*cL`2Y@FS?Y%{=oR!ka(1<=a4b1||hXxI;e)b>Cn? zzncBC4#)v~%mewJ5FIZ_&V&+$BLhzXLm(&;qVp=~mC>vECh?chLsGo4O~WpX>)#Bo zpr0}Y&)*;@C>1{VGt5St)C{UVoy(0Pn9C(@|Cc96n?Z*K_C9;n8W z-d$drIhp0VmC7SZ3-oHZQ()r$urdR3zt1gF(VfOmjQ;H90`$`R=Qby^d?uB(IAm!` zt~6|0!8Dqpj$RpI996|C;m2HZGJ6HiG-;z2GyS4-81RuOw8e;qJ2C=0W(*v>qp2q9txAl7p zJYX6$@*F2MYI9A~`2uaDwxH;U=UU5!&(EWrN=WDn_M9}?2~aD&mLNE~a8e`8fJ_?# zpNIAuwpgAAhOFBav}jVn4BLXontXxpln27N$%5X=+?t>r79g-V zRBuJUPLTXtVVC*SI4v-NApCZ&o{d5%VMGEG?0B^R0Xs*}#&gGB6R^dBR98F>0gkk7Q0}xw{4fbql}-FLOl6)VS+u!Y86n?A*i--6-wo}h0w_Qd@z2}K znJ-`05$B_koVBO^8>7tvwT}Kmmbmyo{o+g@Rf$83Y-oe0ZZha4x#LYLV@Z{n3-LfL z_WD8Cj~U=y&uA;lrzo7v;uwY5A6r0pn>C~=erS;uy{af2M>{6~(f#Rr{cNdL*nV;* z{8%ku*rQA9PC84b9`Zi)$o#G8>^zm@&tueQe-{dP`uIDFp%!`5{?E8t{#I9m_L2__ zN!P7O-!OX^Kq24x^;aqQkA!l-y4{BM5%JI(cJHV)VV+oFdn@d2YuHifzDXGI=;+VJ z{f%uZmAca~YnHJz(~}}T+?ALfS8c8mLQOI-FnGisZNk4!UFhoB&%C?KL#XJD8kV*H&YcU+W%<+7(_{Hkd|XLCUN9V&2<>t!g?{j` z@3}_i6x66>6XqRE4qqn^?f6_Zu>E`CF|?%bWu#L02$F@(QIhWl-?~u zeq?f$o55XnJpNN+V&d@k@3}$XZ;LnLXNA@m7Z;z4V4j_Yz#7l0LM!>F*lIJe{r74> zFCR8Aut(JK|Cl4yCKTQ5*9DHABaGNs>LEs$1F5R1mWKBFzl$RGZK>C>B^ z_FBKqC1&11Q3C_j8|=cuUHa$H22pNGkZ5#{%qCy;LLi&{K-W3cK-a5u4|+YFgp4cX z-^#+wL5SNQGG5h;W5yVWznN~0=ctDDi=bI<3XoZ5H-5=NcsxApf@IhRSW(2i^mLEi zkp2a;$IZm&;k8*J-{V)DTD;DM6|dChO63buJfD>d~v5C%vQ=`ZA}B(qIxx(Q_pTBoAMspkKVUCY7a#b zlWnZ5B(5<1aMW7f=ZeLxIkmIo(>J$v8wROTR*vaz4@Kmdo@x@K&u!<&4N?aO5vH{s z-dC~QS7+k49Y?D7FQA+nNvVr&Y55|4@n5^`Dt<;>UvX{X6Ye89#p6<9KSo(zR9aEd zBBXhE%71FaOLffLgFB!8yS+?YB8sn+!o2AsWmcI*G<3UL&-sIW23aDA4g;ZPQI zr6Pn=%O^z`n=RK`yuuyv-|(u&$*iTF&U}NQ9+{K6F0Vn$zmE2}P@Y>-x9f>d3Q5tY zk;5xbTU_2LtXx}kl-A{b=k%QM40RnMX-qfsp8)g%zXk2LYKST@@4L56XrT92f1A) zttVE4bZt>#$wo%%oI>*8cBT3!POCP$05=1idUrqS%m(V*qJOLi&=5Q4vxbx`k(jUdoh5yVI?C%KmCAka-)lv9(%dg|aynuYsGc{@~JAiB)wJ<_p7%v#yoTI5^1 zt4%lyHM6`#F8i4!P;pFzL-}AZjrB6|5TH8HY$-U4%8_y{4muIBCQ{I4tVN#2vq|HV z2JyZ=c(t>??Cz5qTU!kbvy~(rN2Pt>6qkd5AXYGaV`Rwv2|4veznVL}?jPHCq^JJI 
zx>{GSJc40HZpx45h&YNTs`YfrppD`aEv;o}|U+&WEbS-sl}>Tg4c|UUu71~>`K<`b~kt;u&e*+et$IGkkG12Fjr+_V_?oYIyC&4Ut=wJ zB_$BDrRF{qQ z3-r5-13Bj{x5^SuS+e=Z4{bkBCSq|x+nGU4b*tLngvj4K#7tDZzWE|wlpW1>ukgoz zQVsqyYalu=oOPD)wXtbzuqNAMJEc6t+vU(_bg0jNKlt45{d9-vdOCCl&L=eJ1h><1Y~2h3HjPZOMl&!h#>&oQG<)^j+_|vZjd0K?QY2!vF0jY zV`WelK(>Fx`J?+cRCXq@RiDssq@;MXc}{DTXP4U`l>~X& z-)mFeb(P*jP=<0qla_qzfv>eZSC> zhpq~Bx-+Ahe_VpsuDe!i%IjD7^erEL84~x{^nL5Ir!4e^kIWW2DdYZmuy1U$#{`lA z&5D%ol4KS^7<3zHYiswFzhOG7TCVEmuB<+&7pF$1~d+g zjF548^9BJI3k1jo0+~BwG$Np4Fo^NtltO4Nesv(SUVq3LaXD|*J}Cj0`25bySDje{ zhv`P#Yk0~V--fUJas7xbrH#C~(2v1;Kd5>EO!;W2vSG7Pk01m&e7HmX;)@Dnb)|2uCk-s6P-Hy zy*WyEROkwzVA6P@rb7I<^0^+TX3L}Bfr>grV=MNmZQ0=v6GFF_+(LoMLh$}hpS5fi zSrIKBOU3`I}Q$fPP)tYAF`KY~-uM*+CDk==NeybBi<( zF>Jfv`gC+&9@-LoF!VzUa!l5LakW3MWA2{JtJz<4X@xVscgu<#Bo!D@yR4{Ocjcxf z(hq6832O0-zmv*t@);-Jw?BJVUiXdmCM3Hml^0p+C1?n~MF}d98ueSbK$hB+J-gvK#jIV9D`o(lu9E zjh?BIrkXN$D9O=E&UMsb*+Q61Cn98>q2sOAHig~?taBpIQ5rwf95}b&r>Ng_v{w}B zF>#remfCe-+vbvnb`1y*_@X zhe>#4O5?ir}e@^=tCE`J9B(e z_4_s+_p2mML;}@`xJ_zG8xJ!D*!OsXNGE-OnQC3q(?`=;m-@D@GYCp5tEyIfg`+B- z<#evGn8gtkg!*qe2gt9!k($ba4l_zAAN+8(S4Co1RmYxe+wqYznHSqi>FDU5UllJu z&tRN8cG0|U;PX(z(|s`Tr1Nq*Gh{%B94#V5dTJo4*Y= zUtEaBSth)=8vvJf;fILbVUU!5z!5Uu^Dpef#n@G2Kx21Qm29rD_FXTQHlo20wIN0xL@zAesHSI$T7 zRQ|)noXcRho3x>Lx$#fqLkl=RR{TNcAdw(515?%*y|Rp9CmfX7P=P?4CGhuqK* z&D$GaGodO1WS<{>ck8x!IDakhJlCKTc?pC2H?_EA8LkM$-pn~E9MYgp;0pLkO|F?V z&SA774{0*#D$CU2`N(3Qksk&D7JuEH6RTC(zNMbI0ZO;a-hR-VZYhMI&_{HX`oRB) zp@=VUCoU9cA-CX_t#pSgQ*}$fe~TJAz3X5FXI&_I**I$CBaXy+$XrRUVLCF|Z z)D2Y(4-da>>MBo8-^~*s-^C!QKs5ApK;6^m@)~FVU4BX?$c&a5(>miX3S(A(Hx6KP;j|iA1q;wC+hwl z>`5sH*zNx2(IPWqo!`d7kXV)L=zwzIF1-ap$<=>c*?ZeTDd#yzxPy<};1G*h5N;c+ z*pkYg#`>Zb$;56kS<8s+(jWz@ZxDH8ddNu@cb}tHp896lI9_pr`cqVx>y>^Q?m8GV1WcJJG$Pz{SK%+zFPtXd$e6;=+lRN4A+(Z z3{=IBv0QhvA?bd?Eq-BiQ3TS+@-d>MxVZT1LXyaPn6uMUDCzOLw2u*h%f(%CZyIB# zrX&qjU20l;J3LlVIseC0I6|7Bw;cVoSP@xpyXf55Wf>CHZ>szni}KXk&fm=aMCVOS%o;?ec)&)IgsgHE()haA|UJm=^_?(ST>aR-d^ z7QweTjxi4n_I1p5fLqc+jMHy7+9N#rb#r4;biE$IQKrGS>!v 
zz?P4aaHR)|KOWkM=(TA$WWE@gF}OQ^EjPU?)Y_G<$xl7=w;Dm?X=r5s!61F3!3)BO zVuu8nNxSWI&eLuE&mPil>CvAbKr41$R%D$(psRAH^XwaQ$$pXeC!bcbH^%dVYAF-c zSnuRQP5KObsFLVXvG%>fR|tyzM#pd!bG?DcwK0N8J?`PXvArw1n;I#1Fe*7I#oaw* zE76361ieg$Xk1E+-X!_ftd3KVFSh^WHRX4sp}jrGMtSp_RwXOq;wwRWH|{81E6eF& zgpzd8a`r{i!QPc|rxzO(HyTn7$)CC}leBLKvS(R76M>SngXJ84rlFfz&(M-i?CfH{((*=|PFENFx0KCI`t`zLKW-thzjS7r&Yy89(XgL}Jc z-_Dy-EJR?9=M3$lLy4ma{RRY{w@IPc4kj>Dmye&cf^xs3mHGL%zOG{l=W6>0zV4`G z4D$4sJWU4jh>t$?aRNL+>3-+$xv9o+HDK6`Wk&JF-zlTI@1;p%j%Ul9~#f(?*jA6KJPg|^*9_Ev>UhBpT z@X%?vQshVRe?=}De1uzEfi((wO1t+sFi{(ERp&|OdgK=ug}RnXiyU0U-wS&vJlamu zo7mpn+{{+97}TZV)3kX5LsP1DUv(nAp(w?+&#cCicoL%}p~cL3Q<$ zW0vn9`|Ts;pd?Gdr#1fX>43C+qH%%njr;Xnr1>)0!mv&@{+9$4llI%)RFf$3yxS|w z9fqkO%;oOUU;Z5OGPSzVUH`~&dGcVrs&4k0g2#4rjn=Oi%+8B?8K9+r(wU31HT9v=3^dKYz`>CEJYh5{MwW^`?1RPnCYdW^Qx^M*z@>lbhe z0@Z!Bfj+oC&EunfoO#Y;;bCRDRrT#aYk`$@-d}+yI+{|SM|G6DmfENjwmL;?;`!Gt z=)C3ci8RTFNUd^XI~TQvP-5 z@MSqyG6S<{k50htKG)4of77;dan<^229T@ztU1a&`RXKqyF;z>{AOkSyn&%}s?KwG zA}!%c;Uj)r)r7AFZmdrriHS$0UZF2Rh5|z@HZs*Y)vqz5vjj@+AUj0B%c{;%4iBwE zq$IiaO5JX^RHXQ~Y%Mq~=SYd`p4SNDUO%`Np-Asd{iZK~W}bs+#u10-YIBLK&05Rs z|JB$C&RSS(1c>d6eKS`%=kb~Z!z|~t|2hmSQ~F_YRhGW7)zHDp*H&MuZTaHbKBj!x zboPO$O^TsVpNoQ#4l5LP~ zq~*$!?SW*XZPMcdJ!f1(5WP2Wb<;$qW1Vjr zxP9Y;Uo$l)@?Ojh`t#sv^~MancL)@ytxt4d3q9Tl5-nchbmgU8>yj^uZ-fFRp;Z3H z==he>=;}j-o_%q>^20KA!Z$;ag4g;3YwWo`Z+?M3adVHxI!x}2xW24+n-}vY5{GU` z3H~?&E~Yw&_Z;63u-TSa=qlIcVW+Rw172;tnww)b ziI`bEv3_RX759|3<;1^#2UL$lTk)%bDR%w>zoVIW#Qx${nb!<@eI{7naVOG!c{G;@ zs9aAjWFi2k2i$&I9lp=NJ2;C&%JD_J*v}eO2!gmK?Upd5N22yKj0)Vz%SE(}ZTpNi zxSh5B=EM~O3GAu3V!!Yw_1mcE{^fLc#8{tc2o|UP{Py|7JJ&Ncf%`<@1~9*AeZxz@ z+>i!|0ALm8>XD=3Mks{Xc{-OyPS<8zdC$kR8j7rL5nnK=71mL-CffpwzU!I}etVxH;5pXE>p^NxO=1ZBC zbUdUp_Bya{I{$q=)w=)v--hpYXHNhE=(Nz}towd!m6D z&otLD{2E4f)+JTeR#DEO3yQ7>PiafF?fT9%?YuMQl}HX}o=t+CZ{Z~mDnBX-<5*L* zt*flS`KUIEeCu~R6Ew2;lcQnBIjGMcc#lZr>|UVOZv5SPkHznXiV=tVwppKSvZ#%0 zNyXdT*L4uHVF@b$)z!1~t;-l|ayOb4b8x$5_aPj5k@k z$KjS|`WWP364DG{=AR$z&RE7W&)NXbV)Z_)HJ@NDGKr?kYcngZ_o3EPM6_#Ws}VbY 
zc(L64Z?LtyP(}y`Gc-i4a-GZy392iNN1$6i@6?ERcpr%6t&RsurE(t>MBwdGClC&i08 zoSR4WcC5Lz2vadf&ssR8NhHNcl%`N>tPA*YpRNE&wQ=zUgTs;q$No3j&DPS4BTAN;p}=0#~~ z>C;rHj@EBsFlwlll4N?@?9vYKU{$^EtlANFXL-rObdmj@YU^0BmHF?rZy$H>qaW%b zC|?<4rVfgicHUwCUN4|XiWy^M--OrJxCC+JZ@&{<46$9Ty!t$=X45_s$U~b08s#GH z06YS)1yay{(F$EnE%2JCbE)~g1fjaIkSNnE=+F(Jy8@j51RgRGCTJwLt^7Yw|9mUjH1$sps|FIz1GFcu{%P z#h}|K=>t@7e)3tN@8d#ZQI$5KNa}4=5Dl-RWtaxM9GY*vzG_0>lMDS4#iR3Ra?3*4 zbckbOc~~Cb8joANe@mYMf^4}pyppfWhziRv!bnX*Y#ybDhhBCK$@2NWNPi+Sm>XYm z4waMWo-dO_qF7>(jq`By&fCP!f#+NRUbd#Q|Ly@L)y_2p`B_DVmmG3=qZb6p-^s2R z%h4d1@W@r3U1v}95LVlq{zF##(pAN=bqT8y#lH54PXe>H$B8v~%;}x}COoo*qoyh` zM9#upahNJUHSi&7cIGRb(h?JZQ;ir58d!v*M{hm03R~Qm2LY(Yxw*NgZ33$w|GU-v z5dcl;KZfBBjI*fd12ONJxyCo&Z95}W3Lmz0{CtCnwaLqkJb z;XYb(KOHX-k&UG`7#zawHbZi?UL~4 zK1p@cEWs{?gewDu{%))6Ep7~%#CLnk%ga+s=vW21w()o!C<*V$X}+USfKKh1$uwU5 z5IbLQm7SY?3yQoGMx`vN&u({3vbMBawGqC-XaMqbRM!`B< z>0bbS@g!z`>9r>$>&wwL(Y6j!@$z6gKajpwYlcb%?kzh^+o$|RyD?DGN2aU0yq4Fe zt&CSoc1?7atRLQ%W{N;Tk@m#PHeuW5_rDg2t4@)Oq7Z)z%`jnT9uKy=?jB}_|R z=~Gs>?Y%3;l>Q#ZoN)|C0YPNwA54|pQzwi@g`QBr--@gKAGD$q)_Wb9?iRLF%t*>f_c%4$yv~OwmfN{;>0nIql5+c zLC*!{lw_kE`aj|`@Gp-S_wb>;%Ic<5aYJV{nJ*o)sAdA0V0!gVQXL?ciE`#Vp5O{g z>QCM@3vB=8mdDbAO8s_W>9b4eG(aCH z!WH@~u3wj-aq@nW&9%Z~$P<9sXSpnktN@@YLk!w1eKc@hxS9+Ad0CW!X5GQm)aU&} zoLoKzC%?4|y7Br)W2mH#_>%o3_IOrysH5^Z594SZ$A*g90Siq0dY=CA?q5dDUN1o3 z&C4B?bp_xjl%ecG<$piiu&d=5_4CCg<9aTw>WZJUs6KIwU@3t0`hDALWgKCpXp0m8 z21K_XH@|<3)d7Kpp(Szs9fgIs(!)BD0y%@s$h3dIL|8-nlN;|nHF8H0$p>L^L#2PR zwT>0X%LC@(&+qjMDK~OuXha@fDWKqW)?8>1J%IHW0Q^Vefo$3%6yagY*bvjEL;>3| zf8lC(AU6^NNMlj}s!z5y@4fc@e;>RI-^p}a3mkR0K{y%l*|8)F5`PR(Y=c2YYu}Ym zx!xj7bjtF-I>tqUpe4-3wfcP@a6PiM?@i)$*#X!hpan3r#KX8sj6B&{2vvw5`yXr% zTng0_{8awwz4XtSi5u9=(e;jhHcW=y{Z{B05;-&B|I!F4=GJw=~-E3!&OJtZWQ z*GU2^Nq;O8oA7@o$_S4PCQ*Y1|Juh`&K%Bvj-b^xeK&~i3&4xkPN5~k!{0Ql6+sN_ z6YIrYIIYMrkukGM7oB~K#M5f)&EC?tD@!CilnNRye=5C0GfC&f@OR3KHYct9d;)cY zc(^eDtXQq>G zd1L-e7n(PzWkB)CT}_R@T2e%JeT(&4nodi_(X4zHK-g7H?jk5x$iGrN?hltg`u|`t 
zF^J=U>o+250E=&JF%-#KT^6MEtk9OF@UGjZ6^+i_2}%q&kG8HVIpHLI((oS~4M5=H z-t7p>VgI^4I$85=ON{)Z3xip?oEJjd*;e|S?LrI8zI8%a9D|DG#B>yI6piO%gW2uJ z^mUDV#%E&q$qvA?5nAcwskLKJoth8Ssv@HTbM+OjDtl&n@dYF8i&V|F!^Xh7DASK- zhB>TL&9v>sU4-xf1tCKtbq6@P->1EJdyE+5m52(TI$W4Sa=3gAsY(28$ip(zO!FYz zS;%U?%i3IWF(9MQOi&JN_9Bf-cSOC7#~lXZ@)sh(mpYT9sb5RJj_e9=PPF4o8JaS&vKJ`zdq zk<$b~Y2Wj1h1hDYA8)55EVAGE+kD;_w1vf&lxwgM6vuR}h32f42E4&qirBwA5l%fz z{wk-A9TJ`xKGs{~H*?p;J90)LRdFQav@a3>;v-MpPkU~a{wH(z7JxvMk0|W0YIO(vt3xhm0kqd6z)QnkFg$ zm}d2_!A^DYX0VY~~92NAe6#(SEivFM9#z=D1N_9Q4c5w~{a#ry03K)x4f zkZ)yP?-f_on@ThcuH7j}OrT*g3}bjZ1`~E$i3R}WWdXMbgKk1YK%$x<7eupZctME> zkb?>w$dKU|@>Do4c+$N7!cCAQ4@@snfGmLH`!$E#I7S0=L*ZN8FJzUm0J^S@b(=v9 zeUa&f94+LQA!Z-#(DOy$48v25eyFDafDsOwpxPkGoC^#vs%Cf(vB8j*4g%gaDcBaW zaPc4k=M-LF_QPFgNx$`1F)fg(u|QR(t-`1!vDy98x1h@6cACoKhOPi;+JQyBX(>C@ z!{|Ej8X=`-RjU5crM1WQnhedeU^iyB^|^=kZ!4EfUxbEm({UCisffK^Sa6B3pfUGH zaQ$ilI?1SU)1}qA6hYYw{#+miJP?09u^v;&)o6n_525CS`g$m2~+0j>E+XR_wKy>qiz38_2 zavp}^BlI{@I7)U(+(ry&0R^5Gt~K}d0r5Y}BnW_?gS;T7^P1z8d@KN%>sMw1vSF;J z#=}4aXtxrFVxAQHyw?T5)u+L*UR|}RL4%v28Rs_(ClQp*UQ^2l)-7UT766}{%(EAW z%}R8mQ8=bgGCiNgQ-pio<5d`Jt^g77G~BMG88jcF&r9Biz1dS){`F9P81V5IIiTo{ zc_(Cv7Bqyl*4$$P_+PtS5+ZN;z{uV zR}OW+>wu6cf4MWZ2Z0t&ut@JlNw*$6t%0xWg}2K&&1ev^6yg-e4i(Nk=bL>2@=sAj zvtw5YE^%Xr$g@3sv*93O7Se0z5(@%dlwg_Ow$SD=BXQ@^9x8sJBM<3DHR1o0($s1AbnNek-|WQCV}lJ~p^ zHe~r}#4sn6lbQDkDR+1$Ee^LPFu%3WUhc;Jcf4rN48Z>O-c*bJ>|CBXJmVN?mG25| zIko-R{knyCS9&t9T~>I%9RNs7&tiT7bPE#RaWrZUS#g9GrohKFak`k{KIgcvC%Rx* z?(qgU>-+4jT(-l;Ff{Zg#_cj2oD!AU^81&)&JtHjOZ8#^W||eeA{5p~!5FI;!Z4P9 zZC0)=FRxIuv%7@Ub}&}oR#z@!Th9XxVbW*6(Fc(eh7@2>;WlKT7v1g>H+Cl4=)lR$ zHgWAf=0ZwaQPQh^TT?d00(+oc?sNY)f-ij&03+(BS@9#5+n+KbGu@qQ5AXIOp)D6& zP0uP@p5BkRS}Mr~afhpYnOfhP1$xAuyp-97#QNwrWc0HOhri(PzwN#Sr%ZCM52}1x z{cLD<@ij=Dd$?jE=qb>t$KI$Osazejlqm^;HAqIA$6wRb1E!I90%IZi8**z9voRd3!WwwM!#H_kw-6Av<1 zoG#v!wK<8M2>?A<4Hf=vsBl$9%Dm*cR`_dDUf_sY7#w}V0OkszN29oO>nFH{EZWRMNN+0qS+m;zvTCNq zd?qxu{?(-55B8T;muzO}s#AebmrXA?n##xPP2`DHBsQgcKNryuoVHDY;6EDk!m)rE 
ze~L#tFo3nzBBNlzwh3)AS@=Que6Jt;f?%lfTg#;pHVA1lNDjLwk&&9q54Pocy8Mk5 z774U#M*B;0O{3JXp_Nu)M{axm;d=tOmvx1NZ4>o`@ck`5HYZ-7r!)NKIeVD%GaE<+ zB3O7m?uUDWsYA;Zpn2AqTpJuI^9BgkZ`y{-I3Uavj}jaHZ}GoK=(hK{r9Vje`2sTu zT+f!afOfIPpSmlO^k{*=_ywVLl5@5cxofY9WtFFAuuh9aRc#OAG@Nlw0%2#0=azOv zp~&2|RvohGk!P}2*4;_^>UN7yq*(b+EA0`fE;%5q$2V=W3(?_P6y`r3>_^`>*GNZ! zT;Kikrh$IvCdEK~+l%g9vG*|!z=R$nlBuJj5hmWmZ%ttgF`?kw=K3Sc*-PQ&M?vSm y=&MgXjHw7Ugp?137y?b$Gi6m|d7vlzk7`=RDqAbY251Qffo|O}(8R0TVgDZj7nc$M literal 0 HcmV?d00001 From b94de4f0026008ab18c96be5475f68252b509cce Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Tue, 4 Mar 2025 02:45:05 -0800 Subject: [PATCH 189/623] refac --- backend/open_webui/main.py | 4 +- .../components/admin/Settings/General.svelte | 49 +++++++++++++++---- .../components/admin/Users/UserList.svelte | 36 +++++++++++++- src/lib/components/chat/Chat.svelte | 12 +++++ src/lib/components/chat/Settings/About.svelte | 14 ++++-- src/lib/components/common/Banner.svelte | 3 +- 6 files changed, 101 insertions(+), 17 deletions(-) diff --git a/backend/open_webui/main.py b/backend/open_webui/main.py index 1ee04df0dd8..779fcec2b6c 100644 --- a/backend/open_webui/main.py +++ b/backend/open_webui/main.py @@ -1191,6 +1191,7 @@ async def get_app_config(request: Request): { "default_models": app.state.config.DEFAULT_MODELS, "default_prompt_suggestions": app.state.config.DEFAULT_PROMPT_SUGGESTIONS, + "user_count": user_count, "code": { "engine": app.state.config.CODE_EXECUTION_ENGINE, }, @@ -1214,11 +1215,10 @@ async def get_app_config(request: Request): "api_key": GOOGLE_DRIVE_API_KEY.value, }, "onedrive": {"client_id": ONEDRIVE_CLIENT_ID.value}, + "license_metadata": app.state.LICENSE_METADATA, **( { - "record_count": user_count, "active_entries": app.state.USER_COUNT, - "license_metadata": app.state.LICENSE_METADATA, } if user.role == "admin" else {} diff --git a/src/lib/components/admin/Settings/General.svelte b/src/lib/components/admin/Settings/General.svelte index 
755812ba8b6..8bb353f62c0 100644 --- a/src/lib/components/admin/Settings/General.svelte +++ b/src/lib/components/admin/Settings/General.svelte @@ -1,4 +1,6 @@ diff --git a/src/lib/components/chat/Chat.svelte b/src/lib/components/chat/Chat.svelte index a5be4240e1f..74f57f564f4 100644 --- a/src/lib/components/chat/Chat.svelte +++ b/src/lib/components/chat/Chat.svelte @@ -1940,6 +1940,18 @@ {#if $banners.length > 0 && !history.currentId && !$chatId && selectedModels.length <= 1}
+ {#if ($config?.license_metadata?.type ?? null) === 'trial'} + + {/if} + {#if ($config?.license_metadata?.seats ?? null) !== null && $config?.user_count > $config?.license_metadata?.seats} diff --git a/src/lib/i18n/locales/ar-BH/translation.json b/src/lib/i18n/locales/ar-BH/translation.json index 73e156f3163..eb627f62624 100644 --- a/src/lib/i18n/locales/ar-BH/translation.json +++ b/src/lib/i18n/locales/ar-BH/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "", "Example: sAMAccountName or uid or userPrincipalName": "", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "", "Yesterday": "أمس", "You": "انت", + "You are currently using a trial license. Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "", "You cannot upload an empty file.": "", diff --git a/src/lib/i18n/locales/bg-BG/translation.json b/src/lib/i18n/locales/bg-BG/translation.json index 1f31fbb05ce..8eda67acfad 100644 --- a/src/lib/i18n/locales/bg-BG/translation.json +++ b/src/lib/i18n/locales/bg-BG/translation.json @@ -450,6 +450,7 @@ "Example: mail": "Пример: поща", "Example: ou=users,dc=foo,dc=example": "Пример: ou=users,dc=foo,dc=example", "Example: sAMAccountName or uid or userPrincipalName": "Пример: sAMAccountName или uid или userPrincipalName", + "Exceeded the number of seats in your license. 
Please contact support to increase the number of seats.": "", "Exclude": "Изключи", "Execute code for analysis": "Изпълнете код за анализ", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "Напишете съдържанието на вашия шаблон за модел тук", "Yesterday": "вчера", "You": "Вие", + "You are currently using a trial license. Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Можете да чатите с максимум {{maxCount}} файл(а) наведнъж.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Можете да персонализирате взаимодействията си с LLM-и, като добавите спомени чрез бутона 'Управление' по-долу, правейки ги по-полезни и съобразени с вас.", "You cannot upload an empty file.": "Не можете да качите празен файл.", diff --git a/src/lib/i18n/locales/bn-BD/translation.json b/src/lib/i18n/locales/bn-BD/translation.json index a114199f123..c132db62b02 100644 --- a/src/lib/i18n/locales/bn-BD/translation.json +++ b/src/lib/i18n/locales/bn-BD/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "", "Example: sAMAccountName or uid or userPrincipalName": "", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "", "Yesterday": "আগামী", "You": "আপনি", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "", "You cannot upload an empty file.": "", diff --git a/src/lib/i18n/locales/ca-ES/translation.json b/src/lib/i18n/locales/ca-ES/translation.json index 69ccfd81c9e..b18326bf3fc 100644 --- a/src/lib/i18n/locales/ca-ES/translation.json +++ b/src/lib/i18n/locales/ca-ES/translation.json @@ -450,6 +450,7 @@ "Example: mail": "Exemple: mail", "Example: ou=users,dc=foo,dc=example": "Exemple: ou=users,dc=foo,dc=example", "Example: sAMAccountName or uid or userPrincipalName": "Exemple: sAMAccountName o uid o userPrincipalName", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "Excloure", "Execute code for analysis": "Executa el codi per analitzar-lo", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "Introdueix el contingut de la plantilla del teu model aquí", "Yesterday": "Ahir", "You": "Tu", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Només pots xatejar amb un màxim de {{maxCount}} fitxers alhora.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Pots personalitzar les teves interaccions amb els models de llenguatge afegint memòries mitjançant el botó 'Gestiona' que hi ha a continuació, fent-les més útils i adaptades a tu.", "You cannot upload an empty file.": "No es pot pujar un ariux buit.", diff --git a/src/lib/i18n/locales/ceb-PH/translation.json b/src/lib/i18n/locales/ceb-PH/translation.json index 578bf28a532..2ae99a9290b 100644 --- a/src/lib/i18n/locales/ceb-PH/translation.json +++ b/src/lib/i18n/locales/ceb-PH/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "", "Example: sAMAccountName or uid or userPrincipalName": "", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "", "Yesterday": "", "You": "", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "", "You cannot upload an empty file.": "", diff --git a/src/lib/i18n/locales/cs-CZ/translation.json b/src/lib/i18n/locales/cs-CZ/translation.json index 65c286db669..6ee02804ebf 100644 --- a/src/lib/i18n/locales/cs-CZ/translation.json +++ b/src/lib/i18n/locales/cs-CZ/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "", "Example: sAMAccountName or uid or userPrincipalName": "", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "Vyloučit", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "", "Yesterday": "Včera", "You": "Vy", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Můžete komunikovat pouze s maximálně {{maxCount}} soubor(y) najednou.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Můžete personalizovat své interakce s LLM pomocí přidávání vzpomínek prostřednictvím tlačítka 'Spravovat' níže, což je učiní pro vás užitečnějšími a lépe přizpůsobenými.", "You cannot upload an empty file.": "Nemůžete nahrát prázdný soubor.", diff --git a/src/lib/i18n/locales/da-DK/translation.json b/src/lib/i18n/locales/da-DK/translation.json index 35aaa7b9f82..598213dcc09 100644 --- a/src/lib/i18n/locales/da-DK/translation.json +++ b/src/lib/i18n/locales/da-DK/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "", "Example: sAMAccountName or uid or userPrincipalName": "", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "", "Yesterday": "I går", "You": "Du", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Du kan kun chatte med maksimalt {{maxCount}} fil(er) ad gangen.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Du kan personliggøre dine interaktioner med LLM'er ved at tilføje minder via knappen 'Administrer' nedenfor, hvilket gør dem mere nyttige og skræddersyet til dig.", "You cannot upload an empty file.": "", diff --git a/src/lib/i18n/locales/de-DE/translation.json b/src/lib/i18n/locales/de-DE/translation.json index 9a60cae7e7b..a7da3d47dd5 100644 --- a/src/lib/i18n/locales/de-DE/translation.json +++ b/src/lib/i18n/locales/de-DE/translation.json @@ -450,6 +450,7 @@ "Example: mail": "Beispiel: mail", "Example: ou=users,dc=foo,dc=example": "Beispiel: ou=users,dc=foo,dc=example", "Example: sAMAccountName or uid or userPrincipalName": "Beispiel: sAMAccountName or uid or userPrincipalName", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "Ausschließen", "Execute code for analysis": "Code für Analyse ausführen", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "Schreiben Sie hier Ihren Modellvorlageninhalt", "Yesterday": "Gestern", "You": "Sie", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Sie können nur mit maximal {{maxCount}} Datei(en) gleichzeitig chatten.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Personalisieren Sie Interaktionen mit LLMs, indem Sie über die Schaltfläche \"Verwalten\" Erinnerungen hinzufügen.", "You cannot upload an empty file.": "Sie können keine leere Datei hochladen.", diff --git a/src/lib/i18n/locales/dg-DG/translation.json b/src/lib/i18n/locales/dg-DG/translation.json index 3d99f4d069c..5c9dc67749b 100644 --- a/src/lib/i18n/locales/dg-DG/translation.json +++ b/src/lib/i18n/locales/dg-DG/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "", "Example: sAMAccountName or uid or userPrincipalName": "", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "", "Yesterday": "", "You": "", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "", "You cannot upload an empty file.": "", diff --git a/src/lib/i18n/locales/el-GR/translation.json b/src/lib/i18n/locales/el-GR/translation.json index 0aa7bd08643..0a5b2393e53 100644 --- a/src/lib/i18n/locales/el-GR/translation.json +++ b/src/lib/i18n/locales/el-GR/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "Παράδειγμα: ou=users,dc=foo,dc=example", "Example: sAMAccountName or uid or userPrincipalName": "Παράδειγμα: sAMAccountName ή uid ή userPrincipalName", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "Εξαίρεση", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "Γράψτε το περιεχόμενο του προτύπου μοντέλου σας εδώ", "Yesterday": "Εχθές", "You": "Εσείς", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Μπορείτε να συνομιλήσετε μόνο με μέγιστο αριθμό {{maxCount}} αρχείου(-ων) ταυτόχρονα.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Μπορείτε να προσωποποιήσετε τις αλληλεπιδράσεις σας με τα LLMs προσθέτοντας αναμνήσεις μέσω του κουμπιού 'Διαχείριση' παρακάτω, κάνοντάς τα πιο χρήσιμα και προσαρμοσμένα σε εσάς.", "You cannot upload an empty file.": "Δεν μπορείτε να ανεβάσετε ένα κενό αρχείο.", diff --git a/src/lib/i18n/locales/en-GB/translation.json b/src/lib/i18n/locales/en-GB/translation.json index a35252accd4..f1aa076c94e 100644 --- a/src/lib/i18n/locales/en-GB/translation.json +++ b/src/lib/i18n/locales/en-GB/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "", "Example: sAMAccountName or uid or userPrincipalName": "", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "", "Yesterday": "", "You": "", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "", "You cannot upload an empty file.": "", diff --git a/src/lib/i18n/locales/en-US/translation.json b/src/lib/i18n/locales/en-US/translation.json index a35252accd4..f1aa076c94e 100644 --- a/src/lib/i18n/locales/en-US/translation.json +++ b/src/lib/i18n/locales/en-US/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "", "Example: sAMAccountName or uid or userPrincipalName": "", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "", "Yesterday": "", "You": "", + "You are currently using a trial license. Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "", "You cannot upload an empty file.": "", diff --git a/src/lib/i18n/locales/es-ES/translation.json b/src/lib/i18n/locales/es-ES/translation.json index e6ef624fbee..c7817e8b31d 100644 --- a/src/lib/i18n/locales/es-ES/translation.json +++ b/src/lib/i18n/locales/es-ES/translation.json @@ -450,6 +450,7 @@ "Example: mail": "Ejemplo: correo", "Example: ou=users,dc=foo,dc=example": "Ejemplo: ou=usuarios,dc=foo,dc=ejemplo", "Example: sAMAccountName or uid or userPrincipalName": "Ejemplo: sAMAccountName o uid o userPrincipalName", + "Exceeded the number of seats in your license. 
Please contact support to increase the number of seats.": "", "Exclude": "Excluir", "Execute code for analysis": "Ejecutar código para análisis", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "Escribe el contenido de tu plantilla de modelo aquí", "Yesterday": "Ayer", "You": "Usted", + "You are currently using a trial license. Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Solo puede chatear con un máximo de {{maxCount}} archivo(s) a la vez.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Puede personalizar sus interacciones con LLMs añadiendo memorias a través del botón 'Gestionar' debajo, haciendo que sean más útiles y personalizados para usted.", "You cannot upload an empty file.": "No puede subir un archivo vacío.", diff --git a/src/lib/i18n/locales/eu-ES/translation.json b/src/lib/i18n/locales/eu-ES/translation.json index b776fd8c6d5..f9c1d9f10f1 100644 --- a/src/lib/i18n/locales/eu-ES/translation.json +++ b/src/lib/i18n/locales/eu-ES/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "Adibidea: ou=users,dc=foo,dc=example", "Example: sAMAccountName or uid or userPrincipalName": "Adibidea: sAMAccountName edo uid edo userPrincipalName", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "Baztertu", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "Idatzi hemen zure modelo txantiloi edukia", "Yesterday": "Atzo", "You": "Zu", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Gehienez {{maxCount}} fitxategirekin txateatu dezakezu aldi berean.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "LLMekin dituzun interakzioak pertsonalizatu ditzakezu memoriak gehituz beheko 'Kudeatu' botoiaren bidez, lagungarriagoak eta zuretzat egokituagoak eginez.", "You cannot upload an empty file.": "Ezin duzu fitxategi huts bat kargatu.", diff --git a/src/lib/i18n/locales/fa-IR/translation.json b/src/lib/i18n/locales/fa-IR/translation.json index f9a0e45954e..937cfbcdbab 100644 --- a/src/lib/i18n/locales/fa-IR/translation.json +++ b/src/lib/i18n/locales/fa-IR/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "", "Example: sAMAccountName or uid or userPrincipalName": "", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "", "Yesterday": "دیروز", "You": "شما", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "شما در هر زمان نهایتا می\u200cتوانید با {{maxCount}} پرونده گفتگو کنید.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "", "You cannot upload an empty file.": "", diff --git a/src/lib/i18n/locales/fi-FI/translation.json b/src/lib/i18n/locales/fi-FI/translation.json index ccf0d73f770..4b9ff557aed 100644 --- a/src/lib/i18n/locales/fi-FI/translation.json +++ b/src/lib/i18n/locales/fi-FI/translation.json @@ -450,6 +450,7 @@ "Example: mail": "Esimerkki: posti", "Example: ou=users,dc=foo,dc=example": "Esimerkki: ou=käyttäjät,dc=foo,dc=example", "Example: sAMAccountName or uid or userPrincipalName": "Esimerkki: sAMAccountName tai uid tai userPrincipalName", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "Jätä pois", "Execute code for analysis": "Suorita koodi analysointia varten", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "Kirjoita mallisi mallinnesisältö tähän", "Yesterday": "Eilen", "You": "Sinä", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Voit keskustella enintään {{maxCount}} tiedoston kanssa kerralla.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Voit personoida vuorovaikutustasi LLM-ohjelmien kanssa lisäämällä muistoja 'Hallitse'-painikkeen kautta, jolloin ne ovat hyödyllisempiä ja räätälöityjä sinua varten.", "You cannot upload an empty file.": "Et voi ladata tyhjää tiedostoa.", diff --git a/src/lib/i18n/locales/fr-CA/translation.json b/src/lib/i18n/locales/fr-CA/translation.json index e203ad2c3c1..769c95787ca 100644 --- a/src/lib/i18n/locales/fr-CA/translation.json +++ b/src/lib/i18n/locales/fr-CA/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "", "Example: sAMAccountName or uid or userPrincipalName": "", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "", "Yesterday": "Hier", "You": "Vous", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Vous pouvez personnaliser vos interactions avec les LLM en ajoutant des souvenirs via le bouton 'Gérer' ci-dessous, ce qui les rendra plus utiles et adaptés à vos besoins.", "You cannot upload an empty file.": "", diff --git a/src/lib/i18n/locales/fr-FR/translation.json b/src/lib/i18n/locales/fr-FR/translation.json index 7307075acb1..9cba643358e 100644 --- a/src/lib/i18n/locales/fr-FR/translation.json +++ b/src/lib/i18n/locales/fr-FR/translation.json @@ -450,6 +450,7 @@ "Example: mail": "Exemple: mail", "Example: ou=users,dc=foo,dc=example": "Exemple: ou=utilisateurs,dc=foo,dc=exemple", "Example: sAMAccountName or uid or userPrincipalName": "Exemple: sAMAccountName ou uid ou userPrincipalName", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "Exclure", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "Écrivez ici le contenu de votre modèle", "Yesterday": "Hier", "You": "Vous", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Vous ne pouvez discuter qu'avec un maximum de {{maxCount}} fichier(s) à la fois.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Vous pouvez personnaliser vos interactions avec les LLM en ajoutant des mémoires à l'aide du bouton « Gérer » ci-dessous, ce qui les rendra plus utiles et mieux adaptées à vos besoins.", "You cannot upload an empty file.": "Vous ne pouvez pas envoyer un fichier vide.", diff --git a/src/lib/i18n/locales/he-IL/translation.json b/src/lib/i18n/locales/he-IL/translation.json index 2e7a9026e23..e5af652c94e 100644 --- a/src/lib/i18n/locales/he-IL/translation.json +++ b/src/lib/i18n/locales/he-IL/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "", "Example: sAMAccountName or uid or userPrincipalName": "", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "", "Yesterday": "אתמול", "You": "אתה", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "", "You cannot upload an empty file.": "", diff --git a/src/lib/i18n/locales/hi-IN/translation.json b/src/lib/i18n/locales/hi-IN/translation.json index 9c56baacd71..2f25632d9eb 100644 --- a/src/lib/i18n/locales/hi-IN/translation.json +++ b/src/lib/i18n/locales/hi-IN/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "", "Example: sAMAccountName or uid or userPrincipalName": "", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "", "Yesterday": "कल", "You": "आप", + "You are currently using a trial license. Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "", "You cannot upload an empty file.": "", diff --git a/src/lib/i18n/locales/hr-HR/translation.json b/src/lib/i18n/locales/hr-HR/translation.json index 0ceb14cdcb1..ffd87f83d7c 100644 --- a/src/lib/i18n/locales/hr-HR/translation.json +++ b/src/lib/i18n/locales/hr-HR/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "", "Example: sAMAccountName or uid or userPrincipalName": "", + "Exceeded the number of seats in your license. 
Please contact support to increase the number of seats.": "", "Exclude": "", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "", "Yesterday": "Jučer", "You": "Vi", + "You are currently using a trial license. Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Možete personalizirati svoje interakcije s LLM-ima dodavanjem uspomena putem gumba 'Upravljanje' u nastavku, čineći ih korisnijima i prilagođenijima vama.", "You cannot upload an empty file.": "", diff --git a/src/lib/i18n/locales/hu-HU/translation.json b/src/lib/i18n/locales/hu-HU/translation.json index a726a1f6f08..306ec737f19 100644 --- a/src/lib/i18n/locales/hu-HU/translation.json +++ b/src/lib/i18n/locales/hu-HU/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "", "Example: sAMAccountName or uid or userPrincipalName": "", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "Kizárás", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "", "Yesterday": "Tegnap", "You": "Ön", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Egyszerre maximum {{maxCount}} fájllal tud csevegni.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Az LLM-ekkel való interakcióit személyre szabhatja emlékek hozzáadásával a lenti 'Kezelés' gomb segítségével, így azok még hasznosabbak és személyre szabottabbak lesznek.", "You cannot upload an empty file.": "Nem tölthet fel üres fájlt.", diff --git a/src/lib/i18n/locales/id-ID/translation.json b/src/lib/i18n/locales/id-ID/translation.json index 983770228e9..43f4c58fc90 100644 --- a/src/lib/i18n/locales/id-ID/translation.json +++ b/src/lib/i18n/locales/id-ID/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "", "Example: sAMAccountName or uid or userPrincipalName": "", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "", "Yesterday": "Kemarin", "You": "Anda", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Anda dapat mempersonalisasi interaksi Anda dengan LLM dengan menambahkan kenangan melalui tombol 'Kelola' di bawah ini, sehingga lebih bermanfaat dan disesuaikan untuk Anda.", "You cannot upload an empty file.": "", diff --git a/src/lib/i18n/locales/ie-GA/translation.json b/src/lib/i18n/locales/ie-GA/translation.json index 029b75c1246..41b803ff1e1 100644 --- a/src/lib/i18n/locales/ie-GA/translation.json +++ b/src/lib/i18n/locales/ie-GA/translation.json @@ -450,6 +450,7 @@ "Example: mail": "Sampla: ríomhphost", "Example: ou=users,dc=foo,dc=example": "Sampla: ou=úsáideoirí,dc=foo,dc=sampla", "Example: sAMAccountName or uid or userPrincipalName": "Sampla: sAMAaccountName nó uid nó userPrincipalName", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "Eisigh", "Execute code for analysis": "Íosluchtaigh cód le haghaidh anailíse", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "Scríobh do mhúnla ábhar teimpléad anseo", "Yesterday": "Inné", "You": "Tú", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Ní féidir leat comhrá a dhéanamh ach le comhad {{maxCount}} ar a mhéad ag an am.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Is féidir leat do chuid idirghníomhaíochtaí le LLManna a phearsantú ach cuimhní cinn a chur leis tríd an gcnaipe 'Bainistigh' thíos, rud a fhágann go mbeidh siad níos cabhrach agus níos oiriúnaí duit.", "You cannot upload an empty file.": "Ní féidir leat comhad folamh a uaslódáil.", diff --git a/src/lib/i18n/locales/it-IT/translation.json b/src/lib/i18n/locales/it-IT/translation.json index 6f652451250..2499e366c07 100644 --- a/src/lib/i18n/locales/it-IT/translation.json +++ b/src/lib/i18n/locales/it-IT/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "", "Example: sAMAccountName or uid or userPrincipalName": "", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "", "Yesterday": "Ieri", "You": "Tu", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "", "You cannot upload an empty file.": "", diff --git a/src/lib/i18n/locales/ja-JP/translation.json b/src/lib/i18n/locales/ja-JP/translation.json index 8252a429895..60ebb353046 100644 --- a/src/lib/i18n/locales/ja-JP/translation.json +++ b/src/lib/i18n/locales/ja-JP/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "", "Example: sAMAccountName or uid or userPrincipalName": "", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "", "Yesterday": "昨日", "You": "あなた", + "You are currently using a trial license. Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "", "You cannot upload an empty file.": "", diff --git a/src/lib/i18n/locales/ka-GE/translation.json b/src/lib/i18n/locales/ka-GE/translation.json index 90c5960df2b..980a1bec8bb 100644 --- a/src/lib/i18n/locales/ka-GE/translation.json +++ b/src/lib/i18n/locales/ka-GE/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "", "Example: sAMAccountName or uid or userPrincipalName": "", + "Exceeded the number of seats in your license. 
Please contact support to increase the number of seats.": "", "Exclude": "გამორიცხვა", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "", "Yesterday": "გუშინ", "You": "თქვენ", + "You are currently using a trial license. Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "", "You cannot upload an empty file.": "", diff --git a/src/lib/i18n/locales/ko-KR/translation.json b/src/lib/i18n/locales/ko-KR/translation.json index aba5ffc4c79..bed205601bd 100644 --- a/src/lib/i18n/locales/ko-KR/translation.json +++ b/src/lib/i18n/locales/ko-KR/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "", "Example: sAMAccountName or uid or userPrincipalName": "", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "미포함", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "여기에 모델 템플릿 내용을 입력하세요", "Yesterday": "어제", "You": "당신", + "You are currently using a trial license. Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "동시에 최대 {{maxCount}} 파일과만 대화할 수 있습니다 ", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "아래 '관리' 버튼으로 메모리를 추가하여 LLM들과의 상호작용을 개인화할 수 있습니다. 
이를 통해 더 유용하고 맞춤화된 경험을 제공합니다.", "You cannot upload an empty file.": "빈 파일을 업로드 할 수 없습니다", diff --git a/src/lib/i18n/locales/lt-LT/translation.json b/src/lib/i18n/locales/lt-LT/translation.json index a25df401650..ef85c677e8e 100644 --- a/src/lib/i18n/locales/lt-LT/translation.json +++ b/src/lib/i18n/locales/lt-LT/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "", "Example: sAMAccountName or uid or userPrincipalName": "", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "", "Yesterday": "Vakar", "You": "Jūs", + "You are currently using a trial license. Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Galite pagerinti modelių darbą suteikdami jiems atminties funkcionalumą.", "You cannot upload an empty file.": "", diff --git a/src/lib/i18n/locales/ms-MY/translation.json b/src/lib/i18n/locales/ms-MY/translation.json index 43f0fa58aba..8a1b1e021e5 100644 --- a/src/lib/i18n/locales/ms-MY/translation.json +++ b/src/lib/i18n/locales/ms-MY/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "", "Example: sAMAccountName or uid or userPrincipalName": "", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "", "Yesterday": "Semalam", "You": "Anda", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Anda boleh memperibadikan interaksi anda dengan LLM dengan menambahkan memori melalui butang 'Urus' di bawah, menjadikannya lebih membantu dan disesuaikan dengan anda.", "You cannot upload an empty file.": "", diff --git a/src/lib/i18n/locales/nb-NO/translation.json b/src/lib/i18n/locales/nb-NO/translation.json index a2e597fa8cb..4e97236ff03 100644 --- a/src/lib/i18n/locales/nb-NO/translation.json +++ b/src/lib/i18n/locales/nb-NO/translation.json @@ -450,6 +450,7 @@ "Example: mail": "Eksempel: mail", "Example: ou=users,dc=foo,dc=example": "Eksempel: ou=users,dc=foo,dc=example", "Example: sAMAccountName or uid or userPrincipalName": "Eksempel: sAMAccountName eller uid eller userPrincipalName", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "Utelukk", "Execute code for analysis": "Kjør kode for analyse", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "Skriv inn modellens malinnhold her", "Yesterday": "I går", "You": "Du", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Du kan bare chatte med maksimalt {{maxCount}} fil(er) om gangen.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Du kan tilpasse interaksjonene dine med språkmodeller ved å legge til minner gjennom Administrer-knappen nedenfor, slik at de blir mer til nyttige og tilpasset deg.", "You cannot upload an empty file.": "Du kan ikke laste opp en tom fil.", diff --git a/src/lib/i18n/locales/nl-NL/translation.json b/src/lib/i18n/locales/nl-NL/translation.json index 740406535fc..25ffe027c75 100644 --- a/src/lib/i18n/locales/nl-NL/translation.json +++ b/src/lib/i18n/locales/nl-NL/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "Voorbeeld: ou=users,dc=foo,dc=example", "Example: sAMAccountName or uid or userPrincipalName": "Voorbeeld: sAMAccountName or uid or userPrincipalName", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "Sluit uit", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "Schrijf je modelsjablooninhoud hier", "Yesterday": "Gisteren", "You": "Jij", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Je kunt slechts met maximaal {{maxCount}} bestand(en) tegelijk chatten", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Je kunt je interacties met LLM's personaliseren door herinneringen toe te voegen via de 'Beheer'-knop hieronder, waardoor ze nuttiger en voor jou op maat gemaakt worden.", "You cannot upload an empty file.": "Je kunt een leeg bestand niet uploaden.", diff --git a/src/lib/i18n/locales/pa-IN/translation.json b/src/lib/i18n/locales/pa-IN/translation.json index 4538b62e8d4..e473441e1fa 100644 --- a/src/lib/i18n/locales/pa-IN/translation.json +++ b/src/lib/i18n/locales/pa-IN/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "", "Example: sAMAccountName or uid or userPrincipalName": "", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "", "Yesterday": "ਕੱਲ੍ਹ", "You": "ਤੁਸੀਂ", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "", "You cannot upload an empty file.": "", diff --git a/src/lib/i18n/locales/pl-PL/translation.json b/src/lib/i18n/locales/pl-PL/translation.json index 35cbb459b5f..f3e1be87d1b 100644 --- a/src/lib/i18n/locales/pl-PL/translation.json +++ b/src/lib/i18n/locales/pl-PL/translation.json @@ -450,6 +450,7 @@ "Example: mail": "Przykład: mail", "Example: ou=users,dc=foo,dc=example": "Przykład: ou=users,dc=foo,dc=example", "Example: sAMAccountName or uid or userPrincipalName": "Przykład: sAMAccountName lub uid lub userPrincipalName", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "Wykluczyć", "Execute code for analysis": "Wykonaj kod do analizy", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "Napisz tutaj zawartość szablonu modelu", "Yesterday": "Wczoraj", "You": "Ty", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Możesz rozmawiać jednocześnie maksymalnie z {{maxCount}} plikiem(i).", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Możesz spersonalizować swoje interakcje z LLM, dodając wspomnienia za pomocą przycisku 'Zarządzaj' poniżej, dzięki czemu będą one bardziej pomocne i dostosowane do Ciebie.", "You cannot upload an empty file.": "Nie możesz przesłać pustego pliku.", diff --git a/src/lib/i18n/locales/pt-BR/translation.json b/src/lib/i18n/locales/pt-BR/translation.json index 1cfd60095f2..9dc5c4d602d 100644 --- a/src/lib/i18n/locales/pt-BR/translation.json +++ b/src/lib/i18n/locales/pt-BR/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "Exemplo: ou=users,dc=foo,dc=example", "Example: sAMAccountName or uid or userPrincipalName": "Exemplo: sAMAccountName ou uid ou userPrincipalName", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "Excluir", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "Escreva o conteúdo do template do modelo aqui.", "Yesterday": "Ontem", "You": "Você", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Você só pode conversar com no máximo {{maxCount}} arquivo(s) de cada vez.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Você pode personalizar suas interações com LLMs adicionando memórias através do botão 'Gerenciar' abaixo, tornando-as mais úteis e adaptadas a você.", "You cannot upload an empty file.": "Você não pode carregar um arquivo vazio.", diff --git a/src/lib/i18n/locales/pt-PT/translation.json b/src/lib/i18n/locales/pt-PT/translation.json index b4957815f07..12799885db4 100644 --- a/src/lib/i18n/locales/pt-PT/translation.json +++ b/src/lib/i18n/locales/pt-PT/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "", "Example: sAMAccountName or uid or userPrincipalName": "", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "", "Yesterday": "Ontem", "You": "Você", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Você pode personalizar as suas interações com LLMs adicionando memórias através do botão ‘Gerir’ abaixo, tornando-as mais úteis e personalizadas para você.", "You cannot upload an empty file.": "", diff --git a/src/lib/i18n/locales/ro-RO/translation.json b/src/lib/i18n/locales/ro-RO/translation.json index e6c585e5087..309e1e309f2 100644 --- a/src/lib/i18n/locales/ro-RO/translation.json +++ b/src/lib/i18n/locales/ro-RO/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "", "Example: sAMAccountName or uid or userPrincipalName": "", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "Exclude", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "", "Yesterday": "Ieri", "You": "Tu", + "You are currently using a trial license. Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Puteți discuta cu un număr maxim de {{maxCount}} fișier(e) simultan.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Puteți personaliza interacțiunile dvs. 
cu LLM-urile adăugând amintiri prin butonul 'Gestionează' de mai jos, făcându-le mai utile și adaptate la dvs.", "You cannot upload an empty file.": "Nu poți încărca un fișier gol.", diff --git a/src/lib/i18n/locales/ru-RU/translation.json b/src/lib/i18n/locales/ru-RU/translation.json index 9aec4a83402..2b6349ed642 100644 --- a/src/lib/i18n/locales/ru-RU/translation.json +++ b/src/lib/i18n/locales/ru-RU/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "", "Example: sAMAccountName or uid or userPrincipalName": "", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "Исключать", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "Напишите здесь содержимое шаблона вашей модели.", "Yesterday": "Вчера", "You": "Вы", + "You are currently using a trial license. Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Одновременно вы можете общаться только с максимальным количеством файлов {{maxCount}}.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Вы можете персонализировать свое взаимодействие с LLMs, добавив воспоминания с помощью кнопки \"Управлять\" ниже, что сделает их более полезными и адаптированными для вас.", "You cannot upload an empty file.": "Вы не можете загрузить пустой файл.", diff --git a/src/lib/i18n/locales/sk-SK/translation.json b/src/lib/i18n/locales/sk-SK/translation.json index b7d1249f8fc..549a9bdf43b 100644 --- a/src/lib/i18n/locales/sk-SK/translation.json +++ b/src/lib/i18n/locales/sk-SK/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "", "Example: sAMAccountName or uid or userPrincipalName": "", + "Exceeded the number of seats in your license. 
Please contact support to increase the number of seats.": "", "Exclude": "Vylúčiť", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "", "Yesterday": "Včera", "You": "Vy", + "You are currently using a trial license. Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Môžete komunikovať len s maximálne {{maxCount}} súbor(ami) naraz.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Môžete personalizovať svoje interakcie s LLM pridaním spomienok prostredníctvom tlačidla 'Spravovať' nižšie, čo ich urobí pre vás užitočnejšími a lepšie prispôsobenými.", "You cannot upload an empty file.": "Nemôžete nahrať prázdny súbor.", diff --git a/src/lib/i18n/locales/sr-RS/translation.json b/src/lib/i18n/locales/sr-RS/translation.json index da4dd73b9a4..eb41f50b3c7 100644 --- a/src/lib/i18n/locales/sr-RS/translation.json +++ b/src/lib/i18n/locales/sr-RS/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "", "Example: sAMAccountName or uid or userPrincipalName": "", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "", "Yesterday": "Јуче", "You": "Ти", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Можете учинити разговор са ВЈМ-овима приснијим додавањем сећања користећи „Управљај“ думе испод и тиме их учинити приснијим и кориснијим.", "You cannot upload an empty file.": "", diff --git a/src/lib/i18n/locales/sv-SE/translation.json b/src/lib/i18n/locales/sv-SE/translation.json index 5138884507c..2feb7f338e6 100644 --- a/src/lib/i18n/locales/sv-SE/translation.json +++ b/src/lib/i18n/locales/sv-SE/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "", "Example: sAMAccountName or uid or userPrincipalName": "", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "Skriv din models innehåll här", "Yesterday": "Igår", "You": "Dig", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Du kan endast chatta med maximalt {{maxCount}} fil(er) på samma gång", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Du kan anpassa dina interaktioner med stora språkmodeller genom att lägga till minnen via knappen 'Hantera' nedan, så att de blir mer användbara och skräddarsydda för dig.", "You cannot upload an empty file.": "", diff --git a/src/lib/i18n/locales/th-TH/translation.json b/src/lib/i18n/locales/th-TH/translation.json index 585dd459128..7beaed61283 100644 --- a/src/lib/i18n/locales/th-TH/translation.json +++ b/src/lib/i18n/locales/th-TH/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "", "Example: sAMAccountName or uid or userPrincipalName": "", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "", "Yesterday": "เมื่อวาน", "You": "คุณ", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "คุณสามารถปรับแต่งการโต้ตอบของคุณกับ LLMs โดยเพิ่มความทรงจำผ่านปุ่ม 'จัดการ' ด้านล่าง ทำให้มันมีประโยชน์และเหมาะกับคุณมากขึ้น", "You cannot upload an empty file.": "", diff --git a/src/lib/i18n/locales/tk-TW/translation.json b/src/lib/i18n/locales/tk-TW/translation.json index a35252accd4..f1aa076c94e 100644 --- a/src/lib/i18n/locales/tk-TW/translation.json +++ b/src/lib/i18n/locales/tk-TW/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "", "Example: sAMAccountName or uid or userPrincipalName": "", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "", "Yesterday": "", "You": "", + "You are currently using a trial license. Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "", "You cannot upload an empty file.": "", diff --git a/src/lib/i18n/locales/tr-TR/translation.json b/src/lib/i18n/locales/tr-TR/translation.json index 54a75de61d7..e62e1f52a69 100644 --- a/src/lib/i18n/locales/tr-TR/translation.json +++ b/src/lib/i18n/locales/tr-TR/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "Örnek: ou=users,dc=foo,dc=example", "Example: sAMAccountName or uid or userPrincipalName": "Örnek: sAMAccountName or uid or userPrincipalName", + "Exceeded the number of seats in your license. 
Please contact support to increase the number of seats.": "", "Exclude": "Hariç tut", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "Model şablon içeriğinizi buraya yazın", "Yesterday": "Dün", "You": "Sen", + "You are currently using a trial license. Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Aynı anda en fazla {{maxCount}} dosya ile sohbet edebilirsiniz.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Aşağıdaki 'Yönet' düğmesi aracılığıyla bellekler ekleyerek LLM'lerle etkileşimlerinizi kişiselleştirebilir, onları daha yararlı ve size özel hale getirebilirsiniz.", "You cannot upload an empty file.": "Boş bir dosya yükleyemezsiniz.", diff --git a/src/lib/i18n/locales/uk-UA/translation.json b/src/lib/i18n/locales/uk-UA/translation.json index 412819568ad..4efc20a7a2b 100644 --- a/src/lib/i18n/locales/uk-UA/translation.json +++ b/src/lib/i18n/locales/uk-UA/translation.json @@ -450,6 +450,7 @@ "Example: mail": "Приклад: пошта", "Example: ou=users,dc=foo,dc=example": "Приклад: ou=users,dc=foo,dc=example", "Example: sAMAccountName or uid or userPrincipalName": "Приклад: sAMAccountName або uid або userPrincipalName", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "Виключити", "Execute code for analysis": "Виконати код для аналізу", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "Напишіть вміст шаблону моделі тут", "Yesterday": "Вчора", "You": "Ви", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Ви можете спілкуватися лише з максимальною кількістю {{maxCount}} файлів одночасно.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Ви можете налаштувати ваші взаємодії з мовними моделями, додавши спогади через кнопку 'Керувати' внизу, що зробить їх більш корисними та персоналізованими для вас.", "You cannot upload an empty file.": "Ви не можете завантажити порожній файл.", diff --git a/src/lib/i18n/locales/ur-PK/translation.json b/src/lib/i18n/locales/ur-PK/translation.json index 4da71e85038..ab255fc34fb 100644 --- a/src/lib/i18n/locales/ur-PK/translation.json +++ b/src/lib/i18n/locales/ur-PK/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "", "Example: sAMAccountName or uid or userPrincipalName": "", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "خارج کریں", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "", "Yesterday": "کل", "You": "آپ", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "آپ ایک وقت میں زیادہ سے زیادہ {{maxCount}} فائل(وں) کے ساتھ صرف چیٹ کر سکتے ہیں", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "آپ نیچے موجود 'Manage' بٹن کے ذریعے LLMs کے ساتھ اپنی بات چیت کو یادداشتیں شامل کرکے ذاتی بنا سکتے ہیں، جو انہیں آپ کے لیے زیادہ مددگار اور آپ کے متعلق بنائے گی", "You cannot upload an empty file.": "آپ خالی فائل اپلوڈ نہیں کر سکتے", diff --git a/src/lib/i18n/locales/vi-VN/translation.json b/src/lib/i18n/locales/vi-VN/translation.json index b52b65551c4..3f40b10b566 100644 --- a/src/lib/i18n/locales/vi-VN/translation.json +++ b/src/lib/i18n/locales/vi-VN/translation.json @@ -450,6 +450,7 @@ "Example: mail": "", "Example: ou=users,dc=foo,dc=example": "", "Example: sAMAccountName or uid or userPrincipalName": "", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "", "Execute code for analysis": "", "Expand": "", @@ -1157,6 +1158,7 @@ "Write your model template content here": "", "Yesterday": "Hôm qua", "You": "Bạn", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Bạn có thể cá nhân hóa các tương tác của mình với LLM bằng cách thêm bộ nhớ thông qua nút 'Quản lý' bên dưới, làm cho chúng hữu ích hơn và phù hợp với bạn hơn.", "You cannot upload an empty file.": "", diff --git a/src/lib/i18n/locales/zh-CN/translation.json b/src/lib/i18n/locales/zh-CN/translation.json index 7dad89b2568..eb848fc91c8 100644 --- a/src/lib/i18n/locales/zh-CN/translation.json +++ b/src/lib/i18n/locales/zh-CN/translation.json @@ -450,6 +450,7 @@ "Example: mail": "例如:mail", "Example: ou=users,dc=foo,dc=example": "例如:ou=users,dc=foo,dc=example", "Example: sAMAccountName or uid or userPrincipalName": "例如:sAMAccountName 或 uid 或 userPrincipalName", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "排除", "Execute code for analysis": "执行代码进行分析", "Expand": "展开", @@ -1157,6 +1158,7 @@ "Write your model template content here": "在此写入模型模板内容", "Yesterday": "昨天", "You": "你", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "每次对话最多仅能附上 {{maxCount}} 个文件。", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "通过点击下方的“管理”按钮,你可以添加记忆,以个性化大语言模型的互动,使其更有用,更符合你的需求。", "You cannot upload an empty file.": "请勿上传空文件。", diff --git a/src/lib/i18n/locales/zh-TW/translation.json b/src/lib/i18n/locales/zh-TW/translation.json index 9b27514cf99..8cb3632c4ec 100644 --- a/src/lib/i18n/locales/zh-TW/translation.json +++ b/src/lib/i18n/locales/zh-TW/translation.json @@ -450,6 +450,7 @@ "Example: mail": "範例:mail", "Example: ou=users,dc=foo,dc=example": "範例:ou=users,dc=foo,dc=example", "Example: sAMAccountName or uid or userPrincipalName": "範例:sAMAccountName 或 uid 或 userPrincipalName", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "排除", "Execute code for analysis": "執行程式碼以進行分析", "Expand": "展開", @@ -1157,6 +1158,7 @@ "Write your model template content here": "在此撰寫您的模型範本内容", "Yesterday": "昨天", "You": "您", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "您一次最多只能與 {{maxCount}} 個檔案進行對話。", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "您可以透過下方的「管理」按鈕新增記憶,將您與大型語言模型的互動個人化,讓它們更有幫助並更符合您的需求。", "You cannot upload an empty file.": "您無法上傳空檔案", From 0ce6975ec8134c2f71e12acbe059a9cd1c3849b5 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Tue, 4 Mar 2025 21:49:42 -0800 Subject: [PATCH 202/623] chore: bump --- .github/ISSUE_TEMPLATE/bug_report.md | 80 ----------- .github/ISSUE_TEMPLATE/bug_report.yaml | 144 ++++++++++++++++++++ .github/ISSUE_TEMPLATE/feature_request.yaml | 14 +- package-lock.json | 4 +- package.json | 2 +- 5 files changed, 154 insertions(+), 90 deletions(-) delete mode 100644 .github/ISSUE_TEMPLATE/bug_report.md create mode 100644 .github/ISSUE_TEMPLATE/bug_report.yaml diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md deleted file mode 100644 index d0f38c2334e..00000000000 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -name: Bug report -about: Create a report to help us improve -title: '' -labels: '' -assignees: '' ---- - -# Bug Report - -## Important Notes - -- **Before submitting a bug report**: Please check the Issues or Discussions section to see if a similar issue or feature request has already been posted. It's likely we're already tracking it! If you’re unsure, start a discussion post first. This will help us efficiently focus on improving the project. - -- **Collaborate respectfully**: We value a constructive attitude, so please be mindful of your communication. If negativity is part of your approach, our capacity to engage may be limited. We’re here to help if you’re open to learning and communicating positively. 
Remember, Open WebUI is a volunteer-driven project managed by a single maintainer and supported by contributors who also have full-time jobs. We appreciate your time and ask that you respect ours. - -- **Contributing**: If you encounter an issue, we highly encourage you to submit a pull request or fork the project. We actively work to prevent contributor burnout to maintain the quality and continuity of Open WebUI. - -- **Bug reproducibility**: If a bug cannot be reproduced with a `:main` or `:dev` Docker setup, or a pip install with Python 3.11, it may require additional help from the community. In such cases, we will move it to the "issues" Discussions section due to our limited resources. We encourage the community to assist with these issues. Remember, it’s not that the issue doesn’t exist; we need your help! - -Note: Please remove the notes above when submitting your post. Thank you for your understanding and support! - ---- - -## Installation Method - -[Describe the method you used to install the project, e.g., git clone, Docker, pip, etc.] - -## Environment - -- **Open WebUI Version:** [e.g., v0.3.11] -- **Ollama (if applicable):** [e.g., v0.2.0, v0.1.32-rc1] - -- **Operating System:** [e.g., Windows 10, macOS Big Sur, Ubuntu 20.04] -- **Browser (if applicable):** [e.g., Chrome 100.0, Firefox 98.0] - -**Confirmation:** - -- [ ] I have read and followed all the instructions provided in the README.md. -- [ ] I am on the latest version of both Open WebUI and Ollama. -- [ ] I have included the browser console logs. -- [ ] I have included the Docker container logs. -- [ ] I have provided the exact steps to reproduce the bug in the "Steps to Reproduce" section below. - -## Expected Behavior: - -[Describe what you expected to happen.] - -## Actual Behavior: - -[Describe what actually happened.] 
- -## Description - -**Bug Summary:** -[Provide a brief but clear summary of the bug] - -## Reproduction Details - -**Steps to Reproduce:** -[Outline the steps to reproduce the bug. Be as detailed as possible.] - -## Logs and Screenshots - -**Browser Console Logs:** -[Include relevant browser console logs, if applicable] - -**Docker Container Logs:** -[Include relevant Docker container logs, if applicable] - -**Screenshots/Screen Recordings (if applicable):** -[Attach any relevant screenshots to help illustrate the issue] - -## Additional Information - -[Include any additional details that may help in understanding and reproducing the issue. This could include specific configurations, error messages, or anything else relevant to the bug.] - -## Note - -If the bug report is incomplete or does not follow the provided instructions, it may not be addressed. Please ensure that you have followed the steps outlined in the README.md and troubleshooting.md documents, and provide all necessary information for us to reproduce and address the issue. Thank you! diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml new file mode 100644 index 00000000000..171a82ca81e --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yaml @@ -0,0 +1,144 @@ +name: Bug Report +description: Create a detailed bug report to help us improve Open WebUI. +title: 'issue: ' +labels: ['bug', 'triage'] +assignees: [] +body: + - type: markdown + attributes: + value: | + # Bug Report + + ## Important Notes + + - **Before submitting a bug report**: Please check the [Issues](https://github.com/open-webui/open-webui/issues) or [Discussions](https://github.com/open-webui/open-webui/discussions) sections to see if a similar issue has already been reported. If unsure, start a discussion first, as this helps us efficiently focus on improving the project. 
+ + - **Respectful collaboration**: Open WebUI is a volunteer-driven project with a single maintainer and contributors who also have full-time jobs. Please be constructive and respectful in your communication. + + - **Contributing**: If you encounter an issue, consider submitting a pull request or forking the project. We prioritize preventing contributor burnout to maintain Open WebUI's quality. + + - **Bug Reproducibility**: If a bug cannot be reproduced using a `:main` or `:dev` Docker setup or with `pip install` on Python 3.11, community assistance may be required. In such cases, we will move it to the "[Issues](https://github.com/open-webui/open-webui/discussions/categories/issues)" Discussions section. Your help is appreciated! + + - type: checkboxes + id: issue-check + attributes: + label: Check Existing Issues + description: Confirm that you’ve checked for existing reports before submitting a new one. + options: + - label: I have searched the existing issues and discussions. + required: true + + - type: dropdown + id: installation-method + attributes: + label: Installation Method + description: How did you install Open WebUI? 
+ options: + - Git Clone + - Pip Install + - Docker + - Other + validations: + required: true + + - type: input + id: open-webui-version + attributes: + label: Open WebUI Version + description: Specify the version (e.g., v0.3.11) + validations: + required: true + + - type: input + id: ollama-version + attributes: + label: Ollama Version (if applicable) + description: Specify the version (e.g., v0.2.0, or v0.1.32-rc1) + validations: + required: false + + - type: input + id: operating-system + attributes: + label: Operating System + description: Specify the OS (e.g., Windows 10, macOS Sonoma, Ubuntu 22.04) + validations: + required: true + + - type: input + id: browser + attributes: + label: Browser (if applicable) + description: Specify the browser/version (e.g., Chrome 100.0, Firefox 98.0) + validations: + required: false + + - type: checkboxes + id: confirmation + attributes: + label: Confirmation + description: Ensure the following prerequisites have been met. + options: + - label: I have read and followed all instructions in `README.md`. + required: true + - label: I am using the latest version of **both** Open WebUI and Ollama. + required: true + - label: I have checked the browser console logs. + required: true + - label: I have checked the Docker container logs. + required: true + - label: I have listed steps to reproduce the bug in detail. + required: true + + - type: textarea + id: expected-behavior + attributes: + label: Expected Behavior + description: Describe what should have happened. + validations: + required: true + + - type: textarea + id: actual-behavior + attributes: + label: Actual Behavior + description: Describe what actually happened. + validations: + required: true + + - type: textarea + id: reproduction-steps + attributes: + label: Steps to Reproduce + description: Provide step-by-step instructions to reproduce the issue. + placeholder: | + 1. Go to '...' + 2. Click on '...' + 3. Scroll down to '...' + 4. See the error message '...' 
+ validations: + required: true + + - type: textarea + id: logs-screenshots + attributes: + label: Logs & Screenshots + description: Include relevant logs, errors, or screenshots to help diagnose the issue. + placeholder: 'Attach logs from the browser console, Docker logs, or error messages.' + validations: + required: true + + - type: textarea + id: additional-info + attributes: + label: Additional Information + description: Provide any extra details that may assist in understanding the issue. + validations: + required: false + + - type: markdown + attributes: + value: | + ## Note + If the bug report is incomplete or does not follow instructions, it may not be addressed. Ensure that you've followed all the **README.md** and **troubleshooting.md** guidelines, and provide all necessary information for us to reproduce the issue. + Thank you for contributing to Open WebUI! diff --git a/.github/ISSUE_TEMPLATE/feature_request.yaml b/.github/ISSUE_TEMPLATE/feature_request.yaml index cc7a97c958b..2a326f65e46 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yaml +++ b/.github/ISSUE_TEMPLATE/feature_request.yaml @@ -1,7 +1,7 @@ name: Feature Request description: Suggest an idea for this project -title: "[Feature Request]: " -labels: ["triage"] +title: 'feat: ' +labels: ['triage'] body: - type: markdown attributes: @@ -11,7 +11,7 @@ body: Please check the [Issues](https://github.com/open-webui/open-webui/issues) or [Discussions](https://github.com/open-webui/open-webui/discussions) to see if a similar request has been posted. It's likely we're already tracking it! If you’re unsure, start a discussion post first. This will help us efficiently focus on improving the project. - + ### Collaborate respectfully We value a **constructive attitude**, so please be mindful of your communication. If negativity is part of your approach, our capacity to engage may be limited. We're here to help if you're **open to learning** and **communicating positively**. 
@@ -19,16 +19,16 @@ body: - Open WebUI is a **volunteer-driven project** - It's managed by a **single maintainer** - It's supported by contributors who also have **full-time jobs** - + We appreciate your time and ask that you **respect ours**. - + ### Contributing If you encounter an issue, we highly encourage you to submit a pull request or fork the project. We actively work to prevent contributor burnout to maintain the quality and continuity of Open WebUI. - + ### Bug reproducibility If a bug cannot be reproduced with a `:main` or `:dev` Docker setup, or a `pip install` with Python 3.11, it may require additional help from the community. In such cases, we will move it to the "[issues](https://github.com/open-webui/open-webui/discussions/categories/issues)" Discussions section due to our limited resources. We encourage the community to assist with these issues. Remember, it’s not that the issue doesn’t exist; we need your help! - + - type: checkboxes id: existing-issue attributes: diff --git a/package-lock.json b/package-lock.json index e48c7930e18..4b69377b228 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "open-webui", - "version": "0.5.18", + "version": "0.5.19", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "open-webui", - "version": "0.5.18", + "version": "0.5.19", "dependencies": { "@codemirror/lang-javascript": "^6.2.2", "@codemirror/lang-python": "^6.1.6", diff --git a/package.json b/package.json index 1d2e86741a7..26700bdc592 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "open-webui", - "version": "0.5.18", + "version": "0.5.19", "private": true, "scripts": { "dev": "npm run pyodide:fetch && vite dev --host", From e2d60bd92e983f42feb1337a5d20ad534032c46b Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Tue, 4 Mar 2025 22:10:47 -0800 Subject: [PATCH 203/623] chore: format --- backend/open_webui/utils/filter.py | 1 - src/lib/utils/onedrive-file-picker.ts | 43 
++++++++++++++------------- 2 files changed, 23 insertions(+), 21 deletions(-) diff --git a/backend/open_webui/utils/filter.py b/backend/open_webui/utils/filter.py index 8a7b26773bc..0edc2ac709e 100644 --- a/backend/open_webui/utils/filter.py +++ b/backend/open_webui/utils/filter.py @@ -65,7 +65,6 @@ async def process_filter_functions( **(valves if valves else {}) ) - try: # Prepare parameters sig = inspect.signature(handler) diff --git a/src/lib/utils/onedrive-file-picker.ts b/src/lib/utils/onedrive-file-picker.ts index 37b14045a96..60d2bb13c97 100644 --- a/src/lib/utils/onedrive-file-picker.ts +++ b/src/lib/utils/onedrive-file-picker.ts @@ -6,7 +6,7 @@ let CLIENT_ID = ''; async function getCredentials() { if (CLIENT_ID) return; - + const response = await fetch('/api/config'); if (!response.ok) { throw new Error('Failed to fetch OneDrive credentials'); @@ -18,7 +18,6 @@ async function getCredentials() { } } - let msalInstance: PublicClientApplication | null = null; // Initialize MSAL authentication @@ -27,24 +26,26 @@ async function initializeMsal() { if (!CLIENT_ID) { await getCredentials(); } - + const msalParams = { auth: { authority: 'https://login.microsoftonline.com/consumers', clientId: CLIENT_ID } }; - + if (!msalInstance) { msalInstance = new PublicClientApplication(msalParams); if (msalInstance.initialize) { await msalInstance.initialize(); } } - + return msalInstance; } catch (error) { - throw new Error('MSAL initialization failed: ' + (error instanceof Error ? error.message : String(error))); + throw new Error( + 'MSAL initialization failed: ' + (error instanceof Error ? 
error.message : String(error)) + ); } } @@ -57,14 +58,14 @@ async function getToken(): Promise { if (!msalInstance) { throw new Error('MSAL not initialized'); } - + const resp = await msalInstance.acquireTokenSilent(authParams); accessToken = resp.accessToken; } catch (err) { if (!msalInstance) { throw new Error('MSAL not initialized'); } - + try { const resp = await msalInstance.loginPopup(authParams); msalInstance.setActiveAccount(resp.account); @@ -73,14 +74,17 @@ async function getToken(): Promise { accessToken = resp2.accessToken; } } catch (popupError) { - throw new Error('Failed to login: ' + (popupError instanceof Error ? popupError.message : String(popupError))); + throw new Error( + 'Failed to login: ' + + (popupError instanceof Error ? popupError.message : String(popupError)) + ); } } - + if (!accessToken) { throw new Error('Failed to acquire access token'); } - + return accessToken; } @@ -106,7 +110,6 @@ const params = { } }; - // Download file from OneDrive async function downloadOneDriveFile(fileInfo: any): Promise { const accessToken = await getToken(); @@ -228,17 +231,17 @@ export async function openOneDrivePicker(): Promise { if (!authToken) { return reject(new Error('Failed to acquire access token')); } - + pickerWindow = window.open('', 'OneDrivePicker', 'width=800,height=600'); if (!pickerWindow) { return reject(new Error('Failed to open OneDrive picker window')); } - + const queryString = new URLSearchParams({ filePicker: JSON.stringify(params) }); const url = `${baseUrl}?${queryString.toString()}`; - + const form = pickerWindow.document.createElement('form'); form.setAttribute('action', url); form.setAttribute('method', 'POST'); @@ -247,10 +250,10 @@ export async function openOneDrivePicker(): Promise { input.setAttribute('name', 'access_token'); input.setAttribute('value', authToken); form.appendChild(input); - + pickerWindow.document.body.appendChild(form); form.submit(); - + window.addEventListener('message', handleWindowMessage); } catch 
(err) { if (pickerWindow) { @@ -267,14 +270,14 @@ export async function openOneDrivePicker(): Promise { // Pick and download file from OneDrive export async function pickAndDownloadFile(): Promise<{ blob: Blob; name: string } | null> { const pickerResult = await openOneDrivePicker(); - + if (!pickerResult || !pickerResult.items || pickerResult.items.length === 0) { return null; } - + const selectedFile = pickerResult.items[0]; const blob = await downloadOneDriveFile(selectedFile); - + return { blob, name: selectedFile.name }; } From 4df74690e113da9ec430694edc754fcf5b18a851 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Tue, 4 Mar 2025 22:10:51 -0800 Subject: [PATCH 204/623] doc: changelog --- CHANGELOG.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 29715f6f340..7e11228706a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,24 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [0.5.19] - 2025-03-04 + +### Added + +- **📊 Logit Bias Parameter Support**: Fine-tune conversation dynamics by adjusting the Logit Bias parameter directly in chat settings, giving you more control over model responses. +- **⌨️ Customizable Enter Behavior**: You can now configure Enter to send messages only when combined with Ctrl (Ctrl+Enter) via Settings > Interface, preventing accidental message sends. +- **📝 Collapsible Code Blocks**: Easily collapse long code blocks to declutter your chat, making it easier to focus on important details. +- **🏷️ Tag Selector in Model Selector**: Quickly find and categorize models with the new tag filtering system in the Model Selector, streamlining model discovery. 
+- **📈 Experimental Elasticsearch Vector DB Support**: Now supports Elasticsearch as a vector database, offering more flexibility for data retrieval in Retrieval-Augmented Generation (RAG) workflows. +- **⚙️ General Reliability Enhancements**: Various stability improvements across the WebUI, ensuring a smoother, more consistent experience. +- **🌍 Updated Translations**: Refined multilingual support for better localization and accuracy across various languages. + +### Fixed + +- **🔄 "Stream" Hook Activation**: Fixed an issue where the "Stream" hook only worked when globally enabled, ensuring reliable real-time filtering. +- **📧 LDAP Email Case Sensitivity**: Resolved an issue where LDAP login failed due to email case sensitivity mismatches, improving authentication reliability. +- **💬 WebSocket Chat Event Registration**: Fixed a bug preventing chat event listeners from being registered upon sign-in, ensuring real-time updates work properly. + ## [0.5.18] - 2025-02-27 ### Fixed From 976c94ba5af0f368884ecf16e96000e4bcb72d48 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Tue, 4 Mar 2025 22:13:51 -0800 Subject: [PATCH 205/623] chore: pyproject --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 2cf87c2f9bf..0666ac8a262 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -57,6 +57,7 @@ dependencies = [ "qdrant-client~=1.12.0", "opensearch-py==2.8.0", "playwright==1.49.1", + "elasticsearch==8.17.1", "transformers", "sentence-transformers==3.3.1", From 2dc5f3c30cd445de7573f459100f4570aeac3688 Mon Sep 17 00:00:00 2001 From: Aleix Dorca Date: Wed, 5 Mar 2025 08:42:16 +0100 Subject: [PATCH 206/623] Update catalan translation.json --- src/lib/i18n/locales/ca-ES/translation.json | 118 ++++++++++---------- 1 file changed, 59 insertions(+), 59 deletions(-) diff --git a/src/lib/i18n/locales/ca-ES/translation.json b/src/lib/i18n/locales/ca-ES/translation.json index b18326bf3fc..6581e4d4e1d 100644 --- 
a/src/lib/i18n/locales/ca-ES/translation.json +++ b/src/lib/i18n/locales/ca-ES/translation.json @@ -5,7 +5,7 @@ "(e.g. `sh webui.sh --api`)": "(p. ex. `sh webui.sh --api`)", "(latest)": "(últim)", "{{ models }}": "{{ models }}", - "{{COUNT}} hidden lines": "", + "{{COUNT}} hidden lines": "{{COUNT}} línies ocultes", "{{COUNT}} Replies": "{{COUNT}} respostes", "{{user}}'s Chats": "Els xats de {{user}}", "{{webUIName}} Backend Required": "El Backend de {{webUIName}} és necessari", @@ -14,7 +14,7 @@ "A task model is used when performing tasks such as generating titles for chats and web search queries": "Un model de tasca s'utilitza quan es realitzen tasques com ara generar títols per a xats i consultes de cerca per a la web", "a user": "un usuari", "About": "Sobre", - "Accept autocomplete generation / Jump to prompt variable": "", + "Accept autocomplete generation / Jump to prompt variable": "Acceptar la generació autocompletada / Saltar a la variable d'indicació", "Access": "Accés", "Access Control": "Control d'accés", "Accessible to all users": "Accessible a tots els usuaris", @@ -52,7 +52,7 @@ "Admins have access to all tools at all times; users need tools assigned per model in the workspace.": "Els administradors tenen accés a totes les eines en tot moment; els usuaris necessiten eines assignades per model a l'espai de treball.", "Advanced Parameters": "Paràmetres avançats", "Advanced Params": "Paràmetres avançats", - "All": "", + "All": "Tots", "All Documents": "Tots els documents", "All models deleted successfully": "Tots els models s'han eliminat correctament", "Allow Chat Controls": "Permetre els controls de xat", @@ -66,7 +66,7 @@ "Allow Voice Interruption in Call": "Permetre la interrupció de la veu en una trucada", "Allowed Endpoints": "Punts d'accés permesos", "Already have an account?": "Ja tens un compte?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. 
The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "Alternativa al top_p, i pretén garantir un equilibri de qualitat i varietat. El paràmetre p representa la probabilitat mínima que es consideri un token, en relació amb la probabilitat del token més probable. Per exemple, amb p=0,05 i el token més probable amb una probabilitat de 0,9, es filtren els logits amb un valor inferior a 0,045.", "Always": "Sempre", "Amazing": "Al·lucinant", "an assistant": "un assistent", @@ -88,17 +88,17 @@ "Archive All Chats": "Arxiva tots els xats", "Archived Chats": "Xats arxivats", "archived-chat-export": "archived-chat-export", - "Are you sure you want to clear all memories? This action cannot be undone.": "", + "Are you sure you want to clear all memories? This action cannot be undone.": "Estàs segur que vols netejar totes les memòries? 
Aquesta acció no es pot desfer.", "Are you sure you want to delete this channel?": "Estàs segur que vols eliminar aquest canal?", "Are you sure you want to delete this message?": "Estàs segur que vols eliminar aquest missatge?", "Are you sure you want to unarchive all archived chats?": "Estàs segur que vols desarxivar tots els xats arxivats?", "Are you sure?": "Estàs segur?", "Arena Models": "Models de l'Arena", "Artifacts": "Artefactes", - "Ask": "", + "Ask": "Preguntar", "Ask a question": "Fer una pregunta", "Assistant": "Assistent", - "Attach file from knowledge": "", + "Attach file from knowledge": "Associar arxiu del coneixement", "Attention to detail": "Atenció al detall", "Attribute for Mail": "Atribut per al Correu", "Attribute for Username": "Atribut per al Nom d'usuari", @@ -130,10 +130,10 @@ "Bing Search V7 Endpoint": "Punt de connexió a Bing Search V7", "Bing Search V7 Subscription Key": "Clau de subscripció a Bing Search V7", "Bocha Search API Key": "Clau API de Bocha Search", - "Boosting or penalizing specific tokens for constrained responses. Bias values will be clamped between -100 and 100 (inclusive). (Default: none)": "", + "Boosting or penalizing specific tokens for constrained responses. Bias values will be clamped between -100 and 100 (inclusive). (Default: none)": "Potenciar o penalitzar tokens específics per a respostes limitades. Els valors de biaix es fixaran entre -100 i 100 (inclosos). 
(Per defecte: cap)", "Brave Search API Key": "Clau API de Brave Search", "By {{name}}": "Per {{name}}", - "Bypass Embedding and Retrieval": "", + "Bypass Embedding and Retrieval": "Desactivar l'Embedding i el Retrieval", "Bypass SSL verification for Websites": "Desactivar la verificació SSL per a l'accés a Internet", "Calendar": "Calendari", "Call": "Trucada", @@ -167,7 +167,7 @@ "Ciphers": "Xifradors", "Citation": "Cita", "Clear memory": "Esborrar la memòria", - "Clear Memory": "", + "Clear Memory": "Esborrar la memòria", "click here": "prem aquí", "Click here for filter guides.": "Clica aquí per filtrar les guies.", "Click here for help.": "Clica aquí per obtenir ajuda.", @@ -194,7 +194,7 @@ "Code Interpreter": "Intèrpret de codi", "Code Interpreter Engine": "Motor de l'intèrpret de codi", "Code Interpreter Prompt Template": "Plantilla de la indicació de l'intèrpret de codi", - "Collapse": "", + "Collapse": "Col·lapsar", "Collection": "Col·lecció", "Color": "Color", "ComfyUI": "ComfyUI", @@ -213,19 +213,19 @@ "Confirm your new password": "Confirma la teva nova contrasenya", "Connect to your own OpenAI compatible API endpoints.": "Connecta als teus propis punts de connexió de l'API compatible amb OpenAI", "Connections": "Connexions", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "Restringeix l'esforç de raonament dels models de raonament. 
Només aplicable a models de raonament de proveïdors específics que donen suport a l'esforç de raonament.", "Contact Admin for WebUI Access": "Posat en contacte amb l'administrador per accedir a WebUI", "Content": "Contingut", - "Content Extraction Engine": "", + "Content Extraction Engine": "Motor d'extracció de contingut", "Context Length": "Mida del context", "Continue Response": "Continuar la resposta", "Continue with {{provider}}": "Continuar amb {{provider}}", "Continue with Email": "Continuar amb el correu", "Continue with LDAP": "Continuar amb LDAP", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Controlar com es divideix el text del missatge per a les sol·licituds TTS. 'Puntuació' divideix en frases, 'paràgrafs' divideix en paràgrafs i 'cap' manté el missatge com una cadena única.", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "Controlar la repetició de seqüències de tokens en el text generat. Un valor més alt (p. ex., 1,5) penalitzarà les repeticions amb més força, mentre que un valor més baix (p. ex., 1,1) serà més indulgent. A l'1, està desactivat.", "Controls": "Controls", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "Controla l'equilibri entre la coherència i la diversitat de la sortida. 
Un valor més baix donarà lloc a un text més enfocat i coherent.", "Copied": "Copiat", "Copied shared chat URL to clipboard!": "S'ha copiat l'URL compartida al porta-retalls!", "Copied to clipboard": "Copiat al porta-retalls", @@ -250,11 +250,11 @@ "Created At": "Creat el", "Created by": "Creat per", "CSV Import": "Importar CSV", - "Ctrl+Enter to Send": "", + "Ctrl+Enter to Send": "Ctrl+Enter per enviar", "Current Model": "Model actual", "Current Password": "Contrasenya actual", "Custom": "Personalitzat", - "Danger Zone": "", + "Danger Zone": "Zona de perill", "Dark": "Fosc", "Database": "Base de dades", "December": "Desembre", @@ -315,8 +315,8 @@ "Do not install functions from sources you do not fully trust.": "No instal·lis funcions de fonts en què no confiïs plenament.", "Do not install tools from sources you do not fully trust.": "No instal·lis eines de fonts en què no confiïs plenament.", "Document": "Document", - "Document Intelligence": "", - "Document Intelligence endpoint and key required.": "", + "Document Intelligence": "Document Intelligence", + "Document Intelligence endpoint and key required.": "Fa falta un punt de connexió i una clau per a Document Intelligence.", "Documentation": "Documentació", "Documents": "Documents", "does not make any external connections, and your data stays securely on your locally hosted server.": "no realitza connexions externes, i les teves dades romanen segures al teu servidor allotjat localment.", @@ -352,7 +352,7 @@ "ElevenLabs": "ElevenLabs", "Email": "Correu electrònic", "Embark on adventures": "Embarcar en aventures", - "Embedding": "", + "Embedding": "Incrustació", "Embedding Batch Size": "Mida del lot d'incrustació", "Embedding Model": "Model d'incrustació", "Embedding Model Engine": "Motor de model d'incrustació", @@ -364,7 +364,7 @@ "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. 
This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Activar el bloqueig de memòria (mlock) per evitar que les dades del model s'intercanviïn fora de la memòria RAM. Aquesta opció bloqueja el conjunt de pàgines de treball del model a la memòria RAM, assegurant-se que no s'intercanviaran al disc. Això pot ajudar a mantenir el rendiment evitant errors de pàgina i garantint un accés ràpid a les dades.", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Activar l'assignació de memòria (mmap) per carregar les dades del model. Aquesta opció permet que el sistema utilitzi l'emmagatzematge en disc com a extensió de la memòria RAM tractant els fitxers de disc com si estiguessin a la memòria RAM. Això pot millorar el rendiment del model permetent un accés més ràpid a les dades. Tanmateix, és possible que no funcioni correctament amb tots els sistemes i pot consumir una quantitat important d'espai en disc.", "Enable Message Rating": "Permetre la qualificació de missatges", - "Enable Mirostat sampling for controlling perplexity.": "", + "Enable Mirostat sampling for controlling perplexity.": "Permetre el mostreig de Mirostat per controlar la perplexitat", "Enable New Sign Ups": "Permetre nous registres", "Enabled": "Habilitat", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Assegura't que els teus fitxers CSV inclouen 4 columnes en aquest ordre: Nom, Correu electrònic, Contrasenya, Rol.", @@ -381,10 +381,10 @@ "Enter CFG Scale (e.g. 7.0)": "Entra l'escala CFG (p.ex. 
7.0)", "Enter Chunk Overlap": "Introdueix la mida de solapament de blocs", "Enter Chunk Size": "Introdueix la mida del bloc", - "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", + "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "Introdueix parelles de \"token:valor de biaix\" separats per comes (exemple: 5432:100, 413:-100)", "Enter description": "Introdueix la descripció", - "Enter Document Intelligence Endpoint": "", - "Enter Document Intelligence Key": "", + "Enter Document Intelligence Endpoint": "Introdueix el punt de connexió de Document Intelligence", + "Enter Document Intelligence Key": "Introdueix la clau de Document Intelligence", "Enter domains separated by commas (e.g., example.com,site.org)": "Introdueix els dominis separats per comes (p. ex. example.com,site.org)", "Enter Exa API Key": "Introdueix la clau API de d'EXA", "Enter Github Raw URL": "Introdueix l'URL en brut de Github", @@ -396,13 +396,13 @@ "Enter Jupyter Token": "Introdueix el token de Jupyter", "Enter Jupyter URL": "Introdueix la URL de Jupyter", "Enter Kagi Search API Key": "Introdueix la clau API de Kagi Search", - "Enter Key Behavior": "", + "Enter Key Behavior": "Introdueix el comportament de clau", "Enter language codes": "Introdueix els codis de llenguatge", "Enter Model ID": "Introdueix l'identificador del model", "Enter model tag (e.g. {{modelTag}})": "Introdueix l'etiqueta del model (p. ex. {{modelTag}})", "Enter Mojeek Search API Key": "Introdueix la clau API de Mojeek Search", "Enter Number of Steps (e.g. 50)": "Introdueix el nombre de passos (p. ex. 50)", - "Enter Perplexity API Key": "", + "Enter Perplexity API Key": "Introdueix la clau API de Perplexity", "Enter proxy URL (e.g. https://user:password@host:port)": "Entra l'URL (p. ex. https://user:password@host:port)", "Enter reasoning effort": "Introdueix l'esforç de raonament", "Enter Sampler (e.g. Euler a)": "Introdueix el mostrejador (p.ex. 
Euler a)", @@ -426,7 +426,7 @@ "Enter the public URL of your WebUI. This URL will be used to generate links in the notifications.": "Entra la URL pública de WebUI. Aquesta URL s'utilitzarà per generar els enllaços en les notificacions.", "Enter Tika Server URL": "Introdueix l'URL del servidor Tika", "Enter timeout in seconds": "Entra el temps màxim en segons", - "Enter to Send": "", + "Enter to Send": "Enter per enviar", "Enter Top K": "Introdueix Top K", "Enter URL (e.g. http://127.0.0.1:7860/)": "Introdueix l'URL (p. ex. http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "Introdueix l'URL (p. ex. http://localhost:11434)", @@ -450,13 +450,13 @@ "Example: mail": "Exemple: mail", "Example: ou=users,dc=foo,dc=example": "Exemple: ou=users,dc=foo,dc=example", "Example: sAMAccountName or uid or userPrincipalName": "Exemple: sAMAccountName o uid o userPrincipalName", - "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "S'ha superat el nombre de places a la vostra llicència. 
Poseu-vos en contacte amb el servei d'assistència per augmentar el nombre de places.", "Exclude": "Excloure", - "Execute code for analysis": "Executa el codi per analitzar-lo", - "Expand": "", + "Execute code for analysis": "Executar el codi per analitzar-lo", + "Expand": "Expandir", "Experimental": "Experimental", - "Explain": "", - "Explain this section to me in more detail": "", + "Explain": "Explicar", + "Explain this section to me in more detail": "Explica'm aquesta secció amb més detall", "Explore the cosmos": "Explorar el cosmos", "Export": "Exportar", "Export All Archived Chats": "Exportar tots els xats arxivats", @@ -529,7 +529,7 @@ "General": "General", "Generate an image": "Generar una imatge", "Generate Image": "Generar imatge", - "Generate prompt pair": "", + "Generate prompt pair": "Generar parella d'indicació", "Generating search query": "Generant consulta", "Get started": "Començar", "Get started with {{WEBUI_NAME}}": "Començar amb {{WEBUI_NAME}}", @@ -580,12 +580,12 @@ "Include": "Incloure", "Include `--api-auth` flag when running stable-diffusion-webui": "Inclou `--api-auth` quan executis stable-diffusion-webui", "Include `--api` flag when running stable-diffusion-webui": "Inclou `--api` quan executis stable-diffusion-webui", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "Influeix amb la rapidesa amb què l'algoritme respon als comentaris del text generat. 
Una taxa d'aprenentatge més baixa donarà lloc a ajustos més lents, mentre que una taxa d'aprenentatge més alta farà que l'algorisme sigui més sensible.", "Info": "Informació", "Input commands": "Entra comandes", "Install from Github URL": "Instal·lar des de l'URL de Github", "Instant Auto-Send After Voice Transcription": "Enviament automàtic després de la transcripció de veu", - "Integration": "", + "Integration": "Integració", "Interface": "Interfície", "Invalid file format.": "Format d'arxiu no vàlid.", "Invalid Tag": "Etiqueta no vàlida", @@ -633,12 +633,12 @@ "Listening...": "Escoltant...", "Llama.cpp": "Llama.cpp", "LLMs can make mistakes. Verify important information.": "Els models de llenguatge poden cometre errors. Verifica la informació important.", - "Loader": "", + "Loader": "Carregador", "Loading Kokoro.js...": "Carregant Kokoro.js", "Local": "Local", "Local Models": "Models locals", - "Location access not allowed": "", - "Logit Bias": "", + "Location access not allowed": "Accés a la ubicació no permesa", + "Logit Bias": "Biaix Logit", "Lost": "Perdut", "LTR": "LTR", "Made by Open WebUI Community": "Creat per la Comunitat OpenWebUI", @@ -712,7 +712,7 @@ "No HTML, CSS, or JavaScript content found.": "No s'ha trobat contingut HTML, CSS o JavaScript.", "No inference engine with management support found": "No s'ha trobat un motor d'inferència amb suport de gestió", "No knowledge found": "No s'ha trobat Coneixement", - "No memories to clear": "", + "No memories to clear": "No hi ha memòries per netejar", "No model IDs": "No hi ha IDs de model", "No models found": "No s'han trobat models", "No models selected": "No s'ha seleccionat cap model", @@ -742,7 +742,7 @@ "Ollama API settings updated": "La configuració de l'API d'Ollama s'ha actualitzat", "Ollama Version": "Versió d'Ollama", "On": "Activat", - "OneDrive": "", + "OneDrive": "OneDrive", "Only alphanumeric characters and hyphens are allowed": "Només es permeten caràcters alfanumèrics i guions", "Only 
alphanumeric characters and hyphens are allowed in the command string.": "Només es permeten caràcters alfanumèrics i guions en la comanda.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "Només es poden editar col·leccions, crea una nova base de coneixement per editar/afegir documents.", @@ -779,7 +779,7 @@ "Permission denied when accessing microphone": "Permís denegat en accedir al micròfon", "Permission denied when accessing microphone: {{error}}": "Permís denegat en accedir al micròfon: {{error}}", "Permissions": "Permisos", - "Perplexity API Key": "", + "Perplexity API Key": "Clau API de Perplexity", "Personalization": "Personalització", "Pin": "Fixar", "Pinned": "Fixat", @@ -825,7 +825,7 @@ "Reasoning Effort": "Esforç de raonament", "Record voice": "Enregistrar la veu", "Redirecting you to Open WebUI Community": "Redirigint-te a la comunitat OpenWebUI", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "Redueix la probabilitat de generar ximpleries. Un valor més alt (p. ex. 100) donarà respostes més diverses, mentre que un valor més baix (p. ex. 10) serà més conservador.", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Fes referència a tu mateix com a \"Usuari\" (p. ex., \"L'usuari està aprenent espanyol\")", "References from": "Referències de", "Refused when it shouldn't have": "Refusat quan no hauria d'haver estat", @@ -851,7 +851,7 @@ "Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Les notifications de resposta no es poden activar perquè els permisos del lloc web han estat rebutjats. 
Comprova les preferències del navegador per donar l'accés necessari.", "Response splitting": "Divisió de la resposta", "Result": "Resultat", - "Retrieval": "", + "Retrieval": "Retrieval", "Retrieval Query Generation": "Generació de consultes Retrieval", "Rich Text Input for Chat": "Entrada de text ric per al xat", "RK": "RK", @@ -882,7 +882,7 @@ "Search options": "Opcions de cerca", "Search Prompts": "Cercar indicacions", "Search Result Count": "Recompte de resultats de cerca", - "Search the internet": "Cerca a internet", + "Search the internet": "Cercar a internet", "Search Tools": "Cercar eines", "SearchApi API Key": "Clau API de SearchApi", "SearchApi Engine": "Motor de SearchApi", @@ -934,11 +934,11 @@ "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Establir el nombre de fils de treball utilitzats per al càlcul. Aquesta opció controla quants fils s'utilitzen per processar les sol·licituds entrants simultàniament. Augmentar aquest valor pot millorar el rendiment amb càrregues de treball de concurrència elevada, però també pot consumir més recursos de CPU.", "Set Voice": "Establir la veu", "Set whisper model": "Establir el model whisper", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", - "Sets how far back for the model to look back to prevent repetition.": "", - "Sets the random number seed to use for generation. 
Setting this to a specific number will make the model generate the same text for the same prompt.": "", - "Sets the size of the context window used to generate the next token.": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "Estableix un biaix pla contra tokens que han aparegut almenys una vegada. Un valor més alt (p. ex., 1,5) penalitzarà les repeticions amb més força, mentre que un valor més baix (p. ex., 0,9) serà més indulgent. A 0, està desactivat.", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "Estableix un biaix d'escala contra tokens per penalitzar les repeticions, en funció de quantes vegades han aparegut. Un valor més alt (p. ex., 1,5) penalitzarà les repeticions amb més força, mentre que un valor més baix (p. ex., 0,9) serà més indulgent. A 0, està desactivat.", + "Sets how far back for the model to look back to prevent repetition.": "Estableix fins a quin punt el model mira enrere per evitar la repetició.", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "Estableix la llavor del nombre aleatori que s'utilitzarà per a la generació. Establir-ho a un número específic farà que el model generi el mateix text per a la mateixa sol·licitud.", + "Sets the size of the context window used to generate the next token.": "Estableix la mida de la finestra de context utilitzada per generar el següent token.", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. 
Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Establir les seqüències d'aturada a utilitzar. Quan es trobi aquest patró, el LLM deixarà de generar text. Es poden establir diversos patrons de parada especificant diversos paràmetres de parada separats en un fitxer model.", "Settings": "Preferències", "Settings saved successfully!": "Les preferències s'han desat correctament", @@ -980,8 +980,8 @@ "System Prompt": "Indicació del Sistema", "Tags Generation": "Generació d'etiquetes", "Tags Generation Prompt": "Indicació per a la generació d'etiquetes", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", - "Talk to model": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "El mostreig sense cua s'utilitza per reduir l'impacte de tokens menys probables de la sortida. Un valor més alt (p. ex., 2,0) reduirà més l'impacte, mentre que un valor d'1,0 desactiva aquesta configuració.", + "Talk to model": "Parlar amb el model", "Tap to interrupt": "Prem per interrompre", "Tasks": "Tasques", "Tavily API Key": "Clau API de Tavily", @@ -995,7 +995,7 @@ "Thanks for your feedback!": "Gràcies pel teu comentari!", "The Application Account DN you bind with for search": "El DN del compte d'aplicació per realitzar la cerca", "The base to search for users": "La base per cercar usuaris", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", + "The batch size determines how many text requests are processed together at once. 
A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "La mida del lot determina quantes sol·licituds de text es processen alhora. Una mida de lot més gran pot augmentar el rendiment i la velocitat del model, però també requereix més memòria.", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Els desenvolupadors d'aquest complement són voluntaris apassionats de la comunitat. Si trobeu útil aquest complement, considereu contribuir al seu desenvolupament.", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "La classificació d'avaluació es basa en el sistema de qualificació Elo i s'actualitza en temps real.", "The LDAP attribute that maps to the mail that users use to sign in.": "L'atribut LDAP que s'associa al correu que els usuaris utilitzen per iniciar la sessió.", @@ -1004,14 +1004,14 @@ "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "La mida màxima del fitxer en MB. Si la mida del fitxer supera aquest límit, el fitxer no es carregarà.", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "El nombre màxim de fitxers que es poden utilitzar alhora al xat. Si el nombre de fitxers supera aquest límit, els fitxers no es penjaran.", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "El valor de puntuació hauria de ser entre 0.0 (0%) i 1.0 (100%).", - "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "La temperatura del model. 
Augmentar la temperatura farà que el model respongui de manera més creativa.", "Theme": "Tema", "Thinking...": "Pensant...", "This action cannot be undone. Do you wish to continue?": "Aquesta acció no es pot desfer. Vols continuar?", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Això assegura que les teves converses valuoses queden desades de manera segura a la teva base de dades. Gràcies!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Aquesta és una funció experimental, és possible que no funcioni com s'espera i està subjecta a canvis en qualsevol moment.", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "Aquesta opció controla quants tokens es conserven en actualitzar el context. Per exemple, si s'estableix en 2, es conservaran els darrers 2 tokens del context de conversa. Preservar el context pot ajudar a mantenir la continuïtat d'una conversa, però pot reduir la capacitat de respondre a nous temes.", + "This option sets the maximum number of tokens the model can generate in its response. 
Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "Aquesta opció estableix el nombre màxim de tokens que el model pot generar en la seva resposta. Augmentar aquest límit permet que el model proporcioni respostes més llargues, però també pot augmentar la probabilitat que es generi contingut poc útil o irrellevant.", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "Aquesta opció eliminarà tots els fitxers existents de la col·lecció i els substituirà per fitxers recentment penjats.", "This response was generated by \"{{model}}\"": "Aquesta resposta l'ha generat el model \"{{model}}\"", "This will delete": "Això eliminarà", @@ -1066,7 +1066,7 @@ "Top P": "Top P", "Transformers": "Transformadors", "Trouble accessing Ollama?": "Problemes en accedir a Ollama?", - "Trust Proxy Environment": "", + "Trust Proxy Environment": "Confiar en l'entorn proxy", "TTS Model": "Model TTS", "TTS Settings": "Preferències de TTS", "TTS Voice": "Veu TTS", @@ -1148,7 +1148,7 @@ "Why?": "Per què?", "Widescreen Mode": "Mode de pantalla ampla", "Won": "Ha guanyat", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "Funciona juntament amb top-k. Un valor més alt (p. ex., 0,95) donarà lloc a un text més divers, mentre que un valor més baix (p. 
ex., 0,5) generarà un text més concentrat i conservador.", "Workspace": "Espai de treball", "Workspace Permissions": "Permisos de l'espai de treball", "Write": "Escriure", @@ -1158,7 +1158,7 @@ "Write your model template content here": "Introdueix el contingut de la plantilla del teu model aquí", "Yesterday": "Ahir", "You": "Tu", - "You are currently using a trial license. Please contact support to upgrade your license.": "", + "You are currently using a trial license. Please contact support to upgrade your license.": "Actualment esteu utilitzant una llicència de prova. Poseu-vos en contacte amb el servei d'assistència per actualitzar la vostra llicència.", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Només pots xatejar amb un màxim de {{maxCount}} fitxers alhora.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Pots personalitzar les teves interaccions amb els models de llenguatge afegint memòries mitjançant el botó 'Gestiona' que hi ha a continuació, fent-les més útils i adaptades a tu.", "You cannot upload an empty file.": "No es pot pujar un ariux buit.", @@ -1172,6 +1172,6 @@ "Your account status is currently pending activation.": "El compte està actualment pendent d'activació", "Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "Tota la teva contribució anirà directament al desenvolupador del complement; Open WebUI no se'n queda cap percentatge. 
Tanmateix, la plataforma de finançament escollida pot tenir les seves pròpies comissions.", "Youtube": "Youtube", - "Youtube Language": "", + "Youtube Language": "Idioma de YouTube", "Youtube Proxy URL": "" } From 1639fbb54427d55dcd945a226dabcb5cb4ae7abf Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Wed, 5 Mar 2025 00:45:04 -0800 Subject: [PATCH 207/623] fix: site.webmanifest --- backend/open_webui/static/site.webmanifest | 40 +++++++++++----------- static/static/site.webmanifest | 4 +-- 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/backend/open_webui/static/site.webmanifest b/backend/open_webui/static/site.webmanifest index 0e59bbb2823..2b74733fa23 100644 --- a/backend/open_webui/static/site.webmanifest +++ b/backend/open_webui/static/site.webmanifest @@ -1,21 +1,21 @@ { - "name": "Open WebUI", - "short_name": "WebUI", - "icons": [ - { - "src": "/favicon/web-app-manifest-192x192.png", - "sizes": "192x192", - "type": "image/png", - "purpose": "maskable" - }, - { - "src": "/favicon/web-app-manifest-512x512.png", - "sizes": "512x512", - "type": "image/png", - "purpose": "maskable" - } - ], - "theme_color": "#ffffff", - "background_color": "#ffffff", - "display": "standalone" -} \ No newline at end of file + "name": "Open WebUI", + "short_name": "WebUI", + "icons": [ + { + "src": "/static/web-app-manifest-192x192.png", + "sizes": "192x192", + "type": "image/png", + "purpose": "maskable" + }, + { + "src": "/static/web-app-manifest-512x512.png", + "sizes": "512x512", + "type": "image/png", + "purpose": "maskable" + } + ], + "theme_color": "#ffffff", + "background_color": "#ffffff", + "display": "standalone" +} diff --git a/static/static/site.webmanifest b/static/static/site.webmanifest index 0e59bbb2823..95915ae2bca 100644 --- a/static/static/site.webmanifest +++ b/static/static/site.webmanifest @@ -3,13 +3,13 @@ "short_name": "WebUI", "icons": [ { - "src": "/favicon/web-app-manifest-192x192.png", + "src": 
"/static/web-app-manifest-192x192.png", "sizes": "192x192", "type": "image/png", "purpose": "maskable" }, { - "src": "/favicon/web-app-manifest-512x512.png", + "src": "/static/web-app-manifest-512x512.png", "sizes": "512x512", "type": "image/png", "purpose": "maskable" From a44b35e99e560647da5952696f6aa3fb6e138ef6 Mon Sep 17 00:00:00 2001 From: Fabio Polito Date: Wed, 5 Mar 2025 17:53:45 +0000 Subject: [PATCH 208/623] fix: fix DoclingLoader input params --- backend/open_webui/retrieval/loaders/main.py | 55 +++++++++++++------- 1 file changed, 37 insertions(+), 18 deletions(-) diff --git a/backend/open_webui/retrieval/loaders/main.py b/backend/open_webui/retrieval/loaders/main.py index e305b59b8d8..2ffd310bc63 100644 --- a/backend/open_webui/retrieval/loaders/main.py +++ b/backend/open_webui/retrieval/loaders/main.py @@ -126,24 +126,43 @@ def load(self) -> list[Document]: raise ValueError("File path is required for DoclingLoader") with open(self.file_path, "rb") as f: - files = {"files": (self.file_path, f, self.mime_type or "application/octet-stream")} - + files = { + "files": ( + self.file_path, + f, + self.mime_type or "application/octet-stream", + ) + } + params = { - "from_formats": ["docx", "pptx", "html", "xml_pubmed", "image", "pdf", "asciidoc", "md", "xlsx", "xml_uspto", "json_docling"], - "to_formats": ["md"], - "image_export_mode": "placeholder", - "do_ocr": True, - "force_ocr": False, - "ocr_engine": "easyocr", - "ocr_lang": None, - "pdf_backend": "dlparse_v2", - "table_mode": "fast", - "abort_on_error": False, - "return_as_file": False, - "do_table_structure": True, - "include_images": True, - "images_scale": 2.0, - } + "from_formats": [ + "docx", + "pptx", + "html", + "image", + "pdf", + "asciidoc", + "md", + "csv", + "xlsx", + "xml_uspto", + "xml_jats", + "json_docling", + ], + "to_formats": ["md"], + "image_export_mode": "placeholder", + "do_ocr": True, + "force_ocr": False, + "ocr_engine": "easyocr", + "ocr_lang": None, + "pdf_backend": 
"dlparse_v2", + "table_mode": "accurate", + "abort_on_error": False, + "return_as_file": False, + "do_table_structure": True, + "include_images": True, + "images_scale": 2.0, + } endpoint = f"{self.url}/v1alpha/convert/file" response = requests.post(endpoint, files=files, data=params) @@ -154,7 +173,7 @@ def load(self) -> list[Document]: text = document_data.get("md_content", "") metadata = {"Content-Type": self.mime_type} if self.mime_type else {} - + log.debug("Docling extracted text: %s", text) return [Document(page_content=text, metadata=metadata)] From a8f205213c7cde998c57eba59c0c11fdf9d4ff10 Mon Sep 17 00:00:00 2001 From: ofek Date: Wed, 5 Mar 2025 23:19:56 +0200 Subject: [PATCH 209/623] fixed es bugs --- backend/open_webui/config.py | 2 +- .../retrieval/vector/dbs/elasticsearch.py | 226 +++++++++++------- 2 files changed, 135 insertions(+), 93 deletions(-) diff --git a/backend/open_webui/config.py b/backend/open_webui/config.py index 349c35ce5f5..b2a1dc00b55 100644 --- a/backend/open_webui/config.py +++ b/backend/open_webui/config.py @@ -1553,7 +1553,7 @@ class BannerModel(BaseModel): ELASTICSEARCH_PASSWORD = os.environ.get("ELASTICSEARCH_PASSWORD", None) ELASTICSEARCH_CLOUD_ID = os.environ.get("ELASTICSEARCH_CLOUD_ID", None) SSL_ASSERT_FINGERPRINT = os.environ.get("SSL_ASSERT_FINGERPRINT", None) - +ELASTICSEARCH_INDEX_PREFIX = os.environ.get("ELASTICSEARCH_INDEX_PREFIX", "open_webui_collections") # Pgvector PGVECTOR_DB_URL = os.environ.get("PGVECTOR_DB_URL", DATABASE_URL) if VECTOR_DB == "pgvector" and not PGVECTOR_DB_URL.startswith("postgres"): diff --git a/backend/open_webui/retrieval/vector/dbs/elasticsearch.py b/backend/open_webui/retrieval/vector/dbs/elasticsearch.py index 2dc79d2c2ea..a558e1fb0c3 100644 --- a/backend/open_webui/retrieval/vector/dbs/elasticsearch.py +++ b/backend/open_webui/retrieval/vector/dbs/elasticsearch.py @@ -1,47 +1,46 @@ from elasticsearch import Elasticsearch, BadRequestError from typing import Optional import ssl -from 
elasticsearch.helpers import bulk, scan +from elasticsearch.helpers import bulk,scan from open_webui.retrieval.vector.main import VectorItem, SearchResult, GetResult from open_webui.config import ( ELASTICSEARCH_URL, - ELASTICSEARCH_CA_CERTS, + ELASTICSEARCH_CA_CERTS, ELASTICSEARCH_API_KEY, ELASTICSEARCH_USERNAME, - ELASTICSEARCH_PASSWORD, + ELASTICSEARCH_PASSWORD, ELASTICSEARCH_CLOUD_ID, + ELASTICSEARCH_INDEX_PREFIX, SSL_ASSERT_FINGERPRINT, + ) + + class ElasticsearchClient: """ Important: - in order to reduce the number of indexes and since the embedding vector length is fixed, we avoid creating - an index for each file but store it as a text field, while seperating to different index + in order to reduce the number of indexes and since the embedding vector length is fixed, we avoid creating + an index for each file but store it as a text field, while seperating to different index baesd on the embedding length. """ - def __init__(self): - self.index_prefix = "open_webui_collections" + self.index_prefix = ELASTICSEARCH_INDEX_PREFIX self.client = Elasticsearch( hosts=[ELASTICSEARCH_URL], ca_certs=ELASTICSEARCH_CA_CERTS, api_key=ELASTICSEARCH_API_KEY, cloud_id=ELASTICSEARCH_CLOUD_ID, - basic_auth=( - (ELASTICSEARCH_USERNAME, ELASTICSEARCH_PASSWORD) - if ELASTICSEARCH_USERNAME and ELASTICSEARCH_PASSWORD - else None - ), - ssl_assert_fingerprint=SSL_ASSERT_FINGERPRINT, + basic_auth=(ELASTICSEARCH_USERNAME,ELASTICSEARCH_PASSWORD) if ELASTICSEARCH_USERNAME and ELASTICSEARCH_PASSWORD else None, + ssl_assert_fingerprint=SSL_ASSERT_FINGERPRINT + ) - - # Status: works - def _get_index_name(self, dimension: int) -> str: + #Status: works + def _get_index_name(self,dimension:int)->str: return f"{self.index_prefix}_d{str(dimension)}" - - # Status: works + + #Status: works def _scan_result_to_get_result(self, result) -> GetResult: if not result: return None @@ -56,7 +55,7 @@ def _scan_result_to_get_result(self, result) -> GetResult: return GetResult(ids=[ids], 
documents=[documents], metadatas=[metadatas]) - # Status: works + #Status: works def _result_to_get_result(self, result) -> GetResult: if not result["hits"]["hits"]: return None @@ -71,7 +70,7 @@ def _result_to_get_result(self, result) -> GetResult: return GetResult(ids=[ids], documents=[documents], metadatas=[metadatas]) - # Status: works + #Status: works def _result_to_search_result(self, result) -> SearchResult: ids = [] distances = [] @@ -85,16 +84,22 @@ def _result_to_search_result(self, result) -> SearchResult: metadatas.append(hit["_source"].get("metadata")) return SearchResult( - ids=[ids], - distances=[distances], - documents=[documents], - metadatas=[metadatas], + ids=[ids], distances=[distances], documents=[documents], metadatas=[metadatas] ) - - # Status: works + #Status: works def _create_index(self, dimension: int): body = { "mappings": { + "dynamic_templates": [ + { + "strings": { + "match_mapping_type": "string", + "mapping": { + "type": "keyword" + } + } + } + ], "properties": { "collection": {"type": "keyword"}, "id": {"type": "keyword"}, @@ -110,51 +115,64 @@ def _create_index(self, dimension: int): } } self.client.indices.create(index=self._get_index_name(dimension), body=body) - - # Status: works + #Status: works def _create_batches(self, items: list[VectorItem], batch_size=100): for i in range(0, len(items), batch_size): - yield items[i : min(i + batch_size, len(items))] + yield items[i : min(i + batch_size,len(items))] - # Status: works - def has_collection(self, collection_name) -> bool: + #Status: works + def has_collection(self,collection_name) -> bool: query_body = {"query": {"bool": {"filter": []}}} - query_body["query"]["bool"]["filter"].append( - {"term": {"collection": collection_name}} - ) + query_body["query"]["bool"]["filter"].append({"term": {"collection": collection_name}}) try: - result = self.client.count(index=f"{self.index_prefix}*", body=query_body) - - return result.body["count"] > 0 + result = self.client.count( + 
index=f"{self.index_prefix}*", + body=query_body + ) + + return result.body["count"]>0 except Exception as e: return None + - # @TODO: Make this delete a collection and not an index - def delete_colleciton(self, collection_name: str): - # TODO: fix this to include the dimension or a * prefix - # delete_collection here means delete a bunch of documents for an index. - # We are simply adapting to the norms of the other DBs. - self.client.indices.delete(index=self._get_collection_name(collection_name)) - - # Status: works + + def delete_collection(self, collection_name: str): + query = { + "query": { + "term": {"collection": collection_name} + } + } + self.client.delete_by_query(index=f"{self.index_prefix}*", body=query) + #Status: works def search( self, collection_name: str, vectors: list[list[float]], limit: int ) -> Optional[SearchResult]: query = { "size": limit, - "_source": ["text", "metadata"], + "_source": [ + "text", + "metadata" + ], "query": { "script_score": { "query": { - "bool": {"filter": [{"term": {"collection": collection_name}}]} + "bool": { + "filter": [ + { + "term": { + "collection": collection_name + } + } + ] + } }, "script": { "source": "cosineSimilarity(params.vector, 'vector') + 1.0", "params": { "vector": vectors[0] - }, # Assuming single query vector + }, # Assuming single query vector }, } }, @@ -165,8 +183,7 @@ def search( ) return self._result_to_search_result(result) - - # Status: only tested halfwat + #Status: only tested halfwat def query( self, collection_name: str, filter: dict, limit: Optional[int] = None ) -> Optional[GetResult]: @@ -180,9 +197,7 @@ def query( for field, value in filter.items(): query_body["query"]["bool"]["filter"].append({"term": {field: value}}) - query_body["query"]["bool"]["filter"].append( - {"term": {"collection": collection_name}} - ) + query_body["query"]["bool"]["filter"].append({"term": {"collection": collection_name}}) size = limit if limit else 10 try: @@ -191,82 +206,109 @@ def query( 
body=query_body, size=size, ) - + return self._result_to_get_result(result) except Exception as e: return None + #Status: works + def _has_index(self,dimension:int): + return self.client.indices.exists(index=self._get_index_name(dimension=dimension)) - # Status: works - def _has_index(self, dimension: int): - return self.client.indices.exists( - index=self._get_index_name(dimension=dimension) - ) def get_or_create_index(self, dimension: int): if not self._has_index(dimension=dimension): self._create_index(dimension=dimension) - - # Status: works + #Status: works def get(self, collection_name: str) -> Optional[GetResult]: # Get all the items in the collection. query = { - "query": {"bool": {"filter": [{"term": {"collection": collection_name}}]}}, - "_source": ["text", "metadata"], - } + "query": { + "bool": { + "filter": [ + { + "term": { + "collection": collection_name + } + } + ] + } + }, "_source": ["text", "metadata"]} results = list(scan(self.client, index=f"{self.index_prefix}*", query=query)) - + return self._scan_result_to_get_result(results) - # Status: works + #Status: works def insert(self, collection_name: str, items: list[VectorItem]): if not self._has_index(dimension=len(items[0]["vector"])): self._create_index(dimension=len(items[0]["vector"])) + for batch in self._create_batches(items): actions = [ - { - "_index": self._get_index_name(dimension=len(items[0]["vector"])), - "_id": item["id"], - "_source": { - "collection": collection_name, - "vector": item["vector"], - "text": item["text"], - "metadata": item["metadata"], - }, - } + { + "_index":self._get_index_name(dimension=len(items[0]["vector"])), + "_id": item["id"], + "_source": { + "collection": collection_name, + "vector": item["vector"], + "text": item["text"], + "metadata": item["metadata"], + }, + } for item in batch ] - bulk(self.client, actions) + bulk(self.client,actions) - # Status: should work + # Upsert documents using the update API with doc_as_upsert=True. 
def upsert(self, collection_name: str, items: list[VectorItem]): if not self._has_index(dimension=len(items[0]["vector"])): - self._create_index(collection_name, dimension=len(items[0]["vector"])) - + self._create_index(dimension=len(items[0]["vector"])) for batch in self._create_batches(items): actions = [ { - "_index": self._get_index_name(dimension=len(items[0]["vector"])), + "_op_type": "update", + "_index": self._get_index_name(dimension=len(item["vector"])), "_id": item["id"], - "_source": { + "doc": { + "collection": collection_name, "vector": item["vector"], "text": item["text"], "metadata": item["metadata"], }, + "doc_as_upsert": True, } for item in batch ] - self.client.bulk(actions) - - # TODO: This currently deletes by * which is not always supported in ElasticSearch. - # Need to read a bit before changing. Also, need to delete from a specific collection - def delete(self, collection_name: str, ids: list[str]): - # Assuming ID is unique across collections and indexes - actions = [ - {"delete": {"_index": f"{self.index_prefix}*", "_id": id}} for id in ids - ] - self.client.bulk(body=actions) + bulk(self.client,actions) + + + # Delete specific documents from a collection by filtering on both collection and document IDs. 
+ def delete( + self, + collection_name: str, + ids: Optional[list[str]] = None, + filter: Optional[dict] = None, + ): + + query = { + "query": { + "bool": { + "filter": [ + {"term": {"collection": collection_name}} + ] + } + } + } + #logic based on chromaDB + if ids: + query["query"]["bool"]["filter"].append({"terms": {"_id": ids}}) + elif filter: + for field, value in filter.items(): + query["query"]["bool"]["filter"].append({"term": {f"metadata.{field}": value}}) + + + self.client.delete_by_query(index=f"{self.index_prefix}*", body=query) def reset(self): indices = self.client.indices.get(index=f"{self.index_prefix}*") From edf94d732b983f2d1661fdc033c3a6ed024abfc1 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Wed, 5 Mar 2025 14:54:48 -0800 Subject: [PATCH 210/623] refac --- src/routes/+layout.svelte | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/routes/+layout.svelte b/src/routes/+layout.svelte index 4da2cd95bf1..aef9719f164 100644 --- a/src/routes/+layout.svelte +++ b/src/routes/+layout.svelte @@ -476,7 +476,7 @@ // Initialize i18n even if we didn't get a backend config, // so `/error` can show something that's not `undefined`. 
- initI18n(); + initI18n(localStorage?.locale); if (!localStorage.locale) { const languages = await getLanguages(); const browserLanguages = navigator.languages From 0716f96da8a11148736a9f784967cb3db8c3013c Mon Sep 17 00:00:00 2001 From: Fabio Polito Date: Wed, 5 Mar 2025 23:15:55 +0000 Subject: [PATCH 211/623] style: change style in DoclingLoader --- backend/open_webui/retrieval/loaders/main.py | 21 +++++++++----------- 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/backend/open_webui/retrieval/loaders/main.py b/backend/open_webui/retrieval/loaders/main.py index a2e6b5cf565..1299bdc6c0e 100644 --- a/backend/open_webui/retrieval/loaders/main.py +++ b/backend/open_webui/retrieval/loaders/main.py @@ -119,14 +119,11 @@ def load(self) -> list[Document]: class DoclingLoader: def __init__(self, url, file_path=None, mime_type=None): - self.url = url.rstrip("/") # Ensure no trailing slash + self.url = url.rstrip("/") self.file_path = file_path self.mime_type = mime_type def load(self) -> list[Document]: - if self.file_path is None: - raise ValueError("File path is required for DoclingLoader") - with open(self.file_path, "rb") as f: files = { "files": ( @@ -167,10 +164,10 @@ def load(self) -> list[Document]: } endpoint = f"{self.url}/v1alpha/convert/file" - response = requests.post(endpoint, files=files, data=params) + r = requests.post(endpoint, files=files, data=params) - if response.ok: - result = response.json() + if r.ok: + result = r.json() document_data = result.get("document", {}) text = document_data.get("md_content", "") @@ -180,14 +177,14 @@ def load(self) -> list[Document]: return [Document(page_content=text, metadata=metadata)] else: - error_msg = f"Error calling Docling API: {response.status_code}" - if response.text: + error_msg = f"Error calling Docling API: {r.reason}" + if r.text: try: - error_data = response.json() + error_data = r.json() if "detail" in error_data: error_msg += f" - {error_data['detail']}" - except: - error_msg += f" - 
{response.text}" + except Exception: + error_msg += f" - {r.text}" raise Exception(f"Error calling Docling: {error_msg}") From 6b0af942db2f60762866a7ddb847b925635293d0 Mon Sep 17 00:00:00 2001 From: Tiancong Li Date: Thu, 6 Mar 2025 07:23:55 +0800 Subject: [PATCH 212/623] i18n: update zh-TW --- src/lib/i18n/locales/zh-TW/translation.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib/i18n/locales/zh-TW/translation.json b/src/lib/i18n/locales/zh-TW/translation.json index 8cb3632c4ec..cba9e6291ad 100644 --- a/src/lib/i18n/locales/zh-TW/translation.json +++ b/src/lib/i18n/locales/zh-TW/translation.json @@ -450,7 +450,7 @@ "Example: mail": "範例:mail", "Example: ou=users,dc=foo,dc=example": "範例:ou=users,dc=foo,dc=example", "Example: sAMAccountName or uid or userPrincipalName": "範例:sAMAccountName 或 uid 或 userPrincipalName", - "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "您的授權名額已超過上限。請聯絡支援以增加授權名額。", "Exclude": "排除", "Execute code for analysis": "執行程式碼以進行分析", "Expand": "展開", @@ -1158,7 +1158,7 @@ "Write your model template content here": "在此撰寫您的模型範本内容", "Yesterday": "昨天", "You": "您", - "You are currently using a trial license. Please contact support to upgrade your license.": "", + "You are currently using a trial license. 
Please contact support to upgrade your license.": "您目前使用的是試用授權。請聯絡支援以升級您的授權。", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "您一次最多只能與 {{maxCount}} 個檔案進行對話。", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "您可以透過下方的「管理」按鈕新增記憶,將您與大型語言模型的互動個人化,讓它們更有幫助並更符合您的需求。", "You cannot upload an empty file.": "您無法上傳空檔案", From 2982893d0d1a3428136294a89954cead7266bdba Mon Sep 17 00:00:00 2001 From: Fabio Polito Date: Thu, 6 Mar 2025 00:39:00 +0000 Subject: [PATCH 213/623] fix: format fixes --- CONTRIBUTING.md | 196 ------- README.md | 16 +- backend/open_webui/static/site.webmanifest | 2 +- .../admin/Settings/Documents.svelte | 6 +- uv.lock | 477 +++++++++++++++++- 5 files changed, 473 insertions(+), 224 deletions(-) delete mode 100644 CONTRIBUTING.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 1a2ccc10171..00000000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,196 +0,0 @@ - -# Contributing Guide - -## Development Guidelines - -### Code Quality Tools - -1. Pre-commit setup: - ```bash - pre-commit install - ``` - -2. Configured hooks: - - YAML checking - - End-of-file fixer - - Trailing whitespace removal - - Ruff (linting + formatting) - - MyPy (type checking) - -### Coding Standards -- Follow PEP 8 guidelines. -- Use type hints consistently. -- Maximum line length: 130 characters. -- Use single quotes for strings. - -### Commit Guidelines -Use Commitizen for standardized commits: -```bash -git cz -``` - -## Git Strategy: Feature branch - -The **Git Feature Branch Workflow** is a way to work on new features in a project without messing up the main code. Instead of working directly on the `main` branch (the "official" code), you create a separate branch for each feature. This keeps the `main` branch clean and stable. 
- ---- - -## How It Works (Diagram) - - -**Example:** -```bash -git branch -d add-login-button -git push origin --delete add-login-button -``` - - -**Example Workflow (Diagram)** - -Here’s an example of how Mary uses this workflow: - -```mermaid -sequenceDiagram - participant Mary - participant GitHub - participant Bill - - Mary->>GitHub: Create a new branch (add-login-button) - Mary->>Mary: Make changes and commit - Mary->>GitHub: Push branch to remote - Mary->>GitHub: Open a pull request - Bill->>GitHub: Review pull request - Bill->>Mary: Request changes - Mary->>Mary: Fix feedback and commit - Mary->>GitHub: Push updates - Bill->>GitHub: Approve pull request - Mary->>GitHub: Merge branch into main - Mary->>GitHub: Delete feature branch -``` - ---- - -## General Step-by-Step Instructions - -### 1. Start with the main branch -Make sure your local main branch is up-to-date with the latest code from the central repository. - -```bash -git checkout main -git fetch origin -git reset --hard origin/main -``` - -### 2. Create a new branch for your feature -Create a branch for your feature. Use a clear name that describes what you’re working on, like `add-login-button` or `fix-bug-123`. - -```bash -git checkout -b your-branch-name -``` - -**Example:** -```bash -git checkout -b add-login-button -``` - -### 3. Work on your feature -Make changes to the code. After making changes, save your work by following these steps: - -- Check what files you’ve changed: - ```bash - git status - ``` - -- Add the files you want to save: - ```bash - git add - ``` - - **Example:** - ```bash - git add index.html - ``` - -- Save your changes with a message: - ```bash - git commit -m "Describe what you changed" - ``` - - **Example:** - ```bash - git commit -m "Added login button to homepage" - ``` - -### 4. Push your branch to the remote repository -To back up your work and share it with others, push your branch to the central repository. 
- -```bash -git push -u origin your-branch-name -``` - -**Example:** -```bash -git push -u origin add-login-button -``` - -### 5. Open a pull request -Go to your Git hosting platform (like GitLab) and open a pull request. This is how you ask your team to review your changes and approve them before adding them to the main branch. - -### 6. Fix feedback from reviewers -If your teammates suggest changes, follow these steps to update your branch: - -- Make the changes locally. -- Save the changes: - ```bash - git add - git commit -m "Fixed feedback" - git push - ``` - -### 7. Merge your branch into main -Once your pull request is approved, it’s time to merge your branch into the main branch. - -- Switch to the main branch: - ```bash - git checkout main - ``` - -- Update your local main branch: - ```bash - git pull - ``` - -- Merge your feature branch into main: - ```bash - git merge your-branch-name - ``` - -- Push the updated main branch to the remote repository: - ```bash - git push - ``` - -### 8. Delete your feature branch -After merging, delete your feature branch to keep things clean. - -- Delete the branch locally: - ```bash - git branch -d your-branch-name - ``` - -- Delete the branch from the remote repository: - ```bash - git push origin --delete your-branch-name - ``` - - -## Summary - -- Create a branch for each feature. -- Work on your branch without touching `main`. -- Push your branch to back up your work. -- Open a pull request to get feedback and approval. -- Merge your branch into `main` when it’s ready. -- Delete your branch after merging. - -By following these steps, you’ll keep the `main` branch clean and make it easy for your team to collaborate. 
diff --git a/README.md b/README.md index b1c6f0895bb..54ad41503d6 100644 --- a/README.md +++ b/README.md @@ -1,18 +1,4 @@ -# Open WebUI 👋 (FORK FOR E4) - -# First time -git remote add upstream https://github.com/open-webui/open-webui.git - -# Fetch changes from upstream -git fetch upstream - -# Merge changes into your main branch -git checkout main -git merge upstream/main - -# Push changes to GitLab -git push origin main - +# Open WebUI 👋 ![GitHub stars](https://img.shields.io/github/stars/open-webui/open-webui?style=social) ![GitHub forks](https://img.shields.io/github/forks/open-webui/open-webui?style=social) diff --git a/backend/open_webui/static/site.webmanifest b/backend/open_webui/static/site.webmanifest index 2b74733fa23..9830b759407 100644 --- a/backend/open_webui/static/site.webmanifest +++ b/backend/open_webui/static/site.webmanifest @@ -18,4 +18,4 @@ "theme_color": "#ffffff", "background_color": "#ffffff", "display": "standalone" -} +} \ No newline at end of file diff --git a/src/lib/components/admin/Settings/Documents.svelte b/src/lib/components/admin/Settings/Documents.svelte index 2d2ea453129..84248d7bc2c 100644 --- a/src/lib/components/admin/Settings/Documents.svelte +++ b/src/lib/components/admin/Settings/Documents.svelte @@ -260,7 +260,7 @@ await setEmbeddingConfig(); await setRerankingConfig(); - querySettings = await getQuerySettings(localStorage.token); + querySettings = await getQuerySettings(localStorage.token); const res = await getRAGConfig(localStorage.token); @@ -275,8 +275,8 @@ BYPASS_EMBEDDING_AND_RETRIEVAL = res.BYPASS_EMBEDDING_AND_RETRIEVAL; contentExtractionEngine = res.content_extraction.engine; - tikaServerUrl = res.content_extraction.tika_server_url ?? ''; - doclingServerUrl = res.content_extraction.docling_server_url ?? 
''; // Load doclingServerUrl + tikaServerUrl = res.content_extraction.tika_server_url; + doclingServerUrl = res.content_extraction.docling_server_url; showTikaServerUrl = contentExtractionEngine === 'tika'; showDoclingServerUrl = contentExtractionEngine === 'docling'; diff --git a/uv.lock b/uv.lock index 867725d68c5..ca5e857073f 100644 --- a/uv.lock +++ b/uv.lock @@ -69,8 +69,7 @@ resolution-markers = [ "python_full_version < '3.12' and platform_machine == 'aarch64' and platform_system == 'Linux'", "python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_system == 'Linux'", "python_full_version < '3.12' and platform_machine == 'aarch64' and platform_system == 'Linux'", - "python_full_version < '3.12' and platform_machine == 'aarch64' and platform_system == 'Linux'", - "python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_system == 'Linux'", + "python_full_version < '3.12.4' and platform_machine == 'aarch64' and platform_system == 'Linux'", "python_full_version >= '3.12.4' and platform_machine == 'aarch64' and platform_system == 'Linux'", "python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_system == 'Linux'", "python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_system == 'Linux'", @@ -108,8 +107,7 @@ resolution-markers = [ "(python_full_version < '3.12' and platform_machine != 'aarch64' and platform_system != 'Darwin') or (python_full_version < '3.12' and platform_system != 'Darwin' and platform_system != 'Linux')", "(python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine != 'aarch64' and platform_system != 'Darwin') or (python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_system != 'Darwin' and platform_system != 'Linux')", "(python_full_version < '3.12' and platform_machine != 'aarch64' and platform_system != 'Darwin') or (python_full_version < 
'3.12' and platform_system != 'Darwin' and platform_system != 'Linux')", - "(python_full_version < '3.12' and platform_machine != 'aarch64' and platform_system != 'Darwin') or (python_full_version < '3.12' and platform_system != 'Darwin' and platform_system != 'Linux')", - "(python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_machine != 'aarch64' and platform_system != 'Darwin') or (python_full_version >= '3.12' and python_full_version < '3.12.4' and platform_system != 'Darwin' and platform_system != 'Linux')", + "(python_full_version < '3.12.4' and platform_machine != 'aarch64' and platform_system != 'Darwin') or (python_full_version < '3.12.4' and platform_system != 'Darwin' and platform_system != 'Linux')", "(python_full_version >= '3.12.4' and platform_machine != 'aarch64' and platform_system != 'Darwin') or (python_full_version >= '3.12.4' and platform_system != 'Darwin' and platform_system != 'Linux')", "(python_full_version >= '3.13' and platform_machine != 'aarch64' and platform_system != 'Darwin') or (python_full_version >= '3.13' and platform_system != 'Darwin' and platform_system != 'Linux')", "(python_full_version >= '3.13' and platform_machine != 'aarch64' and platform_system != 'Darwin') or (python_full_version >= '3.13' and platform_system != 'Darwin' and platform_system != 'Linux')", @@ -489,6 +487,21 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/53/e1/5120fbb8438a0d718e063f70168a2975e03f00ce6b86e74b8eec079cb492/bitarray-3.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fcef31b062f756ba7eebcd7890c5d5de84b9d64ee877325257bcc9782288564a", size = 281535 }, { url = "https://files.pythonhosted.org/packages/73/75/8acebbbb4f85dcca73b8e91dde5d3e1e3e2317b36fae4f5b133c60720834/bitarray-3.0.0-cp312-cp312-win32.whl", hash = "sha256:656db7bdf1d81ec3b57b3cad7ec7276765964bcfd0eb81c5d1331f385298169c", size = 114423 }, { url = 
"https://files.pythonhosted.org/packages/ca/56/dadae4d4351b337de6e0269001fb40f3ebe9f72222190456713d2c1be53d/bitarray-3.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:f785af6b7cb07a9b1e5db0dea9ef9e3e8bb3d74874a0a61303eab9c16acc1999", size = 121680 }, + { url = "https://files.pythonhosted.org/packages/4f/30/07d7be4624981537d32b261dc48a16b03757cc9d88f66012d93acaf11663/bitarray-3.0.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7cb885c043000924554fe2124d13084c8fdae03aec52c4086915cd4cb87fe8be", size = 172147 }, + { url = "https://files.pythonhosted.org/packages/f0/e9/be1fa2828bad9cb32e1309e6dbd05adcc41679297d9e96bbb372be928e38/bitarray-3.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7814c9924a0b30ecd401f02f082d8697fc5a5be3f8d407efa6e34531ff3c306a", size = 123319 }, + { url = "https://files.pythonhosted.org/packages/22/28/33601d276a6eb76e40fe8a61c61f59cc9ff6d9ecf0b676235c02689475b8/bitarray-3.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:bcf524a087b143ba736aebbb054bb399d49e77cf7c04ed24c728e411adc82bfa", size = 121236 }, + { url = "https://files.pythonhosted.org/packages/85/d3/f36b213ffae8f9c8e4c6f12a91e18c06570a04f42d5a1bda4303380f2639/bitarray-3.0.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1d5abf1d6d910599ac16afdd9a0ed3e24f3b46af57f3070cf2792f236f36e0b", size = 287395 }, + { url = "https://files.pythonhosted.org/packages/b7/1a/2da3b00d876883b05ffd3be9b1311858b48d4a26579f8647860e271c5385/bitarray-3.0.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9929051feeaf8d948cc0b1c9ce57748079a941a1a15c89f6014edf18adaade84", size = 301501 }, + { url = "https://files.pythonhosted.org/packages/88/b9/c1b5af8d1c918f1ee98748f7f7270f932f531c2259dd578c0edcf16ec73e/bitarray-3.0.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96cf0898f8060b2d3ae491762ae871b071212ded97ff9e1e3a5229e9fefe544c", size = 304804 }, + { url = 
"https://files.pythonhosted.org/packages/92/24/81a10862856419638c0db13e04de7cbf19938353517a67e4848c691f0b7c/bitarray-3.0.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab37da66a8736ad5a75a58034180e92c41e864da0152b84e71fcc253a2f69cd4", size = 288507 }, + { url = "https://files.pythonhosted.org/packages/da/70/a093af92ef7b207a59087e3b5819e03767fbdda9dd56aada3a4ee25a1fbd/bitarray-3.0.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:beeb79e476d19b91fd6a3439853e4e5ba1b3b475920fa40d62bde719c8af786f", size = 278905 }, + { url = "https://files.pythonhosted.org/packages/fb/40/0925c6079c4b282b16eb9085f82df0cdf1f787fb4c67fd4baca3e37acf7f/bitarray-3.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f75fc0198c955d840b836059bd43e0993edbf119923029ca60c4fc017cefa54a", size = 281909 }, + { url = "https://files.pythonhosted.org/packages/61/4b/e11754a5d34cb997250d8019b1fe555d4c06fe2d2a68b0bf7c5580537046/bitarray-3.0.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:f12cc7c7638074918cdcc7491aff897df921b092ffd877227892d2686e98f876", size = 274711 }, + { url = "https://files.pythonhosted.org/packages/5b/78/39513f75423959ee2d82a82e10296b6a7bc7d880b16d714980a6752ef33b/bitarray-3.0.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:dbe1084935b942fab206e609fa1ed3f46ad1f2612fb4833e177e9b2a5e006c96", size = 297038 }, + { url = "https://files.pythonhosted.org/packages/af/a2/5cb81f8773a479de7c06cc1ada36d5cc5a8ebcd8715013e1c4e01a76e84a/bitarray-3.0.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:ac06dd72ee1e1b6e312504d06f75220b5894af1fb58f0c20643698f5122aea76", size = 309814 }, + { url = "https://files.pythonhosted.org/packages/03/3e/795b57c6f6eea61c47d0716e1d60219218028b1f260f7328802eac684964/bitarray-3.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:00f9a88c56e373009ac3c73c55205cfbd9683fbd247e2f9a64bae3da78795252", size = 281564 }, + { url = 
"https://files.pythonhosted.org/packages/f6/31/5914002ae4dd0e0079f8bccfd0647119cff364280d106108a19bd2511933/bitarray-3.0.0-cp313-cp313-win32.whl", hash = "sha256:9c6e52005e91803eb4e08c0a08a481fb55ddce97f926bae1f6fa61b3396b5b61", size = 114404 }, + { url = "https://files.pythonhosted.org/packages/76/0a/184f85a1739db841ae8fbb1d9ec028240d5a351e36abec9cd020de889dab/bitarray-3.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:cb98d5b6eac4b2cf2a5a69f60a9c499844b8bea207059e9fc45c752436e6bb49", size = 121672 }, ] [[package]] @@ -616,6 +629,17 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d4/38/ca8a4f639065f14ae0f1d9751e70447a261f1a30fa7547a828ae08142465/cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8", size = 488736 }, { url = "https://files.pythonhosted.org/packages/86/c5/28b2d6f799ec0bdecf44dced2ec5ed43e0eb63097b0f58c293583b406582/cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", size = 172448 }, { url = "https://files.pythonhosted.org/packages/50/b9/db34c4755a7bd1cb2d1603ac3863f22bcecbd1ba29e5ee841a4bc510b294/cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", size = 181976 }, + { url = "https://files.pythonhosted.org/packages/8d/f8/dd6c246b148639254dad4d6803eb6a54e8c85c6e11ec9df2cffa87571dbe/cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e", size = 182989 }, + { url = "https://files.pythonhosted.org/packages/8b/f1/672d303ddf17c24fc83afd712316fda78dc6fce1cd53011b839483e1ecc8/cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2", size = 178802 }, + { url = 
"https://files.pythonhosted.org/packages/0e/2d/eab2e858a91fdff70533cab61dcff4a1f55ec60425832ddfdc9cd36bc8af/cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3", size = 454792 }, + { url = "https://files.pythonhosted.org/packages/75/b2/fbaec7c4455c604e29388d55599b99ebcc250a60050610fadde58932b7ee/cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683", size = 478893 }, + { url = "https://files.pythonhosted.org/packages/4f/b7/6e4a2162178bf1935c336d4da8a9352cccab4d3a5d7914065490f08c0690/cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5", size = 485810 }, + { url = "https://files.pythonhosted.org/packages/c7/8a/1d0e4a9c26e54746dc08c2c6c037889124d4f59dffd853a659fa545f1b40/cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4", size = 471200 }, + { url = "https://files.pythonhosted.org/packages/26/9f/1aab65a6c0db35f43c4d1b4f580e8df53914310afc10ae0397d29d697af4/cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd", size = 479447 }, + { url = "https://files.pythonhosted.org/packages/5f/e4/fb8b3dd8dc0e98edf1135ff067ae070bb32ef9d509d6cb0f538cd6f7483f/cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed", size = 484358 }, + { url = "https://files.pythonhosted.org/packages/f1/47/d7145bf2dc04684935d57d67dff9d6d795b2ba2796806bb109864be3a151/cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9", 
size = 488469 }, + { url = "https://files.pythonhosted.org/packages/bf/ee/f94057fa6426481d663b88637a9a10e859e492c73d0384514a17d78ee205/cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d", size = 172475 }, + { url = "https://files.pythonhosted.org/packages/7c/fc/6a8cb64e5f0324877d503c854da15d76c1e50eb722e320b15345c4d0c6de/cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", size = 182009 }, ] [[package]] @@ -659,6 +683,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/13/0e/9c8d4cb99c98c1007cc11eda969ebfe837bbbd0acdb4736d228ccaabcd22/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1", size = 146192 }, { url = "https://files.pythonhosted.org/packages/b2/21/2b6b5b860781a0b49427309cb8670785aa543fb2178de875b87b9cc97746/charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35", size = 95550 }, { url = "https://files.pythonhosted.org/packages/21/5b/1b390b03b1d16c7e382b561c5329f83cc06623916aab983e8ab9239c7d5c/charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f", size = 102785 }, + { url = "https://files.pythonhosted.org/packages/38/94/ce8e6f63d18049672c76d07d119304e1e2d7c6098f0841b51c666e9f44a0/charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda", size = 195698 }, + { url = "https://files.pythonhosted.org/packages/24/2e/dfdd9770664aae179a96561cc6952ff08f9a8cd09a908f259a9dfa063568/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313", size = 140162 }, + { url = 
"https://files.pythonhosted.org/packages/24/4e/f646b9093cff8fc86f2d60af2de4dc17c759de9d554f130b140ea4738ca6/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9", size = 150263 }, + { url = "https://files.pythonhosted.org/packages/5e/67/2937f8d548c3ef6e2f9aab0f6e21001056f692d43282b165e7c56023e6dd/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b", size = 142966 }, + { url = "https://files.pythonhosted.org/packages/52/ed/b7f4f07de100bdb95c1756d3a4d17b90c1a3c53715c1a476f8738058e0fa/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11", size = 144992 }, + { url = "https://files.pythonhosted.org/packages/96/2c/d49710a6dbcd3776265f4c923bb73ebe83933dfbaa841c5da850fe0fd20b/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f", size = 147162 }, + { url = "https://files.pythonhosted.org/packages/b4/41/35ff1f9a6bd380303dea55e44c4933b4cc3c4850988927d4082ada230273/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd", size = 140972 }, + { url = "https://files.pythonhosted.org/packages/fb/43/c6a0b685fe6910d08ba971f62cd9c3e862a85770395ba5d9cad4fede33ab/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2", size = 149095 }, + { url = "https://files.pythonhosted.org/packages/4c/ff/a9a504662452e2d2878512115638966e75633519ec11f25fca3d2049a94a/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = 
"sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886", size = 152668 }, + { url = "https://files.pythonhosted.org/packages/6c/71/189996b6d9a4b932564701628af5cee6716733e9165af1d5e1b285c530ed/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601", size = 150073 }, + { url = "https://files.pythonhosted.org/packages/e4/93/946a86ce20790e11312c87c75ba68d5f6ad2208cfb52b2d6a2c32840d922/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd", size = 145732 }, + { url = "https://files.pythonhosted.org/packages/cd/e5/131d2fb1b0dddafc37be4f3a2fa79aa4c037368be9423061dccadfd90091/charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407", size = 95391 }, + { url = "https://files.pythonhosted.org/packages/27/f2/4f9a69cc7712b9b5ad8fdb87039fd89abba997ad5cbe690d1835d40405b0/charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971", size = 102702 }, { url = "https://files.pythonhosted.org/packages/0e/f6/65ecc6878a89bb1c23a086ea335ad4bf21a588990c3f535a227b9eea9108/charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85", size = 49767 }, ] @@ -1177,6 +1214,14 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/9c/ec/ade054097976c3d6debc9032e09a351505a0196aa5493edf021be376f75e/fonttools-4.55.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:54153c49913f45065c8d9e6d0c101396725c5621c8aee744719300f79771d75a", size = 5001832 }, { url = "https://files.pythonhosted.org/packages/e2/cd/233f0e31ad799bb91fc78099c8b4e5ec43b85a131688519640d6bae46f6a/fonttools-4.55.3-cp312-cp312-win32.whl", hash = "sha256:827e95fdbbd3e51f8b459af5ea10ecb4e30af50221ca103bea68218e9615de07", size = 
2162228 }, { url = "https://files.pythonhosted.org/packages/46/45/a498b5291f6c0d91b2394b1ed7447442a57d1c9b9cf8f439aee3c316a56e/fonttools-4.55.3-cp312-cp312-win_amd64.whl", hash = "sha256:e6e8766eeeb2de759e862004aa11a9ea3d6f6d5ec710551a88b476192b64fd54", size = 2209118 }, + { url = "https://files.pythonhosted.org/packages/9c/9f/00142a19bad96eeeb1aed93f567adc19b7f2c1af6f5bc0a1c3de90b4b1ac/fonttools-4.55.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a430178ad3e650e695167cb53242dae3477b35c95bef6525b074d87493c4bf29", size = 2752812 }, + { url = "https://files.pythonhosted.org/packages/b0/20/14b8250d63ba65e162091fb0dda07730f90c303bbf5257e9ddacec7230d9/fonttools-4.55.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:529cef2ce91dc44f8e407cc567fae6e49a1786f2fefefa73a294704c415322a4", size = 2291521 }, + { url = "https://files.pythonhosted.org/packages/34/47/a681cfd10245eb74f65e491a934053ec75c4af639655446558f29818e45e/fonttools-4.55.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e75f12c82127486fac2d8bfbf5bf058202f54bf4f158d367e41647b972342ca", size = 4770980 }, + { url = "https://files.pythonhosted.org/packages/d2/6c/a7066afc19db0705a12efd812e19c32cde2b9514eb714659522f2ebd60b6/fonttools-4.55.3-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:859c358ebf41db18fb72342d3080bce67c02b39e86b9fbcf1610cca14984841b", size = 4845534 }, + { url = "https://files.pythonhosted.org/packages/0c/a2/3c204fbabbfd845d9bdcab9ae35279d41e9a4bf5c80a0a2708f9c5a195d6/fonttools-4.55.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:546565028e244a701f73df6d8dd6be489d01617863ec0c6a42fa25bf45d43048", size = 4753910 }, + { url = "https://files.pythonhosted.org/packages/6e/8c/b4cb3592880340b89e4ef6601b531780bba73862332a6451d78fe135d6cb/fonttools-4.55.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:aca318b77f23523309eec4475d1fbbb00a6b133eb766a8bdc401faba91261abe", size = 
4976411 }, + { url = "https://files.pythonhosted.org/packages/fc/a8/4bf98840ff89fcc188470b59daec57322178bf36d2f4f756cd19a42a826b/fonttools-4.55.3-cp313-cp313-win32.whl", hash = "sha256:8c5ec45428edaa7022f1c949a632a6f298edc7b481312fc7dc258921e9399628", size = 2160178 }, + { url = "https://files.pythonhosted.org/packages/e6/57/4cc35004605416df3225ff362f3455cf09765db00df578ae9e46d0fefd23/fonttools-4.55.3-cp313-cp313-win_amd64.whl", hash = "sha256:11e5de1ee0d95af4ae23c1a138b184b7f06e0b6abacabf1d0db41c90b03d834b", size = 2206102 }, { url = "https://files.pythonhosted.org/packages/99/3b/406d17b1f63e04a82aa621936e6e1c53a8c05458abd66300ac85ea7f9ae9/fonttools-4.55.3-py3-none-any.whl", hash = "sha256:f412604ccbeee81b091b420272841e5ec5ef68967a9790e80bffd0e30b8e2977", size = 1111638 }, ] @@ -1230,6 +1275,21 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/37/e0/47f87544055b3349b633a03c4d94b405956cf2437f4ab46d0928b74b7526/frozenlist-1.5.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:52ef692a4bc60a6dd57f507429636c2af8b6046db8b31b18dac02cbc8f507f7f", size = 280569 }, { url = "https://files.pythonhosted.org/packages/f9/7c/490133c160fb6b84ed374c266f42800e33b50c3bbab1652764e6e1fc498a/frozenlist-1.5.0-cp312-cp312-win32.whl", hash = "sha256:29d94c256679247b33a3dc96cce0f93cbc69c23bf75ff715919332fdbb6a32b8", size = 44721 }, { url = "https://files.pythonhosted.org/packages/b1/56/4e45136ffc6bdbfa68c29ca56ef53783ef4c2fd395f7cbf99a2624aa9aaa/frozenlist-1.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:8969190d709e7c48ea386db202d708eb94bdb29207a1f269bab1196ce0dcca1f", size = 51329 }, + { url = "https://files.pythonhosted.org/packages/da/3b/915f0bca8a7ea04483622e84a9bd90033bab54bdf485479556c74fd5eaf5/frozenlist-1.5.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7a1a048f9215c90973402e26c01d1cff8a209e1f1b53f72b95c13db61b00f953", size = 91538 }, + { url = 
"https://files.pythonhosted.org/packages/c7/d1/a7c98aad7e44afe5306a2b068434a5830f1470675f0e715abb86eb15f15b/frozenlist-1.5.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:dd47a5181ce5fcb463b5d9e17ecfdb02b678cca31280639255ce9d0e5aa67af0", size = 52849 }, + { url = "https://files.pythonhosted.org/packages/3a/c8/76f23bf9ab15d5f760eb48701909645f686f9c64fbb8982674c241fbef14/frozenlist-1.5.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1431d60b36d15cda188ea222033eec8e0eab488f39a272461f2e6d9e1a8e63c2", size = 50583 }, + { url = "https://files.pythonhosted.org/packages/1f/22/462a3dd093d11df623179d7754a3b3269de3b42de2808cddef50ee0f4f48/frozenlist-1.5.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6482a5851f5d72767fbd0e507e80737f9c8646ae7fd303def99bfe813f76cf7f", size = 265636 }, + { url = "https://files.pythonhosted.org/packages/80/cf/e075e407fc2ae7328155a1cd7e22f932773c8073c1fc78016607d19cc3e5/frozenlist-1.5.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:44c49271a937625619e862baacbd037a7ef86dd1ee215afc298a417ff3270608", size = 270214 }, + { url = "https://files.pythonhosted.org/packages/a1/58/0642d061d5de779f39c50cbb00df49682832923f3d2ebfb0fedf02d05f7f/frozenlist-1.5.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:12f78f98c2f1c2429d42e6a485f433722b0061d5c0b0139efa64f396efb5886b", size = 273905 }, + { url = "https://files.pythonhosted.org/packages/ab/66/3fe0f5f8f2add5b4ab7aa4e199f767fd3b55da26e3ca4ce2cc36698e50c4/frozenlist-1.5.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce3aa154c452d2467487765e3adc730a8c153af77ad84096bc19ce19a2400840", size = 250542 }, + { url = "https://files.pythonhosted.org/packages/f6/b8/260791bde9198c87a465224e0e2bb62c4e716f5d198fc3a1dacc4895dbd1/frozenlist-1.5.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:9b7dc0c4338e6b8b091e8faf0db3168a37101943e687f373dce00959583f7439", size = 267026 }, + { url = "https://files.pythonhosted.org/packages/2e/a4/3d24f88c527f08f8d44ade24eaee83b2627793fa62fa07cbb7ff7a2f7d42/frozenlist-1.5.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:45e0896250900b5aa25180f9aec243e84e92ac84bd4a74d9ad4138ef3f5c97de", size = 257690 }, + { url = "https://files.pythonhosted.org/packages/de/9a/d311d660420b2beeff3459b6626f2ab4fb236d07afbdac034a4371fe696e/frozenlist-1.5.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:561eb1c9579d495fddb6da8959fd2a1fca2c6d060d4113f5844b433fc02f2641", size = 253893 }, + { url = "https://files.pythonhosted.org/packages/c6/23/e491aadc25b56eabd0f18c53bb19f3cdc6de30b2129ee0bc39cd387cd560/frozenlist-1.5.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:df6e2f325bfee1f49f81aaac97d2aa757c7646534a06f8f577ce184afe2f0a9e", size = 267006 }, + { url = "https://files.pythonhosted.org/packages/08/c4/ab918ce636a35fb974d13d666dcbe03969592aeca6c3ab3835acff01f79c/frozenlist-1.5.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:140228863501b44b809fb39ec56b5d4071f4d0aa6d216c19cbb08b8c5a7eadb9", size = 276157 }, + { url = "https://files.pythonhosted.org/packages/c0/29/3b7a0bbbbe5a34833ba26f686aabfe982924adbdcafdc294a7a129c31688/frozenlist-1.5.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7707a25d6a77f5d27ea7dc7d1fc608aa0a478193823f88511ef5e6b8a48f9d03", size = 264642 }, + { url = "https://files.pythonhosted.org/packages/ab/42/0595b3dbffc2e82d7fe658c12d5a5bafcd7516c6bf2d1d1feb5387caa9c1/frozenlist-1.5.0-cp313-cp313-win32.whl", hash = "sha256:31a9ac2b38ab9b5a8933b693db4939764ad3f299fcaa931a3e605bc3460e693c", size = 44914 }, + { url = "https://files.pythonhosted.org/packages/17/c4/b7db1206a3fea44bf3b838ca61deb6f74424a8a5db1dd53ecb21da669be6/frozenlist-1.5.0-cp313-cp313-win_amd64.whl", hash = "sha256:11aabdd62b8b9c4b84081a3c246506d1cddd2dd93ff0ad53ede5defec7886b28", size = 51167 }, { url = 
"https://files.pythonhosted.org/packages/c6/c8/a5be5b7550c10858fcf9b0ea054baccab474da77d37f1e828ce043a3a5d4/frozenlist-1.5.0-py3-none-any.whl", hash = "sha256:d994863bba198a4a518b467bb971c56e1db3f180a25c6cf7bb1949c267f748c3", size = 11901 }, ] @@ -1529,6 +1589,22 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/19/c5/36384a06f748044d06bdd8776e231fadf92fc896bd12cb1c9f5a1bda9578/greenlet-3.1.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b7cede291382a78f7bb5f04a529cb18e068dd29e0fb27376074b6d0317bf4dd0", size = 1135975 }, { url = "https://files.pythonhosted.org/packages/38/f9/c0a0eb61bdf808d23266ecf1d63309f0e1471f284300ce6dac0ae1231881/greenlet-3.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:23f20bb60ae298d7d8656c6ec6db134bca379ecefadb0b19ce6f19d1f232a942", size = 1163955 }, { url = "https://files.pythonhosted.org/packages/43/21/a5d9df1d21514883333fc86584c07c2b49ba7c602e670b174bd73cfc9c7f/greenlet-3.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:7124e16b4c55d417577c2077be379514321916d5790fa287c9ed6f23bd2ffd01", size = 299655 }, + { url = "https://files.pythonhosted.org/packages/f3/57/0db4940cd7bb461365ca8d6fd53e68254c9dbbcc2b452e69d0d41f10a85e/greenlet-3.1.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:05175c27cb459dcfc05d026c4232f9de8913ed006d42713cb8a5137bd49375f1", size = 272990 }, + { url = "https://files.pythonhosted.org/packages/1c/ec/423d113c9f74e5e402e175b157203e9102feeb7088cee844d735b28ef963/greenlet-3.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:935e943ec47c4afab8965954bf49bfa639c05d4ccf9ef6e924188f762145c0ff", size = 649175 }, + { url = "https://files.pythonhosted.org/packages/a9/46/ddbd2db9ff209186b7b7c621d1432e2f21714adc988703dbdd0e65155c77/greenlet-3.1.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:667a9706c970cb552ede35aee17339a18e8f2a87a51fba2ed39ceeeb1004798a", size = 663425 }, + { url = 
"https://files.pythonhosted.org/packages/bc/f9/9c82d6b2b04aa37e38e74f0c429aece5eeb02bab6e3b98e7db89b23d94c6/greenlet-3.1.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8a678974d1f3aa55f6cc34dc480169d58f2e6d8958895d68845fa4ab566509e", size = 657736 }, + { url = "https://files.pythonhosted.org/packages/d9/42/b87bc2a81e3a62c3de2b0d550bf91a86939442b7ff85abb94eec3fc0e6aa/greenlet-3.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efc0f674aa41b92da8c49e0346318c6075d734994c3c4e4430b1c3f853e498e4", size = 660347 }, + { url = "https://files.pythonhosted.org/packages/37/fa/71599c3fd06336cdc3eac52e6871cfebab4d9d70674a9a9e7a482c318e99/greenlet-3.1.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0153404a4bb921f0ff1abeb5ce8a5131da56b953eda6e14b88dc6bbc04d2049e", size = 615583 }, + { url = "https://files.pythonhosted.org/packages/4e/96/e9ef85de031703ee7a4483489b40cf307f93c1824a02e903106f2ea315fe/greenlet-3.1.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:275f72decf9932639c1c6dd1013a1bc266438eb32710016a1c742df5da6e60a1", size = 1133039 }, + { url = "https://files.pythonhosted.org/packages/87/76/b2b6362accd69f2d1889db61a18c94bc743e961e3cab344c2effaa4b4a25/greenlet-3.1.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c4aab7f6381f38a4b42f269057aee279ab0fc7bf2e929e3d4abfae97b682a12c", size = 1160716 }, + { url = "https://files.pythonhosted.org/packages/1f/1b/54336d876186920e185066d8c3024ad55f21d7cc3683c856127ddb7b13ce/greenlet-3.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:b42703b1cf69f2aa1df7d1030b9d77d3e584a70755674d60e710f0af570f3761", size = 299490 }, + { url = "https://files.pythonhosted.org/packages/5f/17/bea55bf36990e1638a2af5ba10c1640273ef20f627962cf97107f1e5d637/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1695e76146579f8c06c1509c7ce4dfe0706f49c6831a817ac04eebb2fd02011", size = 643731 }, + { url = 
"https://files.pythonhosted.org/packages/78/d2/aa3d2157f9ab742a08e0fd8f77d4699f37c22adfbfeb0c610a186b5f75e0/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7876452af029456b3f3549b696bb36a06db7c90747740c5302f74a9e9fa14b13", size = 649304 }, + { url = "https://files.pythonhosted.org/packages/f1/8e/d0aeffe69e53ccff5a28fa86f07ad1d2d2d6537a9506229431a2a02e2f15/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ead44c85f8ab905852d3de8d86f6f8baf77109f9da589cb4fa142bd3b57b475", size = 646537 }, + { url = "https://files.pythonhosted.org/packages/05/79/e15408220bbb989469c8871062c97c6c9136770657ba779711b90870d867/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8320f64b777d00dd7ccdade271eaf0cad6636343293a25074cc5566160e4de7b", size = 642506 }, + { url = "https://files.pythonhosted.org/packages/18/87/470e01a940307796f1d25f8167b551a968540fbe0551c0ebb853cb527dd6/greenlet-3.1.1-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6510bf84a6b643dabba74d3049ead221257603a253d0a9873f55f6a59a65f822", size = 602753 }, + { url = "https://files.pythonhosted.org/packages/e2/72/576815ba674eddc3c25028238f74d7b8068902b3968cbe456771b166455e/greenlet-3.1.1-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:04b013dc07c96f83134b1e99888e7a79979f1a247e2a9f59697fa14b5862ed01", size = 1122731 }, + { url = "https://files.pythonhosted.org/packages/ac/38/08cc303ddddc4b3d7c628c3039a61a3aae36c241ed01393d00c2fd663473/greenlet-3.1.1-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:411f015496fec93c1c8cd4e5238da364e1da7a124bcb293f085bf2860c32c6f6", size = 1142112 }, ] [[package]] @@ -1555,6 +1631,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d1/94/16550ad6b3f13b96f0856ee5dfc2554efac28539ee84a51d7b14526da985/grpcio-1.67.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:699e964923b70f3101393710793289e42845791ea07565654ada0969522d0a38", size = 6149369 }, { url = "https://files.pythonhosted.org/packages/33/0d/4c3b2587e8ad7f121b597329e6c2620374fccbc2e4e1aa3c73ccc670fde4/grpcio-1.67.1-cp312-cp312-win32.whl", hash = "sha256:4e7b904484a634a0fff132958dabdb10d63e0927398273917da3ee103e8d1f78", size = 3599176 }, { url = "https://files.pythonhosted.org/packages/7d/36/0c03e2d80db69e2472cf81c6123aa7d14741de7cf790117291a703ae6ae1/grpcio-1.67.1-cp312-cp312-win_amd64.whl", hash = "sha256:5721e66a594a6c4204458004852719b38f3d5522082be9061d6510b455c90afc", size = 4346574 }, + { url = "https://files.pythonhosted.org/packages/12/d2/2f032b7a153c7723ea3dea08bffa4bcaca9e0e5bdf643ce565b76da87461/grpcio-1.67.1-cp313-cp313-linux_armv7l.whl", hash = "sha256:aa0162e56fd10a5547fac8774c4899fc3e18c1aa4a4759d0ce2cd00d3696ea6b", size = 5091487 }, + { url = "https://files.pythonhosted.org/packages/d0/ae/ea2ff6bd2475a082eb97db1104a903cf5fc57c88c87c10b3c3f41a184fc0/grpcio-1.67.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:beee96c8c0b1a75d556fe57b92b58b4347c77a65781ee2ac749d550f2a365dc1", size = 10943530 }, + { url = "https://files.pythonhosted.org/packages/07/62/646be83d1a78edf8d69b56647327c9afc223e3140a744c59b25fbb279c3b/grpcio-1.67.1-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:a93deda571a1bf94ec1f6fcda2872dad3ae538700d94dc283c672a3b508ba3af", size = 5589079 }, + { url = "https://files.pythonhosted.org/packages/d0/25/71513d0a1b2072ce80d7f5909a93596b7ed10348b2ea4fdcbad23f6017bf/grpcio-1.67.1-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e6f255980afef598a9e64a24efce87b625e3e3c80a45162d111a461a9f92955", size = 6213542 }, + { url = "https://files.pythonhosted.org/packages/76/9a/d21236297111052dcb5dc85cd77dc7bf25ba67a0f55ae028b2af19a704bc/grpcio-1.67.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e838cad2176ebd5d4a8bb03955138d6589ce9e2ce5d51c3ada34396dbd2dba8", size = 5850211 }, + 
{ url = "https://files.pythonhosted.org/packages/2d/fe/70b1da9037f5055be14f359026c238821b9bcf6ca38a8d760f59a589aacd/grpcio-1.67.1-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:a6703916c43b1d468d0756c8077b12017a9fcb6a1ef13faf49e67d20d7ebda62", size = 6572129 }, + { url = "https://files.pythonhosted.org/packages/74/0d/7df509a2cd2a54814598caf2fb759f3e0b93764431ff410f2175a6efb9e4/grpcio-1.67.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:917e8d8994eed1d86b907ba2a61b9f0aef27a2155bca6cbb322430fc7135b7bb", size = 6149819 }, + { url = "https://files.pythonhosted.org/packages/0a/08/bc3b0155600898fd10f16b79054e1cca6cb644fa3c250c0fe59385df5e6f/grpcio-1.67.1-cp313-cp313-win32.whl", hash = "sha256:e279330bef1744040db8fc432becc8a727b84f456ab62b744d3fdb83f327e121", size = 3596561 }, + { url = "https://files.pythonhosted.org/packages/5a/96/44759eca966720d0f3e1b105c43f8ad4590c97bf8eb3cd489656e9590baa/grpcio-1.67.1-cp313-cp313-win_amd64.whl", hash = "sha256:fa0c739ad8b1996bd24823950e3cb5152ae91fca1c09cc791190bf1627ffefba", size = 4346042 }, ] [[package]] @@ -1689,6 +1774,13 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/52/d8/254d16a31d543073a0e57f1c329ca7378d8924e7e292eda72d0064987486/httptools-0.6.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ec4f178901fa1834d4a060320d2f3abc5c9e39766953d038f1458cb885f47e81", size = 485289 }, { url = "https://files.pythonhosted.org/packages/5f/3c/4aee161b4b7a971660b8be71a92c24d6c64372c1ab3ae7f366b3680df20f/httptools-0.6.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f9eb89ecf8b290f2e293325c646a211ff1c2493222798bb80a530c5e7502494f", size = 489779 }, { url = "https://files.pythonhosted.org/packages/12/b7/5cae71a8868e555f3f67a50ee7f673ce36eac970f029c0c5e9d584352961/httptools-0.6.4-cp312-cp312-win_amd64.whl", hash = "sha256:db78cb9ca56b59b016e64b6031eda5653be0589dba2b1b43453f6e8b405a0970", size = 88634 }, + { url = 
"https://files.pythonhosted.org/packages/94/a3/9fe9ad23fd35f7de6b91eeb60848986058bd8b5a5c1e256f5860a160cc3e/httptools-0.6.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ade273d7e767d5fae13fa637f4d53b6e961fb7fd93c7797562663f0171c26660", size = 197214 }, + { url = "https://files.pythonhosted.org/packages/ea/d9/82d5e68bab783b632023f2fa31db20bebb4e89dfc4d2293945fd68484ee4/httptools-0.6.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:856f4bc0478ae143bad54a4242fccb1f3f86a6e1be5548fecfd4102061b3a083", size = 102431 }, + { url = "https://files.pythonhosted.org/packages/96/c1/cb499655cbdbfb57b577734fde02f6fa0bbc3fe9fb4d87b742b512908dff/httptools-0.6.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:322d20ea9cdd1fa98bd6a74b77e2ec5b818abdc3d36695ab402a0de8ef2865a3", size = 473121 }, + { url = "https://files.pythonhosted.org/packages/af/71/ee32fd358f8a3bb199b03261f10921716990808a675d8160b5383487a317/httptools-0.6.4-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d87b29bd4486c0093fc64dea80231f7c7f7eb4dc70ae394d70a495ab8436071", size = 473805 }, + { url = "https://files.pythonhosted.org/packages/8a/0a/0d4df132bfca1507114198b766f1737d57580c9ad1cf93c1ff673e3387be/httptools-0.6.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:342dd6946aa6bda4b8f18c734576106b8a31f2fe31492881a9a160ec84ff4bd5", size = 448858 }, + { url = "https://files.pythonhosted.org/packages/1e/6a/787004fdef2cabea27bad1073bf6a33f2437b4dbd3b6fb4a9d71172b1c7c/httptools-0.6.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b36913ba52008249223042dca46e69967985fb4051951f94357ea681e1f5dc0", size = 452042 }, + { url = "https://files.pythonhosted.org/packages/4d/dc/7decab5c404d1d2cdc1bb330b1bf70e83d6af0396fd4fc76fc60c0d522bf/httptools-0.6.4-cp313-cp313-win_amd64.whl", hash = "sha256:28908df1b9bb8187393d5b5db91435ccc9c8e891657f9cbb42a2541b44c82fc8", size = 87682 }, ] [[package]] @@ -1849,6 
+1941,21 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/5c/37/3394bb47bac1ad2cb0465601f86828a0518d07828a650722e55268cdb7e6/jiter-0.8.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bf55846c7b7a680eebaf9c3c48d630e1bf51bdf76c68a5f654b8524335b0ad29", size = 503730 }, { url = "https://files.pythonhosted.org/packages/f9/e2/253fc1fa59103bb4e3aa0665d6ceb1818df1cd7bf3eb492c4dad229b1cd4/jiter-0.8.2-cp312-cp312-win32.whl", hash = "sha256:7efe4853ecd3d6110301665a5178b9856be7e2a9485f49d91aa4d737ad2ae49e", size = 203375 }, { url = "https://files.pythonhosted.org/packages/41/69/6d4bbe66b3b3b4507e47aa1dd5d075919ad242b4b1115b3f80eecd443687/jiter-0.8.2-cp312-cp312-win_amd64.whl", hash = "sha256:83c0efd80b29695058d0fd2fa8a556490dbce9804eac3e281f373bbc99045f6c", size = 204740 }, + { url = "https://files.pythonhosted.org/packages/6c/b0/bfa1f6f2c956b948802ef5a021281978bf53b7a6ca54bb126fd88a5d014e/jiter-0.8.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:ca1f08b8e43dc3bd0594c992fb1fd2f7ce87f7bf0d44358198d6da8034afdf84", size = 301190 }, + { url = "https://files.pythonhosted.org/packages/a4/8f/396ddb4e292b5ea57e45ade5dc48229556b9044bad29a3b4b2dddeaedd52/jiter-0.8.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5672a86d55416ccd214c778efccf3266b84f87b89063b582167d803246354be4", size = 309334 }, + { url = "https://files.pythonhosted.org/packages/7f/68/805978f2f446fa6362ba0cc2e4489b945695940656edd844e110a61c98f8/jiter-0.8.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58dc9bc9767a1101f4e5e22db1b652161a225874d66f0e5cb8e2c7d1c438b587", size = 333918 }, + { url = "https://files.pythonhosted.org/packages/b3/99/0f71f7be667c33403fa9706e5b50583ae5106d96fab997fa7e2f38ee8347/jiter-0.8.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:37b2998606d6dadbb5ccda959a33d6a5e853252d921fec1792fc902351bb4e2c", size = 356057 }, + { url = 
"https://files.pythonhosted.org/packages/8d/50/a82796e421a22b699ee4d2ce527e5bcb29471a2351cbdc931819d941a167/jiter-0.8.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4ab9a87f3784eb0e098f84a32670cfe4a79cb6512fd8f42ae3d0709f06405d18", size = 379790 }, + { url = "https://files.pythonhosted.org/packages/3c/31/10fb012b00f6d83342ca9e2c9618869ab449f1aa78c8f1b2193a6b49647c/jiter-0.8.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:79aec8172b9e3c6d05fd4b219d5de1ac616bd8da934107325a6c0d0e866a21b6", size = 388285 }, + { url = "https://files.pythonhosted.org/packages/c8/81/f15ebf7de57be488aa22944bf4274962aca8092e4f7817f92ffa50d3ee46/jiter-0.8.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:711e408732d4e9a0208008e5892c2966b485c783cd2d9a681f3eb147cf36c7ef", size = 344764 }, + { url = "https://files.pythonhosted.org/packages/b3/e8/0cae550d72b48829ba653eb348cdc25f3f06f8a62363723702ec18e7be9c/jiter-0.8.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:653cf462db4e8c41995e33d865965e79641ef45369d8a11f54cd30888b7e6ff1", size = 376620 }, + { url = "https://files.pythonhosted.org/packages/b8/50/e5478ff9d82534a944c03b63bc217c5f37019d4a34d288db0f079b13c10b/jiter-0.8.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:9c63eaef32b7bebac8ebebf4dabebdbc6769a09c127294db6babee38e9f405b9", size = 510402 }, + { url = "https://files.pythonhosted.org/packages/8e/1e/3de48bbebbc8f7025bd454cedc8c62378c0e32dd483dece5f4a814a5cb55/jiter-0.8.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:eb21aaa9a200d0a80dacc7a81038d2e476ffe473ffdd9c91eb745d623561de05", size = 503018 }, + { url = "https://files.pythonhosted.org/packages/d5/cd/d5a5501d72a11fe3e5fd65c78c884e5164eefe80077680533919be22d3a3/jiter-0.8.2-cp313-cp313-win32.whl", hash = "sha256:789361ed945d8d42850f919342a8665d2dc79e7e44ca1c97cc786966a21f627a", size = 203190 }, + { url = 
"https://files.pythonhosted.org/packages/51/bf/e5ca301245ba951447e3ad677a02a64a8845b185de2603dabd83e1e4b9c6/jiter-0.8.2-cp313-cp313-win_amd64.whl", hash = "sha256:ab7f43235d71e03b941c1630f4b6e3055d46b6cb8728a17663eaac9d8e83a865", size = 203551 }, + { url = "https://files.pythonhosted.org/packages/2f/3c/71a491952c37b87d127790dd7a0b1ebea0514c6b6ad30085b16bbe00aee6/jiter-0.8.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b426f72cd77da3fec300ed3bc990895e2dd6b49e3bfe6c438592a3ba660e41ca", size = 308347 }, + { url = "https://files.pythonhosted.org/packages/a0/4c/c02408042e6a7605ec063daed138e07b982fdb98467deaaf1c90950cf2c6/jiter-0.8.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2dd880785088ff2ad21ffee205e58a8c1ddabc63612444ae41e5e4b321b39c0", size = 342875 }, + { url = "https://files.pythonhosted.org/packages/91/61/c80ef80ed8a0a21158e289ef70dac01e351d929a1c30cb0f49be60772547/jiter-0.8.2-cp313-cp313t-win_amd64.whl", hash = "sha256:3ac9f578c46f22405ff7f8b1f5848fb753cc4b8377fbec8470a7dc3997ca7566", size = 202374 }, ] [[package]] @@ -2099,6 +2206,23 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/7d/ed/e6276c8d9668028213df01f598f385b05b55a4e1b4662ee12ef05dab35aa/lxml-5.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e63601ad5cd8f860aa99d109889b5ac34de571c7ee902d6812d5d9ddcc77fa7d", size = 5012542 }, { url = "https://files.pythonhosted.org/packages/36/88/684d4e800f5aa28df2a991a6a622783fb73cf0e46235cfa690f9776f032e/lxml-5.3.0-cp312-cp312-win32.whl", hash = "sha256:17e8d968d04a37c50ad9c456a286b525d78c4a1c15dd53aa46c1d8e06bf6fa30", size = 3486454 }, { url = "https://files.pythonhosted.org/packages/fc/82/ace5a5676051e60355bd8fb945df7b1ba4f4fb8447f2010fb816bfd57724/lxml-5.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:c1a69e58a6bb2de65902051d57fde951febad631a20a64572677a1052690482f", size = 3816857 }, + { url = 
"https://files.pythonhosted.org/packages/94/6a/42141e4d373903bfea6f8e94b2f554d05506dfda522ada5343c651410dc8/lxml-5.3.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8c72e9563347c7395910de6a3100a4840a75a6f60e05af5e58566868d5eb2d6a", size = 8156284 }, + { url = "https://files.pythonhosted.org/packages/91/5e/fa097f0f7d8b3d113fb7312c6308af702f2667f22644441715be961f2c7e/lxml-5.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e92ce66cd919d18d14b3856906a61d3f6b6a8500e0794142338da644260595cd", size = 4432407 }, + { url = "https://files.pythonhosted.org/packages/2d/a1/b901988aa6d4ff937f2e5cfc114e4ec561901ff00660c3e56713642728da/lxml-5.3.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d04f064bebdfef9240478f7a779e8c5dc32b8b7b0b2fc6a62e39b928d428e51", size = 5048331 }, + { url = "https://files.pythonhosted.org/packages/30/0f/b2a54f48e52de578b71bbe2a2f8160672a8a5e103df3a78da53907e8c7ed/lxml-5.3.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c2fb570d7823c2bbaf8b419ba6e5662137f8166e364a8b2b91051a1fb40ab8b", size = 4744835 }, + { url = "https://files.pythonhosted.org/packages/82/9d/b000c15538b60934589e83826ecbc437a1586488d7c13f8ee5ff1f79a9b8/lxml-5.3.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0c120f43553ec759f8de1fee2f4794452b0946773299d44c36bfe18e83caf002", size = 5316649 }, + { url = "https://files.pythonhosted.org/packages/e3/ee/ffbb9eaff5e541922611d2c56b175c45893d1c0b8b11e5a497708a6a3b3b/lxml-5.3.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:562e7494778a69086f0312ec9689f6b6ac1c6b65670ed7d0267e49f57ffa08c4", size = 4812046 }, + { url = "https://files.pythonhosted.org/packages/15/ff/7ff89d567485c7b943cdac316087f16b2399a8b997007ed352a1248397e5/lxml-5.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:423b121f7e6fa514ba0c7918e56955a1d4470ed35faa03e3d9f0e3baa4c7e492", 
size = 4918597 }, + { url = "https://files.pythonhosted.org/packages/c6/a3/535b6ed8c048412ff51268bdf4bf1cf052a37aa7e31d2e6518038a883b29/lxml-5.3.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:c00f323cc00576df6165cc9d21a4c21285fa6b9989c5c39830c3903dc4303ef3", size = 4738071 }, + { url = "https://files.pythonhosted.org/packages/7a/8f/cbbfa59cb4d4fd677fe183725a76d8c956495d7a3c7f111ab8f5e13d2e83/lxml-5.3.0-cp313-cp313-manylinux_2_28_ppc64le.whl", hash = "sha256:1fdc9fae8dd4c763e8a31e7630afef517eab9f5d5d31a278df087f307bf601f4", size = 5342213 }, + { url = "https://files.pythonhosted.org/packages/5c/fb/db4c10dd9958d4b52e34d1d1f7c1f434422aeaf6ae2bbaaff2264351d944/lxml-5.3.0-cp313-cp313-manylinux_2_28_s390x.whl", hash = "sha256:658f2aa69d31e09699705949b5fc4719cbecbd4a97f9656a232e7d6c7be1a367", size = 4893749 }, + { url = "https://files.pythonhosted.org/packages/f2/38/bb4581c143957c47740de18a3281a0cab7722390a77cc6e610e8ebf2d736/lxml-5.3.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:1473427aff3d66a3fa2199004c3e601e6c4500ab86696edffdbc84954c72d832", size = 4945901 }, + { url = "https://files.pythonhosted.org/packages/fc/d5/18b7de4960c731e98037bd48fa9f8e6e8f2558e6fbca4303d9b14d21ef3b/lxml-5.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a87de7dd873bf9a792bf1e58b1c3887b9264036629a5bf2d2e6579fe8e73edff", size = 4815447 }, + { url = "https://files.pythonhosted.org/packages/97/a8/cd51ceaad6eb849246559a8ef60ae55065a3df550fc5fcd27014361c1bab/lxml-5.3.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:0d7b36afa46c97875303a94e8f3ad932bf78bace9e18e603f2085b652422edcd", size = 5411186 }, + { url = "https://files.pythonhosted.org/packages/89/c3/1e3dabab519481ed7b1fdcba21dcfb8832f57000733ef0e71cf6d09a5e03/lxml-5.3.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:cf120cce539453ae086eacc0130a324e7026113510efa83ab42ef3fcfccac7fb", size = 5324481 }, + { url = 
"https://files.pythonhosted.org/packages/b6/17/71e9984cf0570cd202ac0a1c9ed5c1b8889b0fc8dc736f5ef0ffb181c284/lxml-5.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:df5c7333167b9674aa8ae1d4008fa4bc17a313cc490b2cca27838bbdcc6bb15b", size = 5011053 }, + { url = "https://files.pythonhosted.org/packages/69/68/9f7e6d3312a91e30829368c2b3217e750adef12a6f8eb10498249f4e8d72/lxml-5.3.0-cp313-cp313-win32.whl", hash = "sha256:c802e1c2ed9f0c06a65bc4ed0189d000ada8049312cfeab6ca635e39c9608957", size = 3485634 }, + { url = "https://files.pythonhosted.org/packages/7d/db/214290d58ad68c587bd5d6af3d34e56830438733d0d0856c0275fde43652/lxml-5.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:406246b96d552e0503e17a1006fd27edac678b3fcc9f1be71a2f94b4ff61528d", size = 3814417 }, ] [[package]] @@ -2160,6 +2284,26 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a2/82/8be4c96ffee03c5b4a034e60a31294daf481e12c7c43ab8e34a1453ee48b/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48", size = 23352 }, { url = "https://files.pythonhosted.org/packages/51/ae/97827349d3fcffee7e184bdf7f41cd6b88d9919c80f0263ba7acd1bbcb18/MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30", size = 15097 }, { url = "https://files.pythonhosted.org/packages/c1/80/a61f99dc3a936413c3ee4e1eecac96c0da5ed07ad56fd975f1a9da5bc630/MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87", size = 15601 }, + { url = "https://files.pythonhosted.org/packages/83/0e/67eb10a7ecc77a0c2bbe2b0235765b98d164d81600746914bebada795e97/MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd", size = 14274 }, + { url = 
"https://files.pythonhosted.org/packages/2b/6d/9409f3684d3335375d04e5f05744dfe7e9f120062c9857df4ab490a1031a/MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430", size = 12352 }, + { url = "https://files.pythonhosted.org/packages/d2/f5/6eadfcd3885ea85fe2a7c128315cc1bb7241e1987443d78c8fe712d03091/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094", size = 24122 }, + { url = "https://files.pythonhosted.org/packages/0c/91/96cf928db8236f1bfab6ce15ad070dfdd02ed88261c2afafd4b43575e9e9/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396", size = 23085 }, + { url = "https://files.pythonhosted.org/packages/c2/cf/c9d56af24d56ea04daae7ac0940232d31d5a8354f2b457c6d856b2057d69/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79", size = 22978 }, + { url = "https://files.pythonhosted.org/packages/2a/9f/8619835cd6a711d6272d62abb78c033bda638fdc54c4e7f4272cf1c0962b/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a", size = 24208 }, + { url = "https://files.pythonhosted.org/packages/f9/bf/176950a1792b2cd2102b8ffeb5133e1ed984547b75db47c25a67d3359f77/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca", size = 23357 }, + { url = "https://files.pythonhosted.org/packages/ce/4f/9a02c1d335caabe5c4efb90e1b6e8ee944aa245c1aaaab8e8a618987d816/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c", size = 23344 }, + { url = 
"https://files.pythonhosted.org/packages/ee/55/c271b57db36f748f0e04a759ace9f8f759ccf22b4960c270c78a394f58be/MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1", size = 15101 }, + { url = "https://files.pythonhosted.org/packages/29/88/07df22d2dd4df40aba9f3e402e6dc1b8ee86297dddbad4872bd5e7b0094f/MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f", size = 15603 }, + { url = "https://files.pythonhosted.org/packages/62/6a/8b89d24db2d32d433dffcd6a8779159da109842434f1dd2f6e71f32f738c/MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c", size = 14510 }, + { url = "https://files.pythonhosted.org/packages/7a/06/a10f955f70a2e5a9bf78d11a161029d278eeacbd35ef806c3fd17b13060d/MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb", size = 12486 }, + { url = "https://files.pythonhosted.org/packages/34/cf/65d4a571869a1a9078198ca28f39fba5fbb910f952f9dbc5220afff9f5e6/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c", size = 25480 }, + { url = "https://files.pythonhosted.org/packages/0c/e3/90e9651924c430b885468b56b3d597cabf6d72be4b24a0acd1fa0e12af67/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d", size = 23914 }, + { url = "https://files.pythonhosted.org/packages/66/8c/6c7cf61f95d63bb866db39085150df1f2a5bd3335298f14a66b48e92659c/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe", size = 23796 }, + { url = 
"https://files.pythonhosted.org/packages/bb/35/cbe9238ec3f47ac9a7c8b3df7a808e7cb50fe149dc7039f5f454b3fba218/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5", size = 25473 }, + { url = "https://files.pythonhosted.org/packages/e6/32/7621a4382488aa283cc05e8984a9c219abad3bca087be9ec77e89939ded9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a", size = 24114 }, + { url = "https://files.pythonhosted.org/packages/0d/80/0985960e4b89922cb5a0bac0ed39c5b96cbc1a536a99f30e8c220a996ed9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9", size = 24098 }, + { url = "https://files.pythonhosted.org/packages/82/78/fedb03c7d5380df2427038ec8d973587e90561b2d90cd472ce9254cf348b/MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6", size = 15208 }, + { url = "https://files.pythonhosted.org/packages/4f/65/6079a46068dfceaeabb5dcad6d674f5f5c61a6fa5673746f42a9f4c233b3/MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", size = 15739 }, ] [[package]] @@ -2235,6 +2379,22 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/9b/fd/eb1a3573cda74d4c2381d10ded62c128e869954ced1881c15e2bcd97a48f/mmh3-5.0.1-cp312-cp312-win32.whl", hash = "sha256:842516acf04da546f94fad52db125ee619ccbdcada179da51c326a22c4578cb9", size = 39206 }, { url = "https://files.pythonhosted.org/packages/66/e8/542ed252924002b84c43a68a080cfd4facbea0d5df361e4f59637638d3c7/mmh3-5.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:d963be0dbfd9fca209c17172f6110787ebf78934af25e3694fe2ba40e55c1e2b", size = 39799 }, { url = 
"https://files.pythonhosted.org/packages/bd/25/ff2cd36c82a23afa57a05cdb52ab467a911fb12c055c8a8238c0d426cbf0/mmh3-5.0.1-cp312-cp312-win_arm64.whl", hash = "sha256:a5da292ceeed8ce8e32b68847261a462d30fd7b478c3f55daae841404f433c15", size = 36537 }, + { url = "https://files.pythonhosted.org/packages/09/e0/fb19c46265c18311b422ba5ce3e18046ad45c48cfb213fd6dbec23ae6b51/mmh3-5.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:673e3f1c8d4231d6fb0271484ee34cb7146a6499fc0df80788adb56fd76842da", size = 52909 }, + { url = "https://files.pythonhosted.org/packages/c3/94/54fc591e7a24c7ce2c531ecfc5715cff932f9d320c2936550cc33d67304d/mmh3-5.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f795a306bd16a52ad578b663462cc8e95500b3925d64118ae63453485d67282b", size = 38396 }, + { url = "https://files.pythonhosted.org/packages/1f/9a/142bcc9d0d28fc8ae45bbfb83926adc069f984cdf3495a71534cc22b8e27/mmh3-5.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5ed57a5e28e502a1d60436cc25c76c3a5ba57545f250f2969af231dc1221e0a5", size = 38207 }, + { url = "https://files.pythonhosted.org/packages/f8/5b/f1c9110aa70321bb1ee713f17851b9534586c63bc25e0110e4fc03ae2450/mmh3-5.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:632c28e7612e909dbb6cbe2fe496201ada4695b7715584005689c5dc038e59ad", size = 94988 }, + { url = "https://files.pythonhosted.org/packages/87/e5/4dc67e7e0e716c641ab0a5875a659e37258417439590feff5c3bd3ff4538/mmh3-5.0.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:53fd6bd525a5985e391c43384672d9d6b317fcb36726447347c7fc75bfed34ec", size = 99969 }, + { url = "https://files.pythonhosted.org/packages/ac/68/d148327337687c53f04ad9ceaedfa9ad155ee0111d0cb06220f044d66720/mmh3-5.0.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dceacf6b0b961a0e499836af3aa62d60633265607aef551b2a3e3c48cdaa5edd", size = 99662 }, + { url = 
"https://files.pythonhosted.org/packages/13/79/782adb6df6397947c1097b1e94b7f8d95629a4a73df05cf7207bd5148c1f/mmh3-5.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8f0738d478fdfb5d920f6aff5452c78f2c35b0eff72caa2a97dfe38e82f93da2", size = 87606 }, + { url = "https://files.pythonhosted.org/packages/f2/c2/0404383281df049d0e4ccf07fabd659fc1f3da834df6708d934116cbf45d/mmh3-5.0.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e70285e7391ab88b872e5bef632bad16b9d99a6d3ca0590656a4753d55988af", size = 94836 }, + { url = "https://files.pythonhosted.org/packages/c8/33/fda67c5f28e4c2131891cf8cbc3513cfc55881e3cfe26e49328e38ffacb3/mmh3-5.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:27e5fc6360aa6b828546a4318da1a7da6bf6e5474ccb053c3a6aa8ef19ff97bd", size = 90492 }, + { url = "https://files.pythonhosted.org/packages/64/2f/0ed38aefe2a87f30bb1b12e5b75dc69fcffdc16def40d1752d6fc7cbbf96/mmh3-5.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:7989530c3c1e2c17bf5a0ec2bba09fd19819078ba90beedabb1c3885f5040b0d", size = 89594 }, + { url = "https://files.pythonhosted.org/packages/95/ab/6e7a5e765fc78e3dbd0a04a04cfdf72e91eb8e31976228e69d82c741a5b4/mmh3-5.0.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:cdad7bee649950da7ecd3cbbbd12fb81f1161072ecbdb5acfa0018338c5cb9cf", size = 94929 }, + { url = "https://files.pythonhosted.org/packages/74/51/f748f00c072006f4a093d9b08853a0e2e3cd5aeaa91343d4e2d942851978/mmh3-5.0.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:e143b8f184c1bb58cecd85ab4a4fd6dc65a2d71aee74157392c3fddac2a4a331", size = 91317 }, + { url = "https://files.pythonhosted.org/packages/df/a1/21ee8017a7feb0270c49f756ff56da9f99bd150dcfe3b3f6f0d4b243423d/mmh3-5.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e5eb12e886f3646dd636f16b76eb23fc0c27e8ff3c1ae73d4391e50ef60b40f6", size = 89861 }, + { url = 
"https://files.pythonhosted.org/packages/c2/d2/46a6d070de4659bdf91cd6a62d659f8cc547dadee52b6d02bcbacb3262ed/mmh3-5.0.1-cp313-cp313-win32.whl", hash = "sha256:16e6dddfa98e1c2d021268e72c78951234186deb4df6630e984ac82df63d0a5d", size = 39201 }, + { url = "https://files.pythonhosted.org/packages/ed/07/316c062f09019b99b248a4183c5333f8eeebe638345484774908a8f2c9c0/mmh3-5.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:d3ffb792d70b8c4a2382af3598dad6ae0c5bd9cee5b7ffcc99aa2f5fd2c1bf70", size = 39807 }, + { url = "https://files.pythonhosted.org/packages/9d/d3/f7e6d7d062b8d7072c3989a528d9d47486ee5d5ae75250f6e26b4976d098/mmh3-5.0.1-cp313-cp313-win_arm64.whl", hash = "sha256:122fa9ec148383f9124292962bda745f192b47bfd470b2af5fe7bb3982b17896", size = 36539 }, ] [[package]] @@ -2330,6 +2490,21 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/77/00/8538f11e3356b5d95fa4b024aa566cde7a38aa7a5f08f4912b32a037c5dc/multidict-6.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3ec660d19bbc671e3a6443325f07263be452c453ac9e512f5eb935e7d4ac28b3", size = 125360 }, { url = "https://files.pythonhosted.org/packages/be/05/5d334c1f2462d43fec2363cd00b1c44c93a78c3925d952e9a71caf662e96/multidict-6.1.0-cp312-cp312-win32.whl", hash = "sha256:58130ecf8f7b8112cdb841486404f1282b9c86ccb30d3519faf301b2e5659133", size = 26382 }, { url = "https://files.pythonhosted.org/packages/a3/bf/f332a13486b1ed0496d624bcc7e8357bb8053823e8cd4b9a18edc1d97e73/multidict-6.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:188215fc0aafb8e03341995e7c4797860181562380f81ed0a87ff455b70bf1f1", size = 28529 }, + { url = "https://files.pythonhosted.org/packages/22/67/1c7c0f39fe069aa4e5d794f323be24bf4d33d62d2a348acdb7991f8f30db/multidict-6.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:d569388c381b24671589335a3be6e1d45546c2988c2ebe30fdcada8457a31008", size = 48771 }, + { url = 
"https://files.pythonhosted.org/packages/3c/25/c186ee7b212bdf0df2519eacfb1981a017bda34392c67542c274651daf23/multidict-6.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:052e10d2d37810b99cc170b785945421141bf7bb7d2f8799d431e7db229c385f", size = 29533 }, + { url = "https://files.pythonhosted.org/packages/67/5e/04575fd837e0958e324ca035b339cea174554f6f641d3fb2b4f2e7ff44a2/multidict-6.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f90c822a402cb865e396a504f9fc8173ef34212a342d92e362ca498cad308e28", size = 29595 }, + { url = "https://files.pythonhosted.org/packages/d3/b2/e56388f86663810c07cfe4a3c3d87227f3811eeb2d08450b9e5d19d78876/multidict-6.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b225d95519a5bf73860323e633a664b0d85ad3d5bede6d30d95b35d4dfe8805b", size = 130094 }, + { url = "https://files.pythonhosted.org/packages/6c/ee/30ae9b4186a644d284543d55d491fbd4239b015d36b23fea43b4c94f7052/multidict-6.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:23bfd518810af7de1116313ebd9092cb9aa629beb12f6ed631ad53356ed6b86c", size = 134876 }, + { url = "https://files.pythonhosted.org/packages/84/c7/70461c13ba8ce3c779503c70ec9d0345ae84de04521c1f45a04d5f48943d/multidict-6.1.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c09fcfdccdd0b57867577b719c69e347a436b86cd83747f179dbf0cc0d4c1f3", size = 133500 }, + { url = "https://files.pythonhosted.org/packages/4a/9f/002af221253f10f99959561123fae676148dd730e2daa2cd053846a58507/multidict-6.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf6bea52ec97e95560af5ae576bdac3aa3aae0b6758c6efa115236d9e07dae44", size = 131099 }, + { url = "https://files.pythonhosted.org/packages/82/42/d1c7a7301d52af79d88548a97e297f9d99c961ad76bbe6f67442bb77f097/multidict-6.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:57feec87371dbb3520da6192213c7d6fc892d5589a93db548331954de8248fd2", size = 120403 }, + { url = "https://files.pythonhosted.org/packages/68/f3/471985c2c7ac707547553e8f37cff5158030d36bdec4414cb825fbaa5327/multidict-6.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0c3f390dc53279cbc8ba976e5f8035eab997829066756d811616b652b00a23a3", size = 125348 }, + { url = "https://files.pythonhosted.org/packages/67/2c/e6df05c77e0e433c214ec1d21ddd203d9a4770a1f2866a8ca40a545869a0/multidict-6.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:59bfeae4b25ec05b34f1956eaa1cb38032282cd4dfabc5056d0a1ec4d696d3aa", size = 119673 }, + { url = "https://files.pythonhosted.org/packages/c5/cd/bc8608fff06239c9fb333f9db7743a1b2eafe98c2666c9a196e867a3a0a4/multidict-6.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b2f59caeaf7632cc633b5cf6fc449372b83bbdf0da4ae04d5be36118e46cc0aa", size = 129927 }, + { url = "https://files.pythonhosted.org/packages/44/8e/281b69b7bc84fc963a44dc6e0bbcc7150e517b91df368a27834299a526ac/multidict-6.1.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:37bb93b2178e02b7b618893990941900fd25b6b9ac0fa49931a40aecdf083fe4", size = 128711 }, + { url = "https://files.pythonhosted.org/packages/12/a4/63e7cd38ed29dd9f1881d5119f272c898ca92536cdb53ffe0843197f6c85/multidict-6.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4e9f48f58c2c523d5a06faea47866cd35b32655c46b443f163d08c6d0ddb17d6", size = 125519 }, + { url = "https://files.pythonhosted.org/packages/38/e0/4f5855037a72cd8a7a2f60a3952d9aa45feedb37ae7831642102604e8a37/multidict-6.1.0-cp313-cp313-win32.whl", hash = "sha256:3a37ffb35399029b45c6cc33640a92bef403c9fd388acce75cdc88f58bd19a81", size = 26426 }, + { url = "https://files.pythonhosted.org/packages/7e/a5/17ee3a4db1e310b7405f5d25834460073a8ccd86198ce044dfaf69eac073/multidict-6.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:e9aa71e15d9d9beaad2c6b9319edcdc0a49a43ef5c0a4c8265ca9ee7d6c67774", size = 28531 }, { url = 
"https://files.pythonhosted.org/packages/99/b7/b9e70fde2c0f0c9af4cc5277782a89b66d35948ea3369ec9f598358c3ac5/multidict-6.1.0-py3-none-any.whl", hash = "sha256:48e171e52d1c4d33888e529b999e5900356b9ae588c2f09a52dcefb158b27506", size = 10051 }, ] @@ -2342,11 +2517,8 @@ dependencies = [ ] sdist = { url = "https://files.pythonhosted.org/packages/b5/ae/04f39c5d0d0def03247c2893d6f2b83c136bf3320a2154d7b8858f2ba72d/multiprocess-0.70.16.tar.gz", hash = "sha256:161af703d4652a0e1410be6abccecde4a7ddffd19341be0a7011b94aeb171ac1", size = 1772603 } wheels = [ - { url = "https://files.pythonhosted.org/packages/bc/f7/7ec7fddc92e50714ea3745631f79bd9c96424cb2702632521028e57d3a36/multiprocess-0.70.16-py310-none-any.whl", hash = "sha256:c4a9944c67bd49f823687463660a2d6daae94c289adff97e0f9d696ba6371d02", size = 134824 }, { url = "https://files.pythonhosted.org/packages/50/15/b56e50e8debaf439f44befec5b2af11db85f6e0f344c3113ae0be0593a91/multiprocess-0.70.16-py311-none-any.whl", hash = "sha256:af4cabb0dac72abfb1e794fa7855c325fd2b55a10a44628a3c1ad3311c04127a", size = 143519 }, { url = "https://files.pythonhosted.org/packages/0a/7d/a988f258104dcd2ccf1ed40fdc97e26c4ac351eeaf81d76e266c52d84e2f/multiprocess-0.70.16-py312-none-any.whl", hash = "sha256:fc0544c531920dde3b00c29863377f87e1632601092ea2daca74e4beb40faa2e", size = 146741 }, - { url = "https://files.pythonhosted.org/packages/ea/89/38df130f2c799090c978b366cfdf5b96d08de5b29a4a293df7f7429fa50b/multiprocess-0.70.16-py38-none-any.whl", hash = "sha256:a71d82033454891091a226dfc319d0cfa8019a4e888ef9ca910372a446de4435", size = 132628 }, - { url = "https://files.pythonhosted.org/packages/da/d9/f7f9379981e39b8c2511c9e0326d212accacb82f12fbfdc1aa2ce2a7b2b6/multiprocess-0.70.16-py39-none-any.whl", hash = "sha256:a0bafd3ae1b732eac64be2e72038231c1ba97724b60b09400d68f229fcc2fbf3", size = 133351 }, ] [[package]] @@ -2446,6 +2618,7 @@ source = { registry = "https://pypi.org/simple" } wheels = [ { url = 
"https://files.pythonhosted.org/packages/7f/7f/7fbae15a3982dc9595e49ce0f19332423b260045d0a6afe93cdbe2f1f624/nvidia_cublas_cu12-12.4.5.8-py3-none-manylinux2014_aarch64.whl", hash = "sha256:0f8aa1706812e00b9f19dfe0cdb3999b092ccb8ca168c0db5b8ea712456fd9b3", size = 363333771 }, { url = "https://files.pythonhosted.org/packages/ae/71/1c91302526c45ab494c23f61c7a84aa568b8c1f9d196efa5993957faf906/nvidia_cublas_cu12-12.4.5.8-py3-none-manylinux2014_x86_64.whl", hash = "sha256:2fc8da60df463fdefa81e323eef2e36489e1c94335b5358bcb38360adf75ac9b", size = 363438805 }, + { url = "https://files.pythonhosted.org/packages/e2/2a/4f27ca96232e8b5269074a72e03b4e0d43aa68c9b965058b1684d07c6ff8/nvidia_cublas_cu12-12.4.5.8-py3-none-win_amd64.whl", hash = "sha256:5a796786da89203a0657eda402bcdcec6180254a8ac22d72213abc42069522dc", size = 396895858 }, ] [[package]] @@ -2455,6 +2628,7 @@ source = { registry = "https://pypi.org/simple" } wheels = [ { url = "https://files.pythonhosted.org/packages/93/b5/9fb3d00386d3361b03874246190dfec7b206fd74e6e287b26a8fcb359d95/nvidia_cuda_cupti_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:79279b35cf6f91da114182a5ce1864997fd52294a87a16179ce275773799458a", size = 12354556 }, { url = "https://files.pythonhosted.org/packages/67/42/f4f60238e8194a3106d06a058d494b18e006c10bb2b915655bd9f6ea4cb1/nvidia_cuda_cupti_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:9dec60f5ac126f7bb551c055072b69d85392b13311fcc1bcda2202d172df30fb", size = 13813957 }, + { url = "https://files.pythonhosted.org/packages/f3/79/8cf313ec17c58ccebc965568e5bcb265cdab0a1df99c4e674bb7a3b99bfe/nvidia_cuda_cupti_cu12-12.4.127-py3-none-win_amd64.whl", hash = "sha256:5688d203301ab051449a2b1cb6690fbe90d2b372f411521c86018b950f3d7922", size = 9938035 }, ] [[package]] @@ -2464,6 +2638,7 @@ source = { registry = "https://pypi.org/simple" } wheels = [ { url = 
"https://files.pythonhosted.org/packages/77/aa/083b01c427e963ad0b314040565ea396f914349914c298556484f799e61b/nvidia_cuda_nvrtc_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:0eedf14185e04b76aa05b1fea04133e59f465b6f960c0cbf4e37c3cb6b0ea198", size = 24133372 }, { url = "https://files.pythonhosted.org/packages/2c/14/91ae57cd4db3f9ef7aa99f4019cfa8d54cb4caa7e00975df6467e9725a9f/nvidia_cuda_nvrtc_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:a178759ebb095827bd30ef56598ec182b85547f1508941a3d560eb7ea1fbf338", size = 24640306 }, + { url = "https://files.pythonhosted.org/packages/7c/30/8c844bfb770f045bcd8b2c83455c5afb45983e1a8abf0c4e5297b481b6a5/nvidia_cuda_nvrtc_cu12-12.4.127-py3-none-win_amd64.whl", hash = "sha256:a961b2f1d5f17b14867c619ceb99ef6fcec12e46612711bcec78eb05068a60ec", size = 19751955 }, ] [[package]] @@ -2473,6 +2648,7 @@ source = { registry = "https://pypi.org/simple" } wheels = [ { url = "https://files.pythonhosted.org/packages/a1/aa/b656d755f474e2084971e9a297def515938d56b466ab39624012070cb773/nvidia_cuda_runtime_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:961fe0e2e716a2a1d967aab7caee97512f71767f852f67432d572e36cb3a11f3", size = 894177 }, { url = "https://files.pythonhosted.org/packages/ea/27/1795d86fe88ef397885f2e580ac37628ed058a92ed2c39dc8eac3adf0619/nvidia_cuda_runtime_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:64403288fa2136ee8e467cdc9c9427e0434110899d07c779f25b5c068934faa5", size = 883737 }, + { url = "https://files.pythonhosted.org/packages/a8/8b/450e93fab75d85a69b50ea2d5fdd4ff44541e0138db16f9cd90123ef4de4/nvidia_cuda_runtime_cu12-12.4.127-py3-none-win_amd64.whl", hash = "sha256:09c2e35f48359752dfa822c09918211844a3d93c100a715d79b59591130c5e1e", size = 878808 }, ] [[package]] @@ -2484,6 +2660,7 @@ dependencies = [ ] wheels = [ { url = 
"https://files.pythonhosted.org/packages/9f/fd/713452cd72343f682b1c7b9321e23829f00b842ceaedcda96e742ea0b0b3/nvidia_cudnn_cu12-9.1.0.70-py3-none-manylinux2014_x86_64.whl", hash = "sha256:165764f44ef8c61fcdfdfdbe769d687e06374059fbb388b6c89ecb0e28793a6f", size = 664752741 }, + { url = "https://files.pythonhosted.org/packages/3f/d0/f90ee6956a628f9f04bf467932c0a25e5a7e706a684b896593c06c82f460/nvidia_cudnn_cu12-9.1.0.70-py3-none-win_amd64.whl", hash = "sha256:6278562929433d68365a07a4a1546c237ba2849852c0d4b2262a486e805b977a", size = 679925892 }, ] [[package]] @@ -2496,6 +2673,7 @@ dependencies = [ wheels = [ { url = "https://files.pythonhosted.org/packages/7a/8a/0e728f749baca3fbeffad762738276e5df60851958be7783af121a7221e7/nvidia_cufft_cu12-11.2.1.3-py3-none-manylinux2014_aarch64.whl", hash = "sha256:5dad8008fc7f92f5ddfa2101430917ce2ffacd86824914c82e28990ad7f00399", size = 211422548 }, { url = "https://files.pythonhosted.org/packages/27/94/3266821f65b92b3138631e9c8e7fe1fb513804ac934485a8d05776e1dd43/nvidia_cufft_cu12-11.2.1.3-py3-none-manylinux2014_x86_64.whl", hash = "sha256:f083fc24912aa410be21fa16d157fed2055dab1cc4b6934a0e03cba69eb242b9", size = 211459117 }, + { url = "https://files.pythonhosted.org/packages/f6/ee/3f3f8e9874f0be5bbba8fb4b62b3de050156d159f8b6edc42d6f1074113b/nvidia_cufft_cu12-11.2.1.3-py3-none-win_amd64.whl", hash = "sha256:d802f4954291101186078ccbe22fc285a902136f974d369540fd4a5333d1440b", size = 210576476 }, ] [[package]] @@ -2505,6 +2683,7 @@ source = { registry = "https://pypi.org/simple" } wheels = [ { url = "https://files.pythonhosted.org/packages/80/9c/a79180e4d70995fdf030c6946991d0171555c6edf95c265c6b2bf7011112/nvidia_curand_cu12-10.3.5.147-py3-none-manylinux2014_aarch64.whl", hash = "sha256:1f173f09e3e3c76ab084aba0de819c49e56614feae5c12f69883f4ae9bb5fad9", size = 56314811 }, { url = 
"https://files.pythonhosted.org/packages/8a/6d/44ad094874c6f1b9c654f8ed939590bdc408349f137f9b98a3a23ccec411/nvidia_curand_cu12-10.3.5.147-py3-none-manylinux2014_x86_64.whl", hash = "sha256:a88f583d4e0bb643c49743469964103aa59f7f708d862c3ddb0fc07f851e3b8b", size = 56305206 }, + { url = "https://files.pythonhosted.org/packages/1c/22/2573503d0d4e45673c263a313f79410e110eb562636b0617856fdb2ff5f6/nvidia_curand_cu12-10.3.5.147-py3-none-win_amd64.whl", hash = "sha256:f307cc191f96efe9e8f05a87096abc20d08845a841889ef78cb06924437f6771", size = 55799918 }, ] [[package]] @@ -2519,6 +2698,7 @@ dependencies = [ wheels = [ { url = "https://files.pythonhosted.org/packages/46/6b/a5c33cf16af09166845345275c34ad2190944bcc6026797a39f8e0a282e0/nvidia_cusolver_cu12-11.6.1.9-py3-none-manylinux2014_aarch64.whl", hash = "sha256:d338f155f174f90724bbde3758b7ac375a70ce8e706d70b018dd3375545fc84e", size = 127634111 }, { url = "https://files.pythonhosted.org/packages/3a/e1/5b9089a4b2a4790dfdea8b3a006052cfecff58139d5a4e34cb1a51df8d6f/nvidia_cusolver_cu12-11.6.1.9-py3-none-manylinux2014_x86_64.whl", hash = "sha256:19e33fa442bcfd085b3086c4ebf7e8debc07cfe01e11513cc6d332fd918ac260", size = 127936057 }, + { url = "https://files.pythonhosted.org/packages/f2/be/d435b7b020e854d5d5a682eb5de4328fd62f6182507406f2818280e206e2/nvidia_cusolver_cu12-11.6.1.9-py3-none-win_amd64.whl", hash = "sha256:e77314c9d7b694fcebc84f58989f3aa4fb4cb442f12ca1a9bde50f5e8f6d1b9c", size = 125224015 }, ] [[package]] @@ -2531,6 +2711,7 @@ dependencies = [ wheels = [ { url = "https://files.pythonhosted.org/packages/96/a9/c0d2f83a53d40a4a41be14cea6a0bf9e668ffcf8b004bd65633f433050c0/nvidia_cusparse_cu12-12.3.1.170-py3-none-manylinux2014_aarch64.whl", hash = "sha256:9d32f62896231ebe0480efd8a7f702e143c98cfaa0e8a76df3386c1ba2b54df3", size = 207381987 }, { url = "https://files.pythonhosted.org/packages/db/f7/97a9ea26ed4bbbfc2d470994b8b4f338ef663be97b8f677519ac195e113d/nvidia_cusparse_cu12-12.3.1.170-py3-none-manylinux2014_x86_64.whl", hash = 
"sha256:ea4f11a2904e2a8dc4b1833cc1b5181cde564edd0d5cd33e3c168eff2d1863f1", size = 207454763 }, + { url = "https://files.pythonhosted.org/packages/a2/e0/3155ca539760a8118ec94cc279b34293309bcd14011fc724f87f31988843/nvidia_cusparse_cu12-12.3.1.170-py3-none-win_amd64.whl", hash = "sha256:9bc90fb087bc7b4c15641521f31c0371e9a612fc2ba12c338d3ae032e6b6797f", size = 204684315 }, ] [[package]] @@ -2548,6 +2729,7 @@ source = { registry = "https://pypi.org/simple" } wheels = [ { url = "https://files.pythonhosted.org/packages/02/45/239d52c05074898a80a900f49b1615d81c07fceadd5ad6c4f86a987c0bc4/nvidia_nvjitlink_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:4abe7fef64914ccfa909bc2ba39739670ecc9e820c83ccc7a6ed414122599b83", size = 20552510 }, { url = "https://files.pythonhosted.org/packages/ff/ff/847841bacfbefc97a00036e0fce5a0f086b640756dc38caea5e1bb002655/nvidia_nvjitlink_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:06b3b9b25bf3f8af351d664978ca26a16d2c5127dbd53c0497e28d1fb9611d57", size = 21066810 }, + { url = "https://files.pythonhosted.org/packages/81/19/0babc919031bee42620257b9a911c528f05fb2688520dcd9ca59159ffea8/nvidia_nvjitlink_cu12-12.4.127-py3-none-win_amd64.whl", hash = "sha256:fd9020c501d27d135f983c6d3e244b197a7ccad769e34df53a42e276b0e25fa1", size = 95336325 }, ] [[package]] @@ -2557,6 +2739,7 @@ source = { registry = "https://pypi.org/simple" } wheels = [ { url = "https://files.pythonhosted.org/packages/06/39/471f581edbb7804b39e8063d92fc8305bdc7a80ae5c07dbe6ea5c50d14a5/nvidia_nvtx_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7959ad635db13edf4fc65c06a6e9f9e55fc2f92596db928d169c0bb031e88ef3", size = 100417 }, { url = "https://files.pythonhosted.org/packages/87/20/199b8713428322a2f22b722c62b8cc278cc53dffa9705d744484b5035ee9/nvidia_nvtx_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:781e950d9b9f60d8241ccea575b32f5105a5baf4c2351cab5256a24869f12a1a", size = 99144 }, + { url = 
"https://files.pythonhosted.org/packages/54/1b/f77674fbb73af98843be25803bbd3b9a4f0a96c75b8d33a2854a5c7d2d77/nvidia_nvtx_cu12-12.4.127-py3-none-win_amd64.whl", hash = "sha256:641dccaaa1139f3ffb0d3164b4b84f9d253397e38246a4f2f36728b48566d485", size = 66307 }, ] [[package]] @@ -2617,6 +2800,12 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/47/42/2f71f5680834688a9c81becbe5c5bb996fd33eaed5c66ae0606c3b1d6a02/onnxruntime-1.20.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bb71a814f66517a65628c9e4a2bb530a6edd2cd5d87ffa0af0f6f773a027d99e", size = 13333903 }, { url = "https://files.pythonhosted.org/packages/c8/f1/aabfdf91d013320aa2fc46cf43c88ca0182860ff15df872b4552254a9680/onnxruntime-1.20.1-cp312-cp312-win32.whl", hash = "sha256:bd386cc9ee5f686ee8a75ba74037750aca55183085bf1941da8efcfe12d5b120", size = 9814562 }, { url = "https://files.pythonhosted.org/packages/dd/80/76979e0b744307d488c79e41051117634b956612cc731f1028eb17ee7294/onnxruntime-1.20.1-cp312-cp312-win_amd64.whl", hash = "sha256:19c2d843eb074f385e8bbb753a40df780511061a63f9def1b216bf53860223fb", size = 11331482 }, + { url = "https://files.pythonhosted.org/packages/f7/71/c5d980ac4189589267a06f758bd6c5667d07e55656bed6c6c0580733ad07/onnxruntime-1.20.1-cp313-cp313-macosx_13_0_universal2.whl", hash = "sha256:cc01437a32d0042b606f462245c8bbae269e5442797f6213e36ce61d5abdd8cc", size = 31007574 }, + { url = "https://files.pythonhosted.org/packages/81/0d/13bbd9489be2a6944f4a940084bfe388f1100472f38c07080a46fbd4ab96/onnxruntime-1.20.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fb44b08e017a648924dbe91b82d89b0c105b1adcfe31e90d1dc06b8677ad37be", size = 11951459 }, + { url = "https://files.pythonhosted.org/packages/c0/ea/4454ae122874fd52bbb8a961262de81c5f932edeb1b72217f594c700d6ef/onnxruntime-1.20.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bda6aebdf7917c1d811f21d41633df00c58aff2bef2f598f69289c1f1dabc4b3", size = 
13331620 }, + { url = "https://files.pythonhosted.org/packages/d8/e0/50db43188ca1c945decaa8fc2a024c33446d31afed40149897d4f9de505f/onnxruntime-1.20.1-cp313-cp313-win_amd64.whl", hash = "sha256:d30367df7e70f1d9fc5a6a68106f5961686d39b54d3221f760085524e8d38e16", size = 11331758 }, + { url = "https://files.pythonhosted.org/packages/d8/55/3821c5fd60b52a6c82a00bba18531793c93c4addfe64fbf061e235c5617a/onnxruntime-1.20.1-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c9158465745423b2b5d97ed25aa7740c7d38d2993ee2e5c3bfacb0c4145c49d8", size = 11950342 }, + { url = "https://files.pythonhosted.org/packages/14/56/fd990ca222cef4f9f4a9400567b9a15b220dee2eafffb16b2adbc55c8281/onnxruntime-1.20.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0df6f2df83d61f46e842dbcde610ede27218947c33e994545a22333491e72a3b", size = 13337040 }, ] [[package]] @@ -3036,6 +3225,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/f8/26/68513e28b3bd1d7633318ed2818e86d1bfc8b782c87c520c7b363092837f/orjson-3.10.14-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:03f61ca3674555adcb1aa717b9fc87ae936aa7a63f6aba90a474a88701278780", size = 129798 }, { url = "https://files.pythonhosted.org/packages/44/ca/020fb99c98ff7267ba18ce798ff0c8c3aa97cd949b611fc76cad3c87e534/orjson-3.10.14-cp312-cp312-win32.whl", hash = "sha256:d5075c54edf1d6ad81d4c6523ce54a748ba1208b542e54b97d8a882ecd810fd1", size = 142524 }, { url = "https://files.pythonhosted.org/packages/70/7f/f2d346819a273653825e7c92dc26418c8da506003c9fc1dfe8157e733b2e/orjson-3.10.14-cp312-cp312-win_amd64.whl", hash = "sha256:175cafd322e458603e8ce73510a068d16b6e6f389c13f69bf16de0e843d7d406", size = 133663 }, + { url = "https://files.pythonhosted.org/packages/46/bb/f1b037d89f580c79eda0940772384cc226a697be1cb4eb94ae4e792aa34c/orjson-3.10.14-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = 
"sha256:0905ca08a10f7e0e0c97d11359609300eb1437490a7f32bbaa349de757e2e0c7", size = 249333 }, + { url = "https://files.pythonhosted.org/packages/e4/72/12958a073cace3f8acef0f9a30739d95f46bbb1544126fecad11527d4508/orjson-3.10.14-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92d13292249f9f2a3e418cbc307a9fbbef043c65f4bd8ba1eb620bc2aaba3d15", size = 125038 }, + { url = "https://files.pythonhosted.org/packages/c0/ae/461f78b1c98de1bc034af88bc21c6a792cc63373261fbc10a6ee560814fa/orjson-3.10.14-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90937664e776ad316d64251e2fa2ad69265e4443067668e4727074fe39676414", size = 130604 }, + { url = "https://files.pythonhosted.org/packages/ae/d2/17f50513f56bff7898840fddf7fb88f501305b9b2605d2793ff224789665/orjson-3.10.14-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9ed3d26c4cb4f6babaf791aa46a029265850e80ec2a566581f5c2ee1a14df4f1", size = 130756 }, + { url = "https://files.pythonhosted.org/packages/fa/bc/673856e4af94c9890dfd8e2054c05dc2ddc16d1728c2aa0c5bd198943105/orjson-3.10.14-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:56ee546c2bbe9599aba78169f99d1dc33301853e897dbaf642d654248280dc6e", size = 414613 }, + { url = "https://files.pythonhosted.org/packages/09/01/08c5b69b0756dd1790fcffa569d6a28dedcd7b97f825e4b46537b788908c/orjson-3.10.14-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:901e826cb2f1bdc1fcef3ef59adf0c451e8f7c0b5deb26c1a933fb66fb505eae", size = 141010 }, + { url = "https://files.pythonhosted.org/packages/5b/98/72883bb6cf88fd364996e62d2026622ca79bfb8dbaf96ccdd2018ada25b1/orjson-3.10.14-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:26336c0d4b2d44636e1e1e6ed1002f03c6aae4a8a9329561c8883f135e9ff010", size = 129732 }, + { url = "https://files.pythonhosted.org/packages/e4/99/347418f7ef56dcb478ba131a6112b8ddd5b747942652b6e77a53155a7e21/orjson-3.10.14-cp313-cp313-win32.whl", hash = 
"sha256:e2bc525e335a8545c4e48f84dd0328bc46158c9aaeb8a1c2276546e94540ea3d", size = 142504 }, + { url = "https://files.pythonhosted.org/packages/59/ac/5e96cad01083015f7bfdb02ccafa489da8e6caa7f4c519e215f04d2bd856/orjson-3.10.14-cp313-cp313-win_amd64.whl", hash = "sha256:eca04dfd792cedad53dc9a917da1a522486255360cb4e77619343a20d9f35364", size = 133388 }, ] [[package]] @@ -3082,6 +3280,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/20/e8/45a05d9c39d2cea61ab175dbe6a2de1d05b679e8de2011da4ee190d7e748/pandas-2.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfcb5ee8d4d50c06a51c2fffa6cff6272098ad6540aed1a76d15fb9318194d8", size = 16359235 }, { url = "https://files.pythonhosted.org/packages/1d/99/617d07a6a5e429ff90c90da64d428516605a1ec7d7bea494235e1c3882de/pandas-2.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:062309c1b9ea12a50e8ce661145c6aab431b1e99530d3cd60640e255778bd43a", size = 14056756 }, { url = "https://files.pythonhosted.org/packages/29/d4/1244ab8edf173a10fd601f7e13b9566c1b525c4f365d6bee918e68381889/pandas-2.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:59ef3764d0fe818125a5097d2ae867ca3fa64df032331b7e0917cf5d7bf66b13", size = 11504248 }, + { url = "https://files.pythonhosted.org/packages/64/22/3b8f4e0ed70644e85cfdcd57454686b9057c6c38d2f74fe4b8bc2527214a/pandas-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f00d1345d84d8c86a63e476bb4955e46458b304b9575dcf71102b5c705320015", size = 12477643 }, + { url = "https://files.pythonhosted.org/packages/e4/93/b3f5d1838500e22c8d793625da672f3eec046b1a99257666c94446969282/pandas-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3508d914817e153ad359d7e069d752cdd736a247c322d932eb89e6bc84217f28", size = 11281573 }, + { url = "https://files.pythonhosted.org/packages/f5/94/6c79b07f0e5aab1dcfa35a75f4817f5c4f677931d4234afcd75f0e6a66ca/pandas-2.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = 
"sha256:22a9d949bfc9a502d320aa04e5d02feab689d61da4e7764b62c30b991c42c5f0", size = 15196085 }, + { url = "https://files.pythonhosted.org/packages/e8/31/aa8da88ca0eadbabd0a639788a6da13bb2ff6edbbb9f29aa786450a30a91/pandas-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3a255b2c19987fbbe62a9dfd6cff7ff2aa9ccab3fc75218fd4b7530f01efa24", size = 12711809 }, + { url = "https://files.pythonhosted.org/packages/ee/7c/c6dbdb0cb2a4344cacfb8de1c5808ca885b2e4dcfde8008266608f9372af/pandas-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:800250ecdadb6d9c78eae4990da62743b857b470883fa27f652db8bdde7f6659", size = 16356316 }, + { url = "https://files.pythonhosted.org/packages/57/b7/8b757e7d92023b832869fa8881a992696a0bfe2e26f72c9ae9f255988d42/pandas-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6374c452ff3ec675a8f46fd9ab25c4ad0ba590b71cf0656f8b6daa5202bca3fb", size = 14022055 }, + { url = "https://files.pythonhosted.org/packages/3b/bc/4b18e2b8c002572c5a441a64826252ce5da2aa738855747247a971988043/pandas-2.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:61c5ad4043f791b61dd4752191d9f07f0ae412515d59ba8f005832a532f8736d", size = 11481175 }, + { url = "https://files.pythonhosted.org/packages/76/a3/a5d88146815e972d40d19247b2c162e88213ef51c7c25993942c39dbf41d/pandas-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3b71f27954685ee685317063bf13c7709a7ba74fc996b84fc6821c59b0f06468", size = 12615650 }, + { url = "https://files.pythonhosted.org/packages/9c/8c/f0fd18f6140ddafc0c24122c8a964e48294acc579d47def376fef12bcb4a/pandas-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:38cf8125c40dae9d5acc10fa66af8ea6fdf760b2714ee482ca691fc66e6fcb18", size = 11290177 }, + { url = "https://files.pythonhosted.org/packages/ed/f9/e995754eab9c0f14c6777401f7eece0943840b7a9fc932221c19d1abee9f/pandas-2.2.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = 
"sha256:ba96630bc17c875161df3818780af30e43be9b166ce51c9a18c1feae342906c2", size = 14651526 }, + { url = "https://files.pythonhosted.org/packages/25/b0/98d6ae2e1abac4f35230aa756005e8654649d305df9a28b16b9ae4353bff/pandas-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1db71525a1538b30142094edb9adc10be3f3e176748cd7acc2240c2f2e5aa3a4", size = 11871013 }, + { url = "https://files.pythonhosted.org/packages/cc/57/0f72a10f9db6a4628744c8e8f0df4e6e21de01212c7c981d31e50ffc8328/pandas-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:15c0e1e02e93116177d29ff83e8b1619c93ddc9c49083f237d4312337a61165d", size = 15711620 }, + { url = "https://files.pythonhosted.org/packages/ab/5f/b38085618b950b79d2d9164a711c52b10aefc0ae6833b96f626b7021b2ed/pandas-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a", size = 13098436 }, ] [[package]] @@ -3179,6 +3390,25 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e7/c4/fc6e86750523f367923522014b821c11ebc5ad402e659d8c9d09b3c9d70c/pillow-11.1.0-cp312-cp312-win32.whl", hash = "sha256:cfd5cd998c2e36a862d0e27b2df63237e67273f2fc78f47445b14e73a810e7e6", size = 2291630 }, { url = "https://files.pythonhosted.org/packages/08/5c/2104299949b9d504baf3f4d35f73dbd14ef31bbd1ddc2c1b66a5b7dfda44/pillow-11.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:a697cd8ba0383bba3d2d3ada02b34ed268cb548b369943cd349007730c92bddf", size = 2626369 }, { url = "https://files.pythonhosted.org/packages/37/f3/9b18362206b244167c958984b57c7f70a0289bfb59a530dd8af5f699b910/pillow-11.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:4dd43a78897793f60766563969442020e90eb7847463eca901e41ba186a7d4a5", size = 2375240 }, + { url = "https://files.pythonhosted.org/packages/b3/31/9ca79cafdce364fd5c980cd3416c20ce1bebd235b470d262f9d24d810184/pillow-11.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = 
"sha256:ae98e14432d458fc3de11a77ccb3ae65ddce70f730e7c76140653048c71bfcbc", size = 3226640 }, + { url = "https://files.pythonhosted.org/packages/ac/0f/ff07ad45a1f172a497aa393b13a9d81a32e1477ef0e869d030e3c1532521/pillow-11.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cc1331b6d5a6e144aeb5e626f4375f5b7ae9934ba620c0ac6b3e43d5e683a0f0", size = 3101437 }, + { url = "https://files.pythonhosted.org/packages/08/2f/9906fca87a68d29ec4530be1f893149e0cb64a86d1f9f70a7cfcdfe8ae44/pillow-11.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:758e9d4ef15d3560214cddbc97b8ef3ef86ce04d62ddac17ad39ba87e89bd3b1", size = 4326605 }, + { url = "https://files.pythonhosted.org/packages/b0/0f/f3547ee15b145bc5c8b336401b2d4c9d9da67da9dcb572d7c0d4103d2c69/pillow-11.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b523466b1a31d0dcef7c5be1f20b942919b62fd6e9a9be199d035509cbefc0ec", size = 4411173 }, + { url = "https://files.pythonhosted.org/packages/b1/df/bf8176aa5db515c5de584c5e00df9bab0713548fd780c82a86cba2c2fedb/pillow-11.1.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:9044b5e4f7083f209c4e35aa5dd54b1dd5b112b108648f5c902ad586d4f945c5", size = 4369145 }, + { url = "https://files.pythonhosted.org/packages/de/7c/7433122d1cfadc740f577cb55526fdc39129a648ac65ce64db2eb7209277/pillow-11.1.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:3764d53e09cdedd91bee65c2527815d315c6b90d7b8b79759cc48d7bf5d4f114", size = 4496340 }, + { url = "https://files.pythonhosted.org/packages/25/46/dd94b93ca6bd555588835f2504bd90c00d5438fe131cf01cfa0c5131a19d/pillow-11.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:31eba6bbdd27dde97b0174ddf0297d7a9c3a507a8a1480e1e60ef914fe23d352", size = 4296906 }, + { url = "https://files.pythonhosted.org/packages/a8/28/2f9d32014dfc7753e586db9add35b8a41b7a3b46540e965cb6d6bc607bd2/pillow-11.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:b5d658fbd9f0d6eea113aea286b21d3cd4d3fd978157cbf2447a6035916506d3", size = 4431759 }, + { url = "https://files.pythonhosted.org/packages/33/48/19c2cbe7403870fbe8b7737d19eb013f46299cdfe4501573367f6396c775/pillow-11.1.0-cp313-cp313-win32.whl", hash = "sha256:f86d3a7a9af5d826744fabf4afd15b9dfef44fe69a98541f666f66fbb8d3fef9", size = 2291657 }, + { url = "https://files.pythonhosted.org/packages/3b/ad/285c556747d34c399f332ba7c1a595ba245796ef3e22eae190f5364bb62b/pillow-11.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:593c5fd6be85da83656b93ffcccc2312d2d149d251e98588b14fbc288fd8909c", size = 2626304 }, + { url = "https://files.pythonhosted.org/packages/e5/7b/ef35a71163bf36db06e9c8729608f78dedf032fc8313d19bd4be5c2588f3/pillow-11.1.0-cp313-cp313-win_arm64.whl", hash = "sha256:11633d58b6ee5733bde153a8dafd25e505ea3d32e261accd388827ee987baf65", size = 2375117 }, + { url = "https://files.pythonhosted.org/packages/79/30/77f54228401e84d6791354888549b45824ab0ffde659bafa67956303a09f/pillow-11.1.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:70ca5ef3b3b1c4a0812b5c63c57c23b63e53bc38e758b37a951e5bc466449861", size = 3230060 }, + { url = "https://files.pythonhosted.org/packages/ce/b1/56723b74b07dd64c1010fee011951ea9c35a43d8020acd03111f14298225/pillow-11.1.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:8000376f139d4d38d6851eb149b321a52bb8893a88dae8ee7d95840431977081", size = 3106192 }, + { url = "https://files.pythonhosted.org/packages/e1/cd/7bf7180e08f80a4dcc6b4c3a0aa9e0b0ae57168562726a05dc8aa8fa66b0/pillow-11.1.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ee85f0696a17dd28fbcfceb59f9510aa71934b483d1f5601d1030c3c8304f3c", size = 4446805 }, + { url = "https://files.pythonhosted.org/packages/97/42/87c856ea30c8ed97e8efbe672b58c8304dee0573f8c7cab62ae9e31db6ae/pillow-11.1.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:dd0e081319328928531df7a0e63621caf67652c8464303fd102141b785ef9547", size = 4530623 }, + { url = 
"https://files.pythonhosted.org/packages/ff/41/026879e90c84a88e33fb00cc6bd915ac2743c67e87a18f80270dfe3c2041/pillow-11.1.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:e63e4e5081de46517099dc30abe418122f54531a6ae2ebc8680bcd7096860eab", size = 4465191 }, + { url = "https://files.pythonhosted.org/packages/e5/fb/a7960e838bc5df57a2ce23183bfd2290d97c33028b96bde332a9057834d3/pillow-11.1.0-cp313-cp313t-win32.whl", hash = "sha256:dda60aa465b861324e65a78c9f5cf0f4bc713e4309f83bc387be158b077963d9", size = 2295494 }, + { url = "https://files.pythonhosted.org/packages/d7/6c/6ec83ee2f6f0fda8d4cf89045c6be4b0373ebfc363ba8538f8c999f63fcd/pillow-11.1.0-cp313-cp313t-win_amd64.whl", hash = "sha256:ad5db5781c774ab9a9b2c4302bbf0c1014960a0a7be63278d13ae6fdf88126fe", size = 2631595 }, + { url = "https://files.pythonhosted.org/packages/cf/6c/41c21c6c8af92b9fea313aa47c75de49e2f9a467964ee33eb0135d47eb64/pillow-11.1.0-cp313-cp313t-win_arm64.whl", hash = "sha256:67cd427c68926108778a9005f2a04adbd5e67c442ed21d95389fe1d595458756", size = 2377651 }, ] [[package]] @@ -3281,6 +3511,22 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e9/2f/6b32f273fa02e978b7577159eae7471b3cfb88b48563b1c2578b2d7ca0bb/propcache-0.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b74c261802d3d2b85c9df2dfb2fa81b6f90deeef63c2db9f0e029a3cac50b518", size = 230704 }, { url = "https://files.pythonhosted.org/packages/5c/2e/f40ae6ff5624a5f77edd7b8359b208b5455ea113f68309e2b00a2e1426b6/propcache-0.2.1-cp312-cp312-win32.whl", hash = "sha256:d09c333d36c1409d56a9d29b3a1b800a42c76a57a5a8907eacdbce3f18768246", size = 40050 }, { url = "https://files.pythonhosted.org/packages/3b/77/a92c3ef994e47180862b9d7d11e37624fb1c00a16d61faf55115d970628b/propcache-0.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:c214999039d4f2a5b2073ac506bba279945233da8c786e490d411dfc30f855c1", size = 44117 }, + { url = 
"https://files.pythonhosted.org/packages/0f/2a/329e0547cf2def8857157f9477669043e75524cc3e6251cef332b3ff256f/propcache-0.2.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aca405706e0b0a44cc6bfd41fbe89919a6a56999157f6de7e182a990c36e37bc", size = 77002 }, + { url = "https://files.pythonhosted.org/packages/12/2d/c4df5415e2382f840dc2ecbca0eeb2293024bc28e57a80392f2012b4708c/propcache-0.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:12d1083f001ace206fe34b6bdc2cb94be66d57a850866f0b908972f90996b3e9", size = 44639 }, + { url = "https://files.pythonhosted.org/packages/d0/5a/21aaa4ea2f326edaa4e240959ac8b8386ea31dedfdaa636a3544d9e7a408/propcache-0.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d93f3307ad32a27bda2e88ec81134b823c240aa3abb55821a8da553eed8d9439", size = 44049 }, + { url = "https://files.pythonhosted.org/packages/4e/3e/021b6cd86c0acc90d74784ccbb66808b0bd36067a1bf3e2deb0f3845f618/propcache-0.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba278acf14471d36316159c94a802933d10b6a1e117b8554fe0d0d9b75c9d536", size = 224819 }, + { url = "https://files.pythonhosted.org/packages/3c/57/c2fdeed1b3b8918b1770a133ba5c43ad3d78e18285b0c06364861ef5cc38/propcache-0.2.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4e6281aedfca15301c41f74d7005e6e3f4ca143584ba696ac69df4f02f40d629", size = 229625 }, + { url = "https://files.pythonhosted.org/packages/9d/81/70d4ff57bf2877b5780b466471bebf5892f851a7e2ca0ae7ffd728220281/propcache-0.2.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5b750a8e5a1262434fb1517ddf64b5de58327f1adc3524a5e44c2ca43305eb0b", size = 232934 }, + { url = "https://files.pythonhosted.org/packages/3c/b9/bb51ea95d73b3fb4100cb95adbd4e1acaf2cbb1fd1083f5468eeb4a099a8/propcache-0.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf72af5e0fb40e9babf594308911436c8efde3cb5e75b6f206c34ad18be5c052", size = 227361 }, + { url = 
"https://files.pythonhosted.org/packages/f1/20/3c6d696cd6fd70b29445960cc803b1851a1131e7a2e4ee261ee48e002bcd/propcache-0.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b2d0a12018b04f4cb820781ec0dffb5f7c7c1d2a5cd22bff7fb055a2cb19ebce", size = 213904 }, + { url = "https://files.pythonhosted.org/packages/a1/cb/1593bfc5ac6d40c010fa823f128056d6bc25b667f5393781e37d62f12005/propcache-0.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e800776a79a5aabdb17dcc2346a7d66d0777e942e4cd251defeb084762ecd17d", size = 212632 }, + { url = "https://files.pythonhosted.org/packages/6d/5c/e95617e222be14a34c709442a0ec179f3207f8a2b900273720501a70ec5e/propcache-0.2.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:4160d9283bd382fa6c0c2b5e017acc95bc183570cd70968b9202ad6d8fc48dce", size = 207897 }, + { url = "https://files.pythonhosted.org/packages/8e/3b/56c5ab3dc00f6375fbcdeefdede5adf9bee94f1fab04adc8db118f0f9e25/propcache-0.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:30b43e74f1359353341a7adb783c8f1b1c676367b011709f466f42fda2045e95", size = 208118 }, + { url = "https://files.pythonhosted.org/packages/86/25/d7ef738323fbc6ebcbce33eb2a19c5e07a89a3df2fded206065bd5e868a9/propcache-0.2.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:58791550b27d5488b1bb52bc96328456095d96206a250d28d874fafe11b3dfaf", size = 217851 }, + { url = "https://files.pythonhosted.org/packages/b3/77/763e6cef1852cf1ba740590364ec50309b89d1c818e3256d3929eb92fabf/propcache-0.2.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:0f022d381747f0dfe27e99d928e31bc51a18b65bb9e481ae0af1380a6725dd1f", size = 222630 }, + { url = "https://files.pythonhosted.org/packages/4f/e9/0f86be33602089c701696fbed8d8c4c07b6ee9605c5b7536fd27ed540c5b/propcache-0.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:297878dc9d0a334358f9b608b56d02e72899f3b8499fc6044133f0d319e2ec30", size = 216269 }, + { url = 
"https://files.pythonhosted.org/packages/cc/02/5ac83217d522394b6a2e81a2e888167e7ca629ef6569a3f09852d6dcb01a/propcache-0.2.1-cp313-cp313-win32.whl", hash = "sha256:ddfab44e4489bd79bda09d84c430677fc7f0a4939a73d2bba3073036f487a0a6", size = 39472 }, + { url = "https://files.pythonhosted.org/packages/f4/33/d6f5420252a36034bc8a3a01171bc55b4bff5df50d1c63d9caa50693662f/propcache-0.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:556fc6c10989f19a179e4321e5d678db8eb2924131e64652a51fe83e4c3db0e1", size = 43363 }, { url = "https://files.pythonhosted.org/packages/41/b6/c5319caea262f4821995dca2107483b94a3345d4607ad797c76cb9c36bcc/propcache-0.2.1-py3-none-any.whl", hash = "sha256:52277518d6aae65536e9cea52d4e7fd2f7a66f4aa2d30ed3f2fcea620ace3c54", size = 11818 }, ] @@ -3316,6 +3562,8 @@ version = "6.1.1" source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/1f/5a/07871137bb752428aa4b659f910b399ba6f291156bdea939be3e96cae7cb/psutil-6.1.1.tar.gz", hash = "sha256:cf8496728c18f2d0b45198f06895be52f36611711746b7f30c464b422b50e2f5", size = 508502 } wheels = [ + { url = "https://files.pythonhosted.org/packages/d2/d4/8095b53c4950f44dc99b8d983b796f405ae1f58d80978fcc0421491b4201/psutil-6.1.1-cp27-none-win32.whl", hash = "sha256:6d4281f5bbca041e2292be3380ec56a9413b790579b8e593b1784499d0005dac", size = 246855 }, + { url = "https://files.pythonhosted.org/packages/b1/63/0b6425ea4f2375988209a9934c90d6079cc7537847ed58a28fbe30f4277e/psutil-6.1.1-cp27-none-win_amd64.whl", hash = "sha256:c777eb75bb33c47377c9af68f30e9f11bc78e0f07fbf907be4a5d70b2fe5f030", size = 250110 }, { url = "https://files.pythonhosted.org/packages/61/99/ca79d302be46f7bdd8321089762dd4476ee725fce16fc2b2e1dbba8cac17/psutil-6.1.1-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:fc0ed7fe2231a444fc219b9c42d0376e0a9a1a72f16c5cfa0f68d19f1a0663e8", size = 247511 }, { url = 
"https://files.pythonhosted.org/packages/0b/6b/73dbde0dd38f3782905d4587049b9be64d76671042fdcaf60e2430c6796d/psutil-6.1.1-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:0bdd4eab935276290ad3cb718e9809412895ca6b5b334f5a9111ee6d9aff9377", size = 248985 }, { url = "https://files.pythonhosted.org/packages/17/38/c319d31a1d3f88c5b79c68b3116c129e5133f1822157dd6da34043e32ed6/psutil-6.1.1-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6e06c20c05fe95a3d7302d74e7097756d4ba1247975ad6905441ae1b5b66003", size = 284488 }, @@ -3386,6 +3634,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b6/1f/966b722251a7354114ccbb71cf1a83922023e69efd8945ebf628a851ec4c/pyarrow-19.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:a08e2a8a039a3f72afb67a6668180f09fddaa38fe0d21f13212b4aba4b5d2451", size = 40505858 }, { url = "https://files.pythonhosted.org/packages/3b/5e/6bc81aa7fc9affc7d1c03b912fbcc984ca56c2a18513684da267715dab7b/pyarrow-19.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:f43f5aef2a13d4d56adadae5720d1fed4c1356c993eda8b59dace4b5983843c1", size = 42084973 }, { url = "https://files.pythonhosted.org/packages/53/c3/2f56da818b6a4758cbd514957c67bd0f078ebffa5390ee2e2bf0f9e8defc/pyarrow-19.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:2f672f5364b2d7829ef7c94be199bb88bf5661dd485e21d2d37de12ccb78a136", size = 25241976 }, + { url = "https://files.pythonhosted.org/packages/f5/b9/ba07ed3dd6b6e4f379b78e9c47c50c8886e07862ab7fa6339ac38622d755/pyarrow-19.0.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:cf3bf0ce511b833f7bc5f5bb3127ba731e97222023a444b7359f3a22e2a3b463", size = 30651291 }, + { url = "https://files.pythonhosted.org/packages/ad/10/0d304243c8277035298a68a70807efb76199c6c929bb3363c92ac9be6a0d/pyarrow-19.0.0-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:4d8b0c0de0a73df1f1bf439af1b60f273d719d70648e898bc077547649bb8352", size = 32100461 }, + { url = 
"https://files.pythonhosted.org/packages/8a/61/bcfc5182e11831bca3f849945b9b106e09fd10ded773dff466658e972a45/pyarrow-19.0.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92aff08e23d281c69835e4a47b80569242a504095ef6a6223c1f6bb8883431d", size = 41132491 }, + { url = "https://files.pythonhosted.org/packages/8e/87/2915a29049ec352dc69a967fbcbd76b0180319233de0daf8bd368df37099/pyarrow-19.0.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3b78eff5968a1889a0f3bc81ca57e1e19b75f664d9c61a42a604bf9d8402aae", size = 42192529 }, + { url = "https://files.pythonhosted.org/packages/48/18/44e5542b2707a8afaf78b5b88c608f261871ae77787eac07b7c679ca6f0f/pyarrow-19.0.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:b34d3bde38eba66190b215bae441646330f8e9da05c29e4b5dd3e41bde701098", size = 40495363 }, + { url = "https://files.pythonhosted.org/packages/ba/d6/5096deb7599bbd20bc2768058fe23bc725b88eb41bee58303293583a2935/pyarrow-19.0.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:5418d4d0fab3a0ed497bad21d17a7973aad336d66ad4932a3f5f7480d4ca0c04", size = 42074075 }, + { url = "https://files.pythonhosted.org/packages/2c/df/e3c839c04c284c9ec3d62b02a8c452b795d9b07b04079ab91ce33484d4c5/pyarrow-19.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:e82c3d5e44e969c217827b780ed8faf7ac4c53f934ae9238872e749fa531f7c9", size = 25239803 }, + { url = "https://files.pythonhosted.org/packages/6a/d3/a6d4088e906c7b5d47792256212606d2ae679046dc750eee0ae167338e5c/pyarrow-19.0.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:f208c3b58a6df3b239e0bb130e13bc7487ed14f39a9ff357b6415e3f6339b560", size = 30695401 }, + { url = "https://files.pythonhosted.org/packages/94/25/70040fd0e397dd1b937f459eaeeec942a76027357491dca0ada09d1322af/pyarrow-19.0.0-cp313-cp313t-macosx_12_0_x86_64.whl", hash = "sha256:c751c1c93955b7a84c06794df46f1cec93e18610dcd5ab7d08e89a81df70a849", size = 32104680 }, + { url = 
"https://files.pythonhosted.org/packages/4e/f9/92783290cc0d80ca16d34b0c126305bfacca4b87dd889c8f16c6ef2a8fd7/pyarrow-19.0.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b903afaa5df66d50fc38672ad095806443b05f202c792694f3a604ead7c6ea6e", size = 41076754 }, + { url = "https://files.pythonhosted.org/packages/05/46/2c9870f50a495c72e2b8982ae29a9b1680707ea936edc0de444cec48f875/pyarrow-19.0.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a22a4bc0937856263df8b94f2f2781b33dd7f876f787ed746608e06902d691a5", size = 42163133 }, + { url = "https://files.pythonhosted.org/packages/7b/2f/437922b902549228fb15814e8a26105bff2787ece466a8d886eb6699efad/pyarrow-19.0.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:5e8a28b918e2e878c918f6d89137386c06fe577cd08d73a6be8dafb317dc2d73", size = 40452210 }, + { url = "https://files.pythonhosted.org/packages/36/ef/1d7975053af9d106da973bac142d0d4da71b7550a3576cc3e0b3f444d21a/pyarrow-19.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:29cd86c8001a94f768f79440bf83fee23963af5e7bc68ce3a7e5f120e17edf89", size = 42077618 }, ] [[package]] @@ -3427,6 +3688,12 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/76/65/cb014acc41cd5bf6bbfa4671c7faffffb9cee01706642c2dec70c5209ac8/pyclipper-1.3.0.post6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58eae2ff92a8cae1331568df076c4c5775bf946afab0068b217f0cf8e188eb3c", size = 963797 }, { url = "https://files.pythonhosted.org/packages/80/ec/b40cd81ab7598984167508a5369a2fa31a09fe3b3e3d0b73aa50e06d4b3f/pyclipper-1.3.0.post6-cp312-cp312-win32.whl", hash = "sha256:793b0aa54b914257aa7dc76b793dd4dcfb3c84011d48df7e41ba02b571616eaf", size = 99456 }, { url = "https://files.pythonhosted.org/packages/24/3a/7d6292e3c94fb6b872d8d7e80d909dc527ee6b0af73b753c63fdde65a7da/pyclipper-1.3.0.post6-cp312-cp312-win_amd64.whl", hash = "sha256:d3f9da96f83b8892504923beb21a481cd4516c19be1d39eb57a92ef1c9a29548", size = 
110278 }, + { url = "https://files.pythonhosted.org/packages/8c/b3/75232906bd13f869600d23bdb8fe6903cc899fa7e96981ae4c9b7d9c409e/pyclipper-1.3.0.post6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:f129284d2c7bcd213d11c0f35e1ae506a1144ce4954e9d1734d63b120b0a1b58", size = 268254 }, + { url = "https://files.pythonhosted.org/packages/0b/db/35843050a3dd7586781497a21ca6c8d48111afb66061cb40c3d3c288596d/pyclipper-1.3.0.post6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:188fbfd1d30d02247f92c25ce856f5f3c75d841251f43367dbcf10935bc48f38", size = 142204 }, + { url = "https://files.pythonhosted.org/packages/7c/d7/1faa0ff35caa02cb32cb0583688cded3f38788f33e02bfe6461fbcc1bee1/pyclipper-1.3.0.post6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d6d129d0c2587f2f5904d201a4021f859afbb45fada4261c9fdedb2205b09d23", size = 943835 }, + { url = "https://files.pythonhosted.org/packages/31/10/c0bf140bee2844e2c0617fdcc8a4e8daf98e71710046b06034e6f1963404/pyclipper-1.3.0.post6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c9c80b5c46eef38ba3f12dd818dc87f5f2a0853ba914b6f91b133232315f526", size = 962510 }, + { url = "https://files.pythonhosted.org/packages/85/6f/8c6afc49b51b1bf16d5903ecd5aee657cf88f52c83cb5fabf771deeba728/pyclipper-1.3.0.post6-cp313-cp313-win32.whl", hash = "sha256:b15113ec4fc423b58e9ae80aa95cf5a0802f02d8f02a98a46af3d7d66ff0cc0e", size = 98836 }, + { url = "https://files.pythonhosted.org/packages/d5/19/9ff4551b42f2068686c50c0d199072fa67aee57fc5cf86770cacf71efda3/pyclipper-1.3.0.post6-cp313-cp313-win_amd64.whl", hash = "sha256:e5ff68fa770ac654c7974fc78792978796f068bd274e95930c0691c31e192889", size = 109672 }, ] [[package]] @@ -3485,6 +3752,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e3/b9/41f7efe80f6ce2ed3ee3c2dcfe10ab7adc1172f778cc9659509a79518c43/pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24", size = 2116872 }, { url = "https://files.pythonhosted.org/packages/63/08/b59b7a92e03dd25554b0436554bf23e7c29abae7cce4b1c459cd92746811/pydantic_core-2.23.4-cp312-none-win32.whl", hash = "sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84", size = 1738535 }, { url = "https://files.pythonhosted.org/packages/88/8d/479293e4d39ab409747926eec4329de5b7129beaedc3786eca070605d07f/pydantic_core-2.23.4-cp312-none-win_amd64.whl", hash = "sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9", size = 1917992 }, + { url = "https://files.pythonhosted.org/packages/ad/ef/16ee2df472bf0e419b6bc68c05bf0145c49247a1095e85cee1463c6a44a1/pydantic_core-2.23.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc", size = 1856143 }, + { url = "https://files.pythonhosted.org/packages/da/fa/bc3dbb83605669a34a93308e297ab22be82dfb9dcf88c6cf4b4f264e0a42/pydantic_core-2.23.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd", size = 1770063 }, + { url = "https://files.pythonhosted.org/packages/4e/48/e813f3bbd257a712303ebdf55c8dc46f9589ec74b384c9f652597df3288d/pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05", size = 1790013 }, + { url = "https://files.pythonhosted.org/packages/b4/e0/56eda3a37929a1d297fcab1966db8c339023bcca0b64c5a84896db3fcc5c/pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d", size = 1801077 }, + { url = "https://files.pythonhosted.org/packages/04/be/5e49376769bfbf82486da6c5c1683b891809365c20d7c7e52792ce4c71f3/pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510", size = 1996782 }, + { url = "https://files.pythonhosted.org/packages/bc/24/e3ee6c04f1d58cc15f37bcc62f32c7478ff55142b7b3e6d42ea374ea427c/pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6", size = 2661375 }, + { url = "https://files.pythonhosted.org/packages/c1/f8/11a9006de4e89d016b8de74ebb1db727dc100608bb1e6bbe9d56a3cbbcce/pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b", size = 2071635 }, + { url = "https://files.pythonhosted.org/packages/7c/45/bdce5779b59f468bdf262a5bc9eecbae87f271c51aef628d8c073b4b4b4c/pydantic_core-2.23.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327", size = 1916994 }, + { url = "https://files.pythonhosted.org/packages/d8/fa/c648308fe711ee1f88192cad6026ab4f925396d1293e8356de7e55be89b5/pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6", size = 1968877 }, + { url = "https://files.pythonhosted.org/packages/16/16/b805c74b35607d24d37103007f899abc4880923b04929547ae68d478b7f4/pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f", size = 2116814 }, + { url = "https://files.pythonhosted.org/packages/d1/58/5305e723d9fcdf1c5a655e6a4cc2a07128bf644ff4b1d98daf7a9dbf57da/pydantic_core-2.23.4-cp313-none-win32.whl", hash = "sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769", size = 1738360 }, + { url = "https://files.pythonhosted.org/packages/a5/ae/e14b0ff8b3f48e02394d8acd911376b7b66e164535687ef7dc24ea03072f/pydantic_core-2.23.4-cp313-none-win_amd64.whl", hash = 
"sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5", size = 1919411 }, ] [[package]] @@ -3590,6 +3869,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/c1/66/e98b2308971d45667cb8179d4d66deca47336c90663a7e0527589f1038b7/pymongo-4.10.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e974ab16a60be71a8dfad4e5afccf8dd05d41c758060f5d5bda9a758605d9a5d", size = 1862230 }, { url = "https://files.pythonhosted.org/packages/6c/80/ba9b7ed212a5f8cf8ad7037ed5bbebc1c587fc09242108f153776e4a338b/pymongo-4.10.1-cp312-cp312-win32.whl", hash = "sha256:544890085d9641f271d4f7a47684450ed4a7344d6b72d5968bfae32203b1bb7c", size = 903045 }, { url = "https://files.pythonhosted.org/packages/76/8b/5afce891d78159912c43726fab32641e3f9718f14be40f978c148ea8db48/pymongo-4.10.1-cp312-cp312-win_amd64.whl", hash = "sha256:dcc07b1277e8b4bf4d7382ca133850e323b7ab048b8353af496d050671c7ac52", size = 926686 }, + { url = "https://files.pythonhosted.org/packages/83/76/df0fd0622a85b652ad0f91ec8a0ebfd0cb86af6caec8999a22a1f7481203/pymongo-4.10.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:90bc6912948dfc8c363f4ead54d54a02a15a7fee6cfafb36dc450fc8962d2cb7", size = 996981 }, + { url = "https://files.pythonhosted.org/packages/4c/39/fa50531de8d1d8af8c253caeed20c18ccbf1de5d970119c4a42c89f2bd09/pymongo-4.10.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:594dd721b81f301f33e843453638e02d92f63c198358e5a0fa8b8d0b1218dabc", size = 996769 }, + { url = "https://files.pythonhosted.org/packages/bf/50/6936612c1b2e32d95c30e860552d3bc9e55cfa79a4f73b73225fa05a028c/pymongo-4.10.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0783e0c8e95397c84e9cf8ab092ab1e5dd7c769aec0ef3a5838ae7173b98dea0", size = 2169159 }, + { url = 
"https://files.pythonhosted.org/packages/78/8c/45cb23096e66c7b1da62bb8d9c7ac2280e7c1071e13841e7fb71bd44fd9f/pymongo-4.10.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6fb6a72e88df46d1c1040fd32cd2d2c5e58722e5d3e31060a0393f04ad3283de", size = 2260569 }, + { url = "https://files.pythonhosted.org/packages/29/b6/e5ec697087e527a6a15c5f8daa5bcbd641edb8813487345aaf963d3537dc/pymongo-4.10.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2e3a593333e20c87415420a4fb76c00b7aae49b6361d2e2205b6fece0563bf40", size = 2218142 }, + { url = "https://files.pythonhosted.org/packages/ad/8a/c0b45bee0f0c57732c5c36da5122c1796efd5a62d585fbc504e2f1401244/pymongo-4.10.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72e2ace7456167c71cfeca7dcb47bd5dceda7db2231265b80fc625c5e8073186", size = 2170623 }, + { url = "https://files.pythonhosted.org/packages/3b/26/6c0a5360a571df24c9bfbd51b1dae279f4f0c511bdbc0906f6df6d1543fa/pymongo-4.10.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ad05eb9c97e4f589ed9e74a00fcaac0d443ccd14f38d1258eb4c39a35dd722b", size = 2111112 }, + { url = "https://files.pythonhosted.org/packages/38/bc/5b91b728e1cf505d931f04e24cbac71ae519523785570ed046cdc31e6efc/pymongo-4.10.1-cp313-cp313-win32.whl", hash = "sha256:ee4c86d8e6872a61f7888fc96577b0ea165eb3bdb0d841962b444fa36001e2bb", size = 948727 }, + { url = "https://files.pythonhosted.org/packages/0d/2a/7c24a6144eaa06d18ed52822ea2b0f119fd9267cd1abbb75dae4d89a3803/pymongo-4.10.1-cp313-cp313-win_amd64.whl", hash = "sha256:45ee87a4e12337353242bc758accc7fb47a2f2d9ecc0382a61e64c8f01e86708", size = 976873 }, ] [[package]] @@ -3825,6 +4113,9 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/00/7c/d00d6bdd96de4344e06c4afbf218bc86b54436a94c01c71a8701f613aa56/pywin32-308-cp312-cp312-win32.whl", hash = "sha256:587f3e19696f4bf96fde9d8a57cec74a57021ad5f204c9e627e15c33ff568897", size 
= 5939729 }, { url = "https://files.pythonhosted.org/packages/21/27/0c8811fbc3ca188f93b5354e7c286eb91f80a53afa4e11007ef661afa746/pywin32-308-cp312-cp312-win_amd64.whl", hash = "sha256:00b3e11ef09ede56c6a43c71f2d31857cf7c54b0ab6e78ac659497abd2834f47", size = 6543015 }, { url = "https://files.pythonhosted.org/packages/9d/0f/d40f8373608caed2255781a3ad9a51d03a594a1248cd632d6a298daca693/pywin32-308-cp312-cp312-win_arm64.whl", hash = "sha256:9b4de86c8d909aed15b7011182c8cab38c8850de36e6afb1f0db22b8959e3091", size = 7976033 }, + { url = "https://files.pythonhosted.org/packages/a9/a4/aa562d8935e3df5e49c161b427a3a2efad2ed4e9cf81c3de636f1fdddfd0/pywin32-308-cp313-cp313-win32.whl", hash = "sha256:1c44539a37a5b7b21d02ab34e6a4d314e0788f1690d65b48e9b0b89f31abbbed", size = 5938579 }, + { url = "https://files.pythonhosted.org/packages/c7/50/b0efb8bb66210da67a53ab95fd7a98826a97ee21f1d22949863e6d588b22/pywin32-308-cp313-cp313-win_amd64.whl", hash = "sha256:fd380990e792eaf6827fcb7e187b2b4b1cede0585e3d0c9e84201ec27b9905e4", size = 6542056 }, + { url = "https://files.pythonhosted.org/packages/26/df/2b63e3e4f2df0224f8aaf6d131f54fe4e8c96400eb9df563e2aae2e1a1f9/pywin32-308-cp313-cp313-win_arm64.whl", hash = "sha256:ef313c46d4c18dfb82a2431e3051ac8f112ccee1a34f29c263c583c568db63cd", size = 7974986 }, ] [[package]] @@ -3860,6 +4151,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/c9/1f/4f998c900485e5c0ef43838363ba4a9723ac0ad73a9dc42068b12aaba4e4/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", size = 756611 }, { url = "https://files.pythonhosted.org/packages/df/d1/f5a275fdb252768b7a11ec63585bc38d0e87c9e05668a139fea92b80634c/PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", size = 140591 }, { url = 
"https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338 }, + { url = "https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309 }, + { url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679 }, + { url = "https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428 }, + { url = "https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361 }, + { url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523 }, + { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660 }, + { url = 
"https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597 }, + { url = "https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527 }, + { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446 }, ] [[package]] @@ -3928,6 +4228,21 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/66/5d/5dc02c87d9a0e64e0abd728d3255ddce8475e06b6be3f732a460f0a360c9/rapidfuzz-3.11.0-cp312-cp312-win32.whl", hash = "sha256:ba26d87fe7fcb56c4a53b549a9e0e9143f6b0df56d35fe6ad800c902447acd5b", size = 1824882 }, { url = "https://files.pythonhosted.org/packages/b7/da/a37d532cbefd7242191abf18f438b315bf5c72d742f78414a8ec1b7396cf/rapidfuzz-3.11.0-cp312-cp312-win_amd64.whl", hash = "sha256:b1f7efdd7b7adb32102c2fa481ad6f11923e2deb191f651274be559d56fc913b", size = 1606419 }, { url = "https://files.pythonhosted.org/packages/92/d0/1406d6e110aff87303e98f47adc5e76ef2e69d51cdd08b2d463520158cab/rapidfuzz-3.11.0-cp312-cp312-win_arm64.whl", hash = "sha256:ed78c8e94f57b44292c1a0350f580e18d3a3c5c0800e253f1583580c1b417ad2", size = 858655 }, + { url = "https://files.pythonhosted.org/packages/8a/30/984f1013d28b88304386c8e70b5d63db4765c28be8d9ef68d177c9addc77/rapidfuzz-3.11.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e60814edd0c9b511b5f377d48b9782b88cfe8be07a98f99973669299c8bb318a", size = 1931354 }, + { url = 
"https://files.pythonhosted.org/packages/a4/8a/41d4f95c5742a8a47c0e96c02957f72f8c34411cecde87fe371d5e09807e/rapidfuzz-3.11.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3f28952da055dbfe75828891cd3c9abf0984edc8640573c18b48c14c68ca5e06", size = 1417918 }, + { url = "https://files.pythonhosted.org/packages/e3/26/031ac8366831da6afc5f25462196eab0e0caf9422c83c007307e23a6f010/rapidfuzz-3.11.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e8f93bc736020351a6f8e71666e1f486bb8bd5ce8112c443a30c77bfde0eb68", size = 1388327 }, + { url = "https://files.pythonhosted.org/packages/17/1b/927edcd3b540770d3d6d52fe079c6bffdb99e9dfa4b73585bee2a8bd6504/rapidfuzz-3.11.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:76a4a11ba8f678c9e5876a7d465ab86def047a4fcc043617578368755d63a1bc", size = 5513214 }, + { url = "https://files.pythonhosted.org/packages/0d/a2/c1e4f35e7bfbbd97a665f8cd119d8bd4a085f1721366cd76582dc022131b/rapidfuzz-3.11.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc0e0d41ad8a056a9886bac91ff9d9978e54a244deb61c2972cc76b66752de9c", size = 1638560 }, + { url = "https://files.pythonhosted.org/packages/39/3f/6827972efddb1e357a0b6165ae9e310d7dc5c078af3023893365c212641b/rapidfuzz-3.11.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e8ea35f2419c7d56b3e75fbde2698766daedb374f20eea28ac9b1f668ef4f74", size = 1667185 }, + { url = "https://files.pythonhosted.org/packages/cc/5d/6902b93e1273e69ea087afd16e7504099bcb8d712a9f69cb649ea05ca7e1/rapidfuzz-3.11.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd340bbd025302276b5aa221dccfe43040c7babfc32f107c36ad783f2ffd8775", size = 3107466 }, + { url = "https://files.pythonhosted.org/packages/a6/02/bdb2048c9b8edf4cd82c2e8f6a8ed9af0fbdf91810ca2b36d1be6fc996d8/rapidfuzz-3.11.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:494eef2c68305ab75139034ea25328a04a548d297712d9cf887bf27c158c388b", 
size = 2302041 }, + { url = "https://files.pythonhosted.org/packages/12/91/0bbe51e3c15c02578487fd10a14692a40677ea974098d8d376bafd627a89/rapidfuzz-3.11.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:5a167344c1d6db06915fb0225592afdc24d8bafaaf02de07d4788ddd37f4bc2f", size = 6899969 }, + { url = "https://files.pythonhosted.org/packages/27/9d/09b85adfd5829f60bd6dbe53ba66dad22f93a281d494a5638b5f20fb6a8a/rapidfuzz-3.11.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:8c7af25bda96ac799378ac8aba54a8ece732835c7b74cfc201b688a87ed11152", size = 2669022 }, + { url = "https://files.pythonhosted.org/packages/cb/07/6fb723963243335c3bf73925914b6998649d642eff550187454d5bb3d077/rapidfuzz-3.11.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:d2a0f7e17f33e7890257367a1662b05fecaf56625f7dbb6446227aaa2b86448b", size = 3229475 }, + { url = "https://files.pythonhosted.org/packages/3a/8e/e9af6da2e235aa29ad2bb0a1fc2472b2949ed8d9ff8fb0f05b4bfbbf7675/rapidfuzz-3.11.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4d0d26c7172bdb64f86ee0765c5b26ea1dc45c52389175888ec073b9b28f4305", size = 4143861 }, + { url = "https://files.pythonhosted.org/packages/fd/d8/4677e36e958b4d95d039d254d597db9c020896c8130911dc36b136373b87/rapidfuzz-3.11.0-cp313-cp313-win32.whl", hash = "sha256:6ad02bab756751c90fa27f3069d7b12146613061341459abf55f8190d899649f", size = 1822624 }, + { url = "https://files.pythonhosted.org/packages/e8/97/1c782140e688ea2c3337d94516c635c575aa39fe62782fd53ad5d2119df4/rapidfuzz-3.11.0-cp313-cp313-win_amd64.whl", hash = "sha256:b1472986fd9c5d318399a01a0881f4a0bf4950264131bb8e2deba9df6d8c362b", size = 1604273 }, + { url = "https://files.pythonhosted.org/packages/a6/83/8b713d50bec947e945a79be47f772484307fc876c426fb26c6f369098389/rapidfuzz-3.11.0-cp313-cp313-win_arm64.whl", hash = "sha256:c408f09649cbff8da76f8d3ad878b64ba7f7abdad1471efb293d2c075e80c822", size = 857385 }, ] [[package]] @@ -4002,6 +4317,21 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/93/2d/dd56bb76bd8e95bbce684326302f287455b56242a4f9c61f1bc76e28360e/regex-2024.11.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0a86e7eeca091c09e021db8eb72d54751e527fa47b8d5787caf96d9831bd02ad", size = 787692 }, { url = "https://files.pythonhosted.org/packages/0b/55/31877a249ab7a5156758246b9c59539abbeba22461b7d8adc9e8475ff73e/regex-2024.11.6-cp312-cp312-win32.whl", hash = "sha256:32f9a4c643baad4efa81d549c2aadefaeba12249b2adc5af541759237eee1c54", size = 262135 }, { url = "https://files.pythonhosted.org/packages/38/ec/ad2d7de49a600cdb8dd78434a1aeffe28b9d6fc42eb36afab4a27ad23384/regex-2024.11.6-cp312-cp312-win_amd64.whl", hash = "sha256:a93c194e2df18f7d264092dc8539b8ffb86b45b899ab976aa15d48214138e81b", size = 273567 }, + { url = "https://files.pythonhosted.org/packages/90/73/bcb0e36614601016552fa9344544a3a2ae1809dc1401b100eab02e772e1f/regex-2024.11.6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a6ba92c0bcdf96cbf43a12c717eae4bc98325ca3730f6b130ffa2e3c3c723d84", size = 483525 }, + { url = "https://files.pythonhosted.org/packages/0f/3f/f1a082a46b31e25291d830b369b6b0c5576a6f7fb89d3053a354c24b8a83/regex-2024.11.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:525eab0b789891ac3be914d36893bdf972d483fe66551f79d3e27146191a37d4", size = 288324 }, + { url = "https://files.pythonhosted.org/packages/09/c9/4e68181a4a652fb3ef5099e077faf4fd2a694ea6e0f806a7737aff9e758a/regex-2024.11.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:086a27a0b4ca227941700e0b31425e7a28ef1ae8e5e05a33826e17e47fbfdba0", size = 284617 }, + { url = "https://files.pythonhosted.org/packages/fc/fd/37868b75eaf63843165f1d2122ca6cb94bfc0271e4428cf58c0616786dce/regex-2024.11.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bde01f35767c4a7899b7eb6e823b125a64de314a8ee9791367c9a34d56af18d0", size = 795023 }, + { url = 
"https://files.pythonhosted.org/packages/c4/7c/d4cd9c528502a3dedb5c13c146e7a7a539a3853dc20209c8e75d9ba9d1b2/regex-2024.11.6-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b583904576650166b3d920d2bcce13971f6f9e9a396c673187f49811b2769dc7", size = 833072 }, + { url = "https://files.pythonhosted.org/packages/4f/db/46f563a08f969159c5a0f0e722260568425363bea43bb7ae370becb66a67/regex-2024.11.6-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c4de13f06a0d54fa0d5ab1b7138bfa0d883220965a29616e3ea61b35d5f5fc7", size = 823130 }, + { url = "https://files.pythonhosted.org/packages/db/60/1eeca2074f5b87df394fccaa432ae3fc06c9c9bfa97c5051aed70e6e00c2/regex-2024.11.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3cde6e9f2580eb1665965ce9bf17ff4952f34f5b126beb509fee8f4e994f143c", size = 796857 }, + { url = "https://files.pythonhosted.org/packages/10/db/ac718a08fcee981554d2f7bb8402f1faa7e868c1345c16ab1ebec54b0d7b/regex-2024.11.6-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0d7f453dca13f40a02b79636a339c5b62b670141e63efd511d3f8f73fba162b3", size = 784006 }, + { url = "https://files.pythonhosted.org/packages/c2/41/7da3fe70216cea93144bf12da2b87367590bcf07db97604edeea55dac9ad/regex-2024.11.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:59dfe1ed21aea057a65c6b586afd2a945de04fc7db3de0a6e3ed5397ad491b07", size = 781650 }, + { url = "https://files.pythonhosted.org/packages/a7/d5/880921ee4eec393a4752e6ab9f0fe28009435417c3102fc413f3fe81c4e5/regex-2024.11.6-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b97c1e0bd37c5cd7902e65f410779d39eeda155800b65fc4d04cc432efa9bc6e", size = 789545 }, + { url = "https://files.pythonhosted.org/packages/dc/96/53770115e507081122beca8899ab7f5ae28ae790bfcc82b5e38976df6a77/regex-2024.11.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f9d1e379028e0fc2ae3654bac3cbbef81bf3fd571272a42d56c24007979bafb6", 
size = 853045 }, + { url = "https://files.pythonhosted.org/packages/31/d3/1372add5251cc2d44b451bd94f43b2ec78e15a6e82bff6a290ef9fd8f00a/regex-2024.11.6-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:13291b39131e2d002a7940fb176e120bec5145f3aeb7621be6534e46251912c4", size = 860182 }, + { url = "https://files.pythonhosted.org/packages/ed/e3/c446a64984ea9f69982ba1a69d4658d5014bc7a0ea468a07e1a1265db6e2/regex-2024.11.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4f51f88c126370dcec4908576c5a627220da6c09d0bff31cfa89f2523843316d", size = 787733 }, + { url = "https://files.pythonhosted.org/packages/2b/f1/e40c8373e3480e4f29f2692bd21b3e05f296d3afebc7e5dcf21b9756ca1c/regex-2024.11.6-cp313-cp313-win32.whl", hash = "sha256:63b13cfd72e9601125027202cad74995ab26921d8cd935c25f09c630436348ff", size = 262122 }, + { url = "https://files.pythonhosted.org/packages/45/94/bc295babb3062a731f52621cdc992d123111282e291abaf23faa413443ea/regex-2024.11.6-cp313-cp313-win_amd64.whl", hash = "sha256:2b3361af3198667e99927da8b84c1b010752fa4b1115ee30beaa332cabc3ef1a", size = 273545 }, ] [[package]] @@ -4151,6 +4481,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a4/f6/ff7beaeb644bcad72bcfd5a03ff36d32ee4e53a8b29a639f11bcb65d06cd/scikit_learn-1.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1061b7c028a8663fb9a1a1baf9317b64a257fcb036dae5c8752b2abef31d136f", size = 12253728 }, { url = "https://files.pythonhosted.org/packages/29/7a/8bce8968883e9465de20be15542f4c7e221952441727c4dad24d534c6d99/scikit_learn-1.6.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e69fab4ebfc9c9b580a7a80111b43d214ab06250f8a7ef590a4edf72464dd86", size = 13147700 }, { url = "https://files.pythonhosted.org/packages/62/27/585859e72e117fe861c2079bcba35591a84f801e21bc1ab85bce6ce60305/scikit_learn-1.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:70b1d7e85b1c96383f872a519b3375f92f14731e279a7b4c6cfd650cf5dffc52", size = 11110613 }, + { url = 
"https://files.pythonhosted.org/packages/2e/59/8eb1872ca87009bdcdb7f3cdc679ad557b992c12f4b61f9250659e592c63/scikit_learn-1.6.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2ffa1e9e25b3d93990e74a4be2c2fc61ee5af85811562f1288d5d055880c4322", size = 12010001 }, + { url = "https://files.pythonhosted.org/packages/9d/05/f2fc4effc5b32e525408524c982c468c29d22f828834f0625c5ef3d601be/scikit_learn-1.6.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:dc5cf3d68c5a20ad6d571584c0750ec641cc46aeef1c1507be51300e6003a7e1", size = 11096360 }, + { url = "https://files.pythonhosted.org/packages/c8/e4/4195d52cf4f113573fb8ebc44ed5a81bd511a92c0228889125fac2f4c3d1/scikit_learn-1.6.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c06beb2e839ecc641366000ca84f3cf6fa9faa1777e29cf0c04be6e4d096a348", size = 12209004 }, + { url = "https://files.pythonhosted.org/packages/94/be/47e16cdd1e7fcf97d95b3cb08bde1abb13e627861af427a3651fcb80b517/scikit_learn-1.6.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8ca8cb270fee8f1f76fa9bfd5c3507d60c6438bbee5687f81042e2bb98e5a97", size = 13171776 }, + { url = "https://files.pythonhosted.org/packages/34/b0/ca92b90859070a1487827dbc672f998da95ce83edce1270fc23f96f1f61a/scikit_learn-1.6.1-cp313-cp313-win_amd64.whl", hash = "sha256:7a1c43c8ec9fde528d664d947dc4c0789be4077a3647f232869f41d9bf50e0fb", size = 11071865 }, + { url = "https://files.pythonhosted.org/packages/12/ae/993b0fb24a356e71e9a894e42b8a9eec528d4c70217353a1cd7a48bc25d4/scikit_learn-1.6.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:a17c1dea1d56dcda2fac315712f3651a1fea86565b64b48fa1bc090249cbf236", size = 11955804 }, + { url = "https://files.pythonhosted.org/packages/d6/54/32fa2ee591af44507eac86406fa6bba968d1eb22831494470d0a2e4a1eb1/scikit_learn-1.6.1-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:6a7aa5f9908f0f28f4edaa6963c0a6183f1911e63a69aa03782f0d924c830a35", size = 11100530 }, + { url = 
"https://files.pythonhosted.org/packages/3f/58/55856da1adec655bdce77b502e94a267bf40a8c0b89f8622837f89503b5a/scikit_learn-1.6.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0650e730afb87402baa88afbf31c07b84c98272622aaba002559b614600ca691", size = 12433852 }, + { url = "https://files.pythonhosted.org/packages/ff/4f/c83853af13901a574f8f13b645467285a48940f185b690936bb700a50863/scikit_learn-1.6.1-cp313-cp313t-win_amd64.whl", hash = "sha256:3f59fe08dc03ea158605170eb52b22a105f238a5d512c4470ddeca71feae8e5f", size = 11337256 }, ] [[package]] @@ -4178,6 +4517,21 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b0/3c/0de11ca154e24a57b579fb648151d901326d3102115bc4f9a7a86526ce54/scipy-1.15.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fb57b30f0017d4afa5fe5f5b150b8f807618819287c21cbe51130de7ccdaed2", size = 40249869 }, { url = "https://files.pythonhosted.org/packages/15/09/472e8d0a6b33199d1bb95e49bedcabc0976c3724edd9b0ef7602ccacf41e/scipy-1.15.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:491d57fe89927fa1aafbe260f4cfa5ffa20ab9f1435025045a5315006a91b8f5", size = 42629068 }, { url = "https://files.pythonhosted.org/packages/ff/ba/31c7a8131152822b3a2cdeba76398ffb404d81d640de98287d236da90c49/scipy-1.15.1-cp312-cp312-win_amd64.whl", hash = "sha256:900f3fa3db87257510f011c292a5779eb627043dd89731b9c461cd16ef76ab3d", size = 43621992 }, + { url = "https://files.pythonhosted.org/packages/2b/bf/dd68965a4c5138a630eeed0baec9ae96e5d598887835bdde96cdd2fe4780/scipy-1.15.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:100193bb72fbff37dbd0bf14322314fc7cbe08b7ff3137f11a34d06dc0ee6b85", size = 41441136 }, + { url = "https://files.pythonhosted.org/packages/ef/5e/4928581312922d7e4d416d74c416a660addec4dd5ea185401df2269ba5a0/scipy-1.15.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:2114a08daec64980e4b4cbdf5bee90935af66d750146b1d2feb0d3ac30613692", size = 32533699 }, + { url = 
"https://files.pythonhosted.org/packages/32/90/03f99c43041852837686898c66767787cd41c5843d7a1509c39ffef683e9/scipy-1.15.1-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:6b3e71893c6687fc5e29208d518900c24ea372a862854c9888368c0b267387ab", size = 24807289 }, + { url = "https://files.pythonhosted.org/packages/9d/52/bfe82b42ae112eaba1af2f3e556275b8727d55ac6e4932e7aef337a9d9d4/scipy-1.15.1-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:837299eec3d19b7e042923448d17d95a86e43941104d33f00da7e31a0f715d3c", size = 27929844 }, + { url = "https://files.pythonhosted.org/packages/f6/77/54ff610bad600462c313326acdb035783accc6a3d5f566d22757ad297564/scipy-1.15.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82add84e8a9fb12af5c2c1a3a3f1cb51849d27a580cb9e6bd66226195142be6e", size = 38031272 }, + { url = "https://files.pythonhosted.org/packages/f1/26/98585cbf04c7cf503d7eb0a1966df8a268154b5d923c5fe0c1ed13154c49/scipy-1.15.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:070d10654f0cb6abd295bc96c12656f948e623ec5f9a4eab0ddb1466c000716e", size = 40210217 }, + { url = "https://files.pythonhosted.org/packages/fd/3f/3d2285eb6fece8bc5dbb2f9f94d61157d61d155e854fd5fea825b8218f12/scipy-1.15.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:55cc79ce4085c702ac31e49b1e69b27ef41111f22beafb9b49fea67142b696c4", size = 42587785 }, + { url = "https://files.pythonhosted.org/packages/48/7d/5b5251984bf0160d6533695a74a5fddb1fa36edd6f26ffa8c871fbd4782a/scipy-1.15.1-cp313-cp313-win_amd64.whl", hash = "sha256:c352c1b6d7cac452534517e022f8f7b8d139cd9f27e6fbd9f3cbd0bfd39f5bef", size = 43640439 }, + { url = "https://files.pythonhosted.org/packages/e7/b8/0e092f592d280496de52e152582030f8a270b194f87f890e1a97c5599b81/scipy-1.15.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0458839c9f873062db69a03de9a9765ae2e694352c76a16be44f93ea45c28d2b", size = 41619862 }, + { url = 
"https://files.pythonhosted.org/packages/f6/19/0b6e1173aba4db9e0b7aa27fe45019857fb90d6904038b83927cbe0a6c1d/scipy-1.15.1-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:af0b61c1de46d0565b4b39c6417373304c1d4f5220004058bdad3061c9fa8a95", size = 32610387 }, + { url = "https://files.pythonhosted.org/packages/e7/02/754aae3bd1fa0f2479ade3cfdf1732ecd6b05853f63eee6066a32684563a/scipy-1.15.1-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:71ba9a76c2390eca6e359be81a3e879614af3a71dfdabb96d1d7ab33da6f2364", size = 24883814 }, + { url = "https://files.pythonhosted.org/packages/1f/ac/d7906201604a2ea3b143bb0de51b3966f66441ba50b7dc182c4505b3edf9/scipy-1.15.1-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:14eaa373c89eaf553be73c3affb11ec6c37493b7eaaf31cf9ac5dffae700c2e0", size = 27944865 }, + { url = "https://files.pythonhosted.org/packages/84/9d/8f539002b5e203723af6a6f513a45e0a7671e9dabeedb08f417ac17e4edc/scipy-1.15.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f735bc41bd1c792c96bc426dece66c8723283695f02df61dcc4d0a707a42fc54", size = 39883261 }, + { url = "https://files.pythonhosted.org/packages/97/c0/62fd3bab828bcccc9b864c5997645a3b86372a35941cdaf677565c25c98d/scipy-1.15.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:2722a021a7929d21168830790202a75dbb20b468a8133c74a2c0230c72626b6c", size = 42093299 }, + { url = "https://files.pythonhosted.org/packages/e4/1f/5d46a8d94e9f6d2c913cbb109e57e7eed914de38ea99e2c4d69a9fc93140/scipy-1.15.1-cp313-cp313t-win_amd64.whl", hash = "sha256:bc7136626261ac1ed988dca56cfc4ab5180f75e0ee52e58f1e6aa74b5f3eacd5", size = 43181730 }, ] [[package]] @@ -4252,6 +4606,12 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d5/7d/9a57e187cbf2fbbbdfd4044a4f9ce141c8d221f9963750d3b001f0ec080d/shapely-2.0.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98fea108334be345c283ce74bf064fa00cfdd718048a8af7343c59eb40f59726", size = 2524835 }, { url = 
"https://files.pythonhosted.org/packages/6d/0a/f407509ab56825f39bf8cfce1fb410238da96cf096809c3e404e5bc71ea1/shapely-2.0.6-cp312-cp312-win32.whl", hash = "sha256:42fd4cd4834747e4990227e4cbafb02242c0cffe9ce7ef9971f53ac52d80d55f", size = 1295613 }, { url = "https://files.pythonhosted.org/packages/7b/b3/857afd9dfbfc554f10d683ac412eac6fa260d1f4cd2967ecb655c57e831a/shapely-2.0.6-cp312-cp312-win_amd64.whl", hash = "sha256:665990c84aece05efb68a21b3523a6b2057e84a1afbef426ad287f0796ef8a48", size = 1442539 }, + { url = "https://files.pythonhosted.org/packages/34/e8/d164ef5b0eab86088cde06dee8415519ffd5bb0dd1bd9d021e640e64237c/shapely-2.0.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:42805ef90783ce689a4dde2b6b2f261e2c52609226a0438d882e3ced40bb3013", size = 1445344 }, + { url = "https://files.pythonhosted.org/packages/ce/e2/9fba7ac142f7831757a10852bfa465683724eadbc93d2d46f74a16f9af04/shapely-2.0.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6d2cb146191a47bd0cee8ff5f90b47547b82b6345c0d02dd8b25b88b68af62d7", size = 1296182 }, + { url = "https://files.pythonhosted.org/packages/cf/dc/790d4bda27d196cd56ec66975eaae3351c65614cafd0e16ddde39ec9fb92/shapely-2.0.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e3fdef0a1794a8fe70dc1f514440aa34426cc0ae98d9a1027fb299d45741c381", size = 2423426 }, + { url = "https://files.pythonhosted.org/packages/af/b0/f8169f77eac7392d41e231911e0095eb1148b4d40c50ea9e34d999c89a7e/shapely-2.0.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c665a0301c645615a107ff7f52adafa2153beab51daf34587170d85e8ba6805", size = 2513249 }, + { url = "https://files.pythonhosted.org/packages/f6/1d/a8c0e9ab49ff2f8e4dedd71b0122eafb22a18ad7e9d256025e1f10c84704/shapely-2.0.6-cp313-cp313-win32.whl", hash = "sha256:0334bd51828f68cd54b87d80b3e7cee93f249d82ae55a0faf3ea21c9be7b323a", size = 1294848 }, + { url = 
"https://files.pythonhosted.org/packages/23/38/2bc32dd1e7e67a471d4c60971e66df0bdace88656c47a9a728ace0091075/shapely-2.0.6-cp313-cp313-win_amd64.whl", hash = "sha256:d37d070da9e0e0f0a530a621e17c0b8c3c9d04105655132a87cfff8bd77cc4c2", size = 1441371 }, ] [[package]] @@ -4423,6 +4783,12 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/26/32/e0e3a859136e95c85a572e4806dc58bf1ddf651108ae8b97d5f3ebe1a244/tiktoken-0.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2908c0d043a7d03ebd80347266b0e58440bdef5564f84f4d29fb235b5df3b04", size = 1175432 }, { url = "https://files.pythonhosted.org/packages/c7/89/926b66e9025b97e9fbabeaa59048a736fe3c3e4530a204109571104f921c/tiktoken-0.8.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:294440d21a2a51e12d4238e68a5972095534fe9878be57d905c476017bff99fc", size = 1236576 }, { url = "https://files.pythonhosted.org/packages/45/e2/39d4aa02a52bba73b2cd21ba4533c84425ff8786cc63c511d68c8897376e/tiktoken-0.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:d8f3192733ac4d77977432947d563d7e1b310b96497acd3c196c9bddb36ed9db", size = 883824 }, + { url = "https://files.pythonhosted.org/packages/e3/38/802e79ba0ee5fcbf240cd624143f57744e5d411d2e9d9ad2db70d8395986/tiktoken-0.8.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:02be1666096aff7da6cbd7cdaa8e7917bfed3467cd64b38b1f112e96d3b06a24", size = 1039648 }, + { url = "https://files.pythonhosted.org/packages/b1/da/24cdbfc302c98663fbea66f5866f7fa1048405c7564ab88483aea97c3b1a/tiktoken-0.8.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c94ff53c5c74b535b2cbf431d907fc13c678bbd009ee633a2aca269a04389f9a", size = 982763 }, + { url = "https://files.pythonhosted.org/packages/e4/f0/0ecf79a279dfa41fc97d00adccf976ecc2556d3c08ef3e25e45eb31f665b/tiktoken-0.8.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b231f5e8982c245ee3065cd84a4712d64692348bc609d84467c57b4b72dcbc5", size = 1144417 }, + { url = 
"https://files.pythonhosted.org/packages/ab/d3/155d2d4514f3471a25dc1d6d20549ef254e2aa9bb5b1060809b1d3b03d3a/tiktoken-0.8.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4177faa809bd55f699e88c96d9bb4635d22e3f59d635ba6fd9ffedf7150b9953", size = 1175108 }, + { url = "https://files.pythonhosted.org/packages/19/eb/5989e16821ee8300ef8ee13c16effc20dfc26c777d05fbb6825e3c037b81/tiktoken-0.8.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5376b6f8dc4753cd81ead935c5f518fa0fbe7e133d9e25f648d8c4dabdd4bad7", size = 1236520 }, + { url = "https://files.pythonhosted.org/packages/40/59/14b20465f1d1cb89cfbc96ec27e5617b2d41c79da12b5e04e96d689be2a7/tiktoken-0.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:18228d624807d66c87acd8f25fc135665617cab220671eb65b50f5d70fa51f69", size = 883849 }, ] [[package]] @@ -4471,7 +4837,7 @@ dependencies = [ { name = "nvidia-nccl-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, { name = "nvidia-nvjitlink-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, { name = "nvidia-nvtx-cu12", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, - { name = "setuptools", marker = "python_full_version >= '3.12'" }, + { name = "setuptools" }, { name = "sympy" }, { name = "triton", marker = "platform_machine == 'x86_64' and platform_system == 'Linux'" }, { name = "typing-extensions" }, @@ -4485,6 +4851,7 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/6d/69/d8ada8b6e0a4257556d5b4ddeb4345ea8eeaaef3c98b60d1cca197c7ad8e/torch-2.5.1-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:3f4b7f10a247e0dcd7ea97dc2d3bfbfc90302ed36d7f3952b0008d0df264e697", size = 91811673 }, { url = "https://files.pythonhosted.org/packages/5f/ba/607d013b55b9fd805db2a5c2662ec7551f1910b4eef39653eeaba182c5b2/torch-2.5.1-cp312-cp312-win_amd64.whl", hash = "sha256:73e58e78f7d220917c5dbfad1a40e09df9929d3b95d25e57d9f8558f84c9a11c", size = 203046841 }, { url = 
"https://files.pythonhosted.org/packages/57/6c/bf52ff061da33deb9f94f4121fde7ff3058812cb7d2036c97bc167793bd1/torch-2.5.1-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:8c712df61101964eb11910a846514011f0b6f5920c55dbf567bff8a34163d5b1", size = 63858109 }, + { url = "https://files.pythonhosted.org/packages/69/72/20cb30f3b39a9face296491a86adb6ff8f1a47a897e4d14667e6cf89d5c3/torch-2.5.1-cp313-cp313-manylinux1_x86_64.whl", hash = "sha256:9b61edf3b4f6e3b0e0adda8b3960266b9009d02b37555971f4d1c8f7a05afed7", size = 906393265 }, ] [[package]] @@ -4616,6 +4983,16 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/f2/2c/6990f4ccb41ed93744aaaa3786394bca0875503f97690622f3cafc0adfde/ujson-5.10.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:604a046d966457b6cdcacc5aa2ec5314f0e8c42bae52842c1e6fa02ea4bda42e", size = 1043576 }, { url = "https://files.pythonhosted.org/packages/14/f5/a2368463dbb09fbdbf6a696062d0c0f62e4ae6fa65f38f829611da2e8fdd/ujson-5.10.0-cp312-cp312-win32.whl", hash = "sha256:6dea1c8b4fc921bf78a8ff00bbd2bfe166345f5536c510671bccececb187c80e", size = 38764 }, { url = "https://files.pythonhosted.org/packages/59/2d/691f741ffd72b6c84438a93749ac57bf1a3f217ac4b0ea4fd0e96119e118/ujson-5.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:38665e7d8290188b1e0d57d584eb8110951a9591363316dd41cf8686ab1d0abc", size = 42211 }, + { url = "https://files.pythonhosted.org/packages/0d/69/b3e3f924bb0e8820bb46671979770c5be6a7d51c77a66324cdb09f1acddb/ujson-5.10.0-cp313-cp313-macosx_10_9_x86_64.whl", hash = "sha256:618efd84dc1acbd6bff8eaa736bb6c074bfa8b8a98f55b61c38d4ca2c1f7f287", size = 55646 }, + { url = "https://files.pythonhosted.org/packages/32/8a/9b748eb543c6cabc54ebeaa1f28035b1bd09c0800235b08e85990734c41e/ujson-5.10.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:38d5d36b4aedfe81dfe251f76c0467399d575d1395a1755de391e58985ab1c2e", size = 51806 }, + { url = 
"https://files.pythonhosted.org/packages/39/50/4b53ea234413b710a18b305f465b328e306ba9592e13a791a6a6b378869b/ujson-5.10.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67079b1f9fb29ed9a2914acf4ef6c02844b3153913eb735d4bf287ee1db6e557", size = 51975 }, + { url = "https://files.pythonhosted.org/packages/b4/9d/8061934f960cdb6dd55f0b3ceeff207fcc48c64f58b43403777ad5623d9e/ujson-5.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7d0e0ceeb8fe2468c70ec0c37b439dd554e2aa539a8a56365fd761edb418988", size = 53693 }, + { url = "https://files.pythonhosted.org/packages/f5/be/7bfa84b28519ddbb67efc8410765ca7da55e6b93aba84d97764cd5794dbc/ujson-5.10.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:59e02cd37bc7c44d587a0ba45347cc815fb7a5fe48de16bf05caa5f7d0d2e816", size = 58594 }, + { url = "https://files.pythonhosted.org/packages/48/eb/85d465abafb2c69d9699cfa5520e6e96561db787d36c677370e066c7e2e7/ujson-5.10.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:2a890b706b64e0065f02577bf6d8ca3b66c11a5e81fb75d757233a38c07a1f20", size = 997853 }, + { url = "https://files.pythonhosted.org/packages/9f/76/2a63409fc05d34dd7d929357b7a45e3a2c96f22b4225cd74becd2ba6c4cb/ujson-5.10.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:621e34b4632c740ecb491efc7f1fcb4f74b48ddb55e65221995e74e2d00bbff0", size = 1140694 }, + { url = "https://files.pythonhosted.org/packages/45/ed/582c4daba0f3e1688d923b5cb914ada1f9defa702df38a1916c899f7c4d1/ujson-5.10.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b9500e61fce0cfc86168b248104e954fead61f9be213087153d272e817ec7b4f", size = 1043580 }, + { url = "https://files.pythonhosted.org/packages/d7/0c/9837fece153051e19c7bade9f88f9b409e026b9525927824cdf16293b43b/ujson-5.10.0-cp313-cp313-win32.whl", hash = "sha256:4c4fc16f11ac1612f05b6f5781b384716719547e142cfd67b65d035bd85af165", size = 38766 }, + { url = 
"https://files.pythonhosted.org/packages/d7/72/6cb6728e2738c05bbe9bd522d6fc79f86b9a28402f38663e85a28fddd4a0/ujson-5.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:4573fd1695932d4f619928fd09d5d03d917274381649ade4328091ceca175539", size = 42212 }, ] [[package]] @@ -4732,6 +5109,12 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/06/a7/b4e6a19925c900be9f98bec0a75e6e8f79bb53bdeb891916609ab3958967/uvloop-0.21.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86975dca1c773a2c9864f4c52c5a55631038e387b47eaf56210f873887b6c8dc", size = 4693770 }, { url = "https://files.pythonhosted.org/packages/ce/0c/f07435a18a4b94ce6bd0677d8319cd3de61f3a9eeb1e5f8ab4e8b5edfcb3/uvloop-0.21.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:461d9ae6660fbbafedd07559c6a2e57cd553b34b0065b6550685f6653a98c1cb", size = 4451321 }, { url = "https://files.pythonhosted.org/packages/8f/eb/f7032be105877bcf924709c97b1bf3b90255b4ec251f9340cef912559f28/uvloop-0.21.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:183aef7c8730e54c9a3ee3227464daed66e37ba13040bb3f350bc2ddc040f22f", size = 4659022 }, + { url = "https://files.pythonhosted.org/packages/3f/8d/2cbef610ca21539f0f36e2b34da49302029e7c9f09acef0b1c3b5839412b/uvloop-0.21.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:bfd55dfcc2a512316e65f16e503e9e450cab148ef11df4e4e679b5e8253a5281", size = 1468123 }, + { url = "https://files.pythonhosted.org/packages/93/0d/b0038d5a469f94ed8f2b2fce2434a18396d8fbfb5da85a0a9781ebbdec14/uvloop-0.21.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:787ae31ad8a2856fc4e7c095341cccc7209bd657d0e71ad0dc2ea83c4a6fa8af", size = 819325 }, + { url = "https://files.pythonhosted.org/packages/50/94/0a687f39e78c4c1e02e3272c6b2ccdb4e0085fda3b8352fecd0410ccf915/uvloop-0.21.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ee4d4ef48036ff6e5cfffb09dd192c7a5027153948d85b8da7ff705065bacc6", size = 4582806 }, + { url = 
"https://files.pythonhosted.org/packages/d2/19/f5b78616566ea68edd42aacaf645adbf71fbd83fc52281fba555dc27e3f1/uvloop-0.21.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3df876acd7ec037a3d005b3ab85a7e4110422e4d9c1571d4fc89b0fc41b6816", size = 4701068 }, + { url = "https://files.pythonhosted.org/packages/47/57/66f061ee118f413cd22a656de622925097170b9380b30091b78ea0c6ea75/uvloop-0.21.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd53ecc9a0f3d87ab847503c2e1552b690362e005ab54e8a48ba97da3924c0dc", size = 4454428 }, + { url = "https://files.pythonhosted.org/packages/63/9a/0962b05b308494e3202d3f794a6e85abe471fe3cafdbcf95c2e8c713aabd/uvloop-0.21.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a5c39f217ab3c663dc699c04cbd50c13813e31d917642d459fdcec07555cc553", size = 4660018 }, ] [[package]] @@ -4778,6 +5161,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/f1/47/143c92418e30cb9348a4387bfa149c8e0e404a7c5b0585d46d2f7031b4b9/watchfiles-1.0.4-cp312-cp312-win32.whl", hash = "sha256:b045c800d55bc7e2cadd47f45a97c7b29f70f08a7c2fa13241905010a5493f94", size = 271822 }, { url = "https://files.pythonhosted.org/packages/ea/94/b0165481bff99a64b29e46e07ac2e0df9f7a957ef13bec4ceab8515f44e3/watchfiles-1.0.4-cp312-cp312-win_amd64.whl", hash = "sha256:c2acfa49dd0ad0bf2a9c0bb9a985af02e89345a7189be1efc6baa085e0f72d7c", size = 285441 }, { url = "https://files.pythonhosted.org/packages/11/de/09fe56317d582742d7ca8c2ca7b52a85927ebb50678d9b0fa8194658f536/watchfiles-1.0.4-cp312-cp312-win_arm64.whl", hash = "sha256:22bb55a7c9e564e763ea06c7acea24fc5d2ee5dfc5dafc5cfbedfe58505e9f90", size = 277141 }, + { url = "https://files.pythonhosted.org/packages/08/98/f03efabec64b5b1fa58c0daab25c68ef815b0f320e54adcacd0d6847c339/watchfiles-1.0.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:8012bd820c380c3d3db8435e8cf7592260257b378b649154a7948a663b5f84e9", size = 390954 }, + { url = 
"https://files.pythonhosted.org/packages/16/09/4dd49ba0a32a45813debe5fb3897955541351ee8142f586303b271a02b40/watchfiles-1.0.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:aa216f87594f951c17511efe5912808dfcc4befa464ab17c98d387830ce07b60", size = 381133 }, + { url = "https://files.pythonhosted.org/packages/76/59/5aa6fc93553cd8d8ee75c6247763d77c02631aed21551a97d94998bf1dae/watchfiles-1.0.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62c9953cf85529c05b24705639ffa390f78c26449e15ec34d5339e8108c7c407", size = 449516 }, + { url = "https://files.pythonhosted.org/packages/4c/aa/df4b6fe14b6317290b91335b23c96b488d365d65549587434817e06895ea/watchfiles-1.0.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7cf684aa9bba4cd95ecb62c822a56de54e3ae0598c1a7f2065d51e24637a3c5d", size = 454820 }, + { url = "https://files.pythonhosted.org/packages/5e/71/185f8672f1094ce48af33252c73e39b48be93b761273872d9312087245f6/watchfiles-1.0.4-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f44a39aee3cbb9b825285ff979ab887a25c5d336e5ec3574f1506a4671556a8d", size = 481550 }, + { url = "https://files.pythonhosted.org/packages/85/d7/50ebba2c426ef1a5cb17f02158222911a2e005d401caf5d911bfca58f4c4/watchfiles-1.0.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a38320582736922be8c865d46520c043bff350956dfc9fbaee3b2df4e1740a4b", size = 518647 }, + { url = "https://files.pythonhosted.org/packages/f0/7a/4c009342e393c545d68987e8010b937f72f47937731225b2b29b7231428f/watchfiles-1.0.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:39f4914548b818540ef21fd22447a63e7be6e24b43a70f7642d21f1e73371590", size = 497547 }, + { url = "https://files.pythonhosted.org/packages/0f/7c/1cf50b35412d5c72d63b2bf9a4fffee2e1549a245924960dd087eb6a6de4/watchfiles-1.0.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:f12969a3765909cf5dc1e50b2436eb2c0e676a3c75773ab8cc3aa6175c16e902", size = 452179 }, + { url = "https://files.pythonhosted.org/packages/d6/a9/3db1410e1c1413735a9a472380e4f431ad9a9e81711cda2aaf02b7f62693/watchfiles-1.0.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:0986902677a1a5e6212d0c49b319aad9cc48da4bd967f86a11bde96ad9676ca1", size = 614125 }, + { url = "https://files.pythonhosted.org/packages/f2/e1/0025d365cf6248c4d1ee4c3d2e3d373bdd3f6aff78ba4298f97b4fad2740/watchfiles-1.0.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:308ac265c56f936636e3b0e3f59e059a40003c655228c131e1ad439957592303", size = 611911 }, + { url = "https://files.pythonhosted.org/packages/55/55/035838277d8c98fc8c917ac9beeb0cd6c59d675dc2421df5f9fcf44a0070/watchfiles-1.0.4-cp313-cp313-win32.whl", hash = "sha256:aee397456a29b492c20fda2d8961e1ffb266223625346ace14e4b6d861ba9c80", size = 271152 }, + { url = "https://files.pythonhosted.org/packages/f0/e5/96b8e55271685ddbadc50ce8bc53aa2dff278fb7ac4c2e473df890def2dc/watchfiles-1.0.4-cp313-cp313-win_amd64.whl", hash = "sha256:d6097538b0ae5c1b88c3b55afa245a66793a8fec7ada6755322e465fb1a0e8cc", size = 285216 }, ] [[package]] @@ -4835,6 +5230,17 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/cf/53/1bf0c06618b5ac35f1d7906444b9958f8485682ab0ea40dee7b17a32da1e/websockets-14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:eb6d38971c800ff02e4a6afd791bbe3b923a9a57ca9aeab7314c21c84bf9ff05", size = 168712 }, { url = "https://files.pythonhosted.org/packages/e5/22/5ec2f39fff75f44aa626f86fa7f20594524a447d9c3be94d8482cd5572ef/websockets-14.1-cp312-cp312-win32.whl", hash = "sha256:1d045cbe1358d76b24d5e20e7b1878efe578d9897a25c24e6006eef788c0fdf0", size = 162838 }, { url = "https://files.pythonhosted.org/packages/74/27/28f07df09f2983178db7bf6c9cccc847205d2b92ced986cd79565d68af4f/websockets-14.1-cp312-cp312-win_amd64.whl", hash = "sha256:90f4c7a069c733d95c308380aae314f2cb45bd8a904fb03eb36d1a4983a4993f", size = 163277 }, + { url 
= "https://files.pythonhosted.org/packages/34/77/812b3ba5110ed8726eddf9257ab55ce9e85d97d4aa016805fdbecc5e5d48/websockets-14.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:3630b670d5057cd9e08b9c4dab6493670e8e762a24c2c94ef312783870736ab9", size = 161966 }, + { url = "https://files.pythonhosted.org/packages/8d/24/4fcb7aa6986ae7d9f6d083d9d53d580af1483c5ec24bdec0978307a0f6ac/websockets-14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:36ebd71db3b89e1f7b1a5deaa341a654852c3518ea7a8ddfdf69cc66acc2db1b", size = 159625 }, + { url = "https://files.pythonhosted.org/packages/f8/47/2a0a3a2fc4965ff5b9ce9324d63220156bd8bedf7f90824ab92a822e65fd/websockets-14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5b918d288958dc3fa1c5a0b9aa3256cb2b2b84c54407f4813c45d52267600cd3", size = 159857 }, + { url = "https://files.pythonhosted.org/packages/dd/c8/d7b425011a15e35e17757e4df75b25e1d0df64c0c315a44550454eaf88fc/websockets-14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00fe5da3f037041da1ee0cf8e308374e236883f9842c7c465aa65098b1c9af59", size = 169635 }, + { url = "https://files.pythonhosted.org/packages/93/39/6e3b5cffa11036c40bd2f13aba2e8e691ab2e01595532c46437b56575678/websockets-14.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8149a0f5a72ca36720981418eeffeb5c2729ea55fa179091c81a0910a114a5d2", size = 168578 }, + { url = "https://files.pythonhosted.org/packages/cf/03/8faa5c9576299b2adf34dcccf278fc6bbbcda8a3efcc4d817369026be421/websockets-14.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77569d19a13015e840b81550922056acabc25e3f52782625bc6843cfa034e1da", size = 169018 }, + { url = "https://files.pythonhosted.org/packages/8c/05/ea1fec05cc3a60defcdf0bb9f760c3c6bd2dd2710eff7ac7f891864a22ba/websockets-14.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:cf5201a04550136ef870aa60ad3d29d2a59e452a7f96b94193bee6d73b8ad9a9", size = 169383 }, + { url = "https://files.pythonhosted.org/packages/21/1d/eac1d9ed787f80754e51228e78855f879ede1172c8b6185aca8cef494911/websockets-14.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:88cf9163ef674b5be5736a584c999e98daf3aabac6e536e43286eb74c126b9c7", size = 168773 }, + { url = "https://files.pythonhosted.org/packages/0e/1b/e808685530185915299740d82b3a4af3f2b44e56ccf4389397c7a5d95d39/websockets-14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:836bef7ae338a072e9d1863502026f01b14027250a4545672673057997d5c05a", size = 168757 }, + { url = "https://files.pythonhosted.org/packages/b6/19/6ab716d02a3b068fbbeb6face8a7423156e12c446975312f1c7c0f4badab/websockets-14.1-cp313-cp313-win32.whl", hash = "sha256:0d4290d559d68288da9f444089fd82490c8d2744309113fc26e2da6e48b65da6", size = 162834 }, + { url = "https://files.pythonhosted.org/packages/6c/fd/ab6b7676ba712f2fc89d1347a4b5bdc6aa130de10404071f2b2606450209/websockets-14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8621a07991add373c3c5c2cf89e1d277e49dc82ed72c75e3afc74bd0acc446f0", size = 163277 }, { url = "https://files.pythonhosted.org/packages/b0/0b/c7e5d11020242984d9d37990310520ed663b942333b83a033c2f20191113/websockets-14.1-py3-none-any.whl", hash = "sha256:4d4fc827a20abe6d544a119896f6b78ee13fe81cbfef416f3f2ddf09a03f0e2e", size = 156277 }, ] @@ -4884,6 +5290,28 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/80/4e/eb8b353e36711347893f502ce91c770b0b0929f8f0bed2670a6856e667a9/wrapt-1.17.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e8b2816ebef96d83657b56306152a93909a83f23994f4b30ad4573b00bd11bb9", size = 87567 }, { url = "https://files.pythonhosted.org/packages/17/27/4fe749a54e7fae6e7146f1c7d914d28ef599dacd4416566c055564080fe2/wrapt-1.17.2-cp312-cp312-win32.whl", hash = "sha256:468090021f391fe0056ad3e807e3d9034e0fd01adcd3bdfba977b6fdf4213ea9", size = 36672 }, { url = 
"https://files.pythonhosted.org/packages/15/06/1dbf478ea45c03e78a6a8c4be4fdc3c3bddea5c8de8a93bc971415e47f0f/wrapt-1.17.2-cp312-cp312-win_amd64.whl", hash = "sha256:ec89ed91f2fa8e3f52ae53cd3cf640d6feff92ba90d62236a81e4e563ac0e991", size = 38865 }, + { url = "https://files.pythonhosted.org/packages/ce/b9/0ffd557a92f3b11d4c5d5e0c5e4ad057bd9eb8586615cdaf901409920b14/wrapt-1.17.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6ed6ffac43aecfe6d86ec5b74b06a5be33d5bb9243d055141e8cabb12aa08125", size = 53800 }, + { url = "https://files.pythonhosted.org/packages/c0/ef/8be90a0b7e73c32e550c73cfb2fa09db62234227ece47b0e80a05073b375/wrapt-1.17.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:35621ae4c00e056adb0009f8e86e28eb4a41a4bfa8f9bfa9fca7d343fe94f998", size = 38824 }, + { url = "https://files.pythonhosted.org/packages/36/89/0aae34c10fe524cce30fe5fc433210376bce94cf74d05b0d68344c8ba46e/wrapt-1.17.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a604bf7a053f8362d27eb9fefd2097f82600b856d5abe996d623babd067b1ab5", size = 38920 }, + { url = "https://files.pythonhosted.org/packages/3b/24/11c4510de906d77e0cfb5197f1b1445d4fec42c9a39ea853d482698ac681/wrapt-1.17.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cbabee4f083b6b4cd282f5b817a867cf0b1028c54d445b7ec7cfe6505057cf8", size = 88690 }, + { url = "https://files.pythonhosted.org/packages/71/d7/cfcf842291267bf455b3e266c0c29dcb675b5540ee8b50ba1699abf3af45/wrapt-1.17.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49703ce2ddc220df165bd2962f8e03b84c89fee2d65e1c24a7defff6f988f4d6", size = 80861 }, + { url = "https://files.pythonhosted.org/packages/d5/66/5d973e9f3e7370fd686fb47a9af3319418ed925c27d72ce16b791231576d/wrapt-1.17.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8112e52c5822fc4253f3901b676c55ddf288614dc7011634e2719718eaa187dc", size = 89174 }, + { url 
= "https://files.pythonhosted.org/packages/a7/d3/8e17bb70f6ae25dabc1aaf990f86824e4fd98ee9cadf197054e068500d27/wrapt-1.17.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9fee687dce376205d9a494e9c121e27183b2a3df18037f89d69bd7b35bcf59e2", size = 86721 }, + { url = "https://files.pythonhosted.org/packages/6f/54/f170dfb278fe1c30d0ff864513cff526d624ab8de3254b20abb9cffedc24/wrapt-1.17.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:18983c537e04d11cf027fbb60a1e8dfd5190e2b60cc27bc0808e653e7b218d1b", size = 79763 }, + { url = "https://files.pythonhosted.org/packages/4a/98/de07243751f1c4a9b15c76019250210dd3486ce098c3d80d5f729cba029c/wrapt-1.17.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:703919b1633412ab54bcf920ab388735832fdcb9f9a00ae49387f0fe67dad504", size = 87585 }, + { url = "https://files.pythonhosted.org/packages/f9/f0/13925f4bd6548013038cdeb11ee2cbd4e37c30f8bfd5db9e5a2a370d6e20/wrapt-1.17.2-cp313-cp313-win32.whl", hash = "sha256:abbb9e76177c35d4e8568e58650aa6926040d6a9f6f03435b7a522bf1c487f9a", size = 36676 }, + { url = "https://files.pythonhosted.org/packages/bf/ae/743f16ef8c2e3628df3ddfd652b7d4c555d12c84b53f3d8218498f4ade9b/wrapt-1.17.2-cp313-cp313-win_amd64.whl", hash = "sha256:69606d7bb691b50a4240ce6b22ebb319c1cfb164e5f6569835058196e0f3a845", size = 38871 }, + { url = "https://files.pythonhosted.org/packages/3d/bc/30f903f891a82d402ffb5fda27ec1d621cc97cb74c16fea0b6141f1d4e87/wrapt-1.17.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:4a721d3c943dae44f8e243b380cb645a709ba5bd35d3ad27bc2ed947e9c68192", size = 56312 }, + { url = "https://files.pythonhosted.org/packages/8a/04/c97273eb491b5f1c918857cd26f314b74fc9b29224521f5b83f872253725/wrapt-1.17.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:766d8bbefcb9e00c3ac3b000d9acc51f1b399513f44d77dfe0eb026ad7c9a19b", size = 40062 }, + { url = 
"https://files.pythonhosted.org/packages/4e/ca/3b7afa1eae3a9e7fefe499db9b96813f41828b9fdb016ee836c4c379dadb/wrapt-1.17.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e496a8ce2c256da1eb98bd15803a79bee00fc351f5dfb9ea82594a3f058309e0", size = 40155 }, + { url = "https://files.pythonhosted.org/packages/89/be/7c1baed43290775cb9030c774bc53c860db140397047cc49aedaf0a15477/wrapt-1.17.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d615e4fe22f4ad3528448c193b218e077656ca9ccb22ce2cb20db730f8d306", size = 113471 }, + { url = "https://files.pythonhosted.org/packages/32/98/4ed894cf012b6d6aae5f5cc974006bdeb92f0241775addad3f8cd6ab71c8/wrapt-1.17.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a5aaeff38654462bc4b09023918b7f21790efb807f54c000a39d41d69cf552cb", size = 101208 }, + { url = "https://files.pythonhosted.org/packages/ea/fd/0c30f2301ca94e655e5e057012e83284ce8c545df7661a78d8bfca2fac7a/wrapt-1.17.2-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a7d15bbd2bc99e92e39f49a04653062ee6085c0e18b3b7512a4f2fe91f2d681", size = 109339 }, + { url = "https://files.pythonhosted.org/packages/75/56/05d000de894c4cfcb84bcd6b1df6214297b8089a7bd324c21a4765e49b14/wrapt-1.17.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:e3890b508a23299083e065f435a492b5435eba6e304a7114d2f919d400888cc6", size = 110232 }, + { url = "https://files.pythonhosted.org/packages/53/f8/c3f6b2cf9b9277fb0813418e1503e68414cd036b3b099c823379c9575e6d/wrapt-1.17.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:8c8b293cd65ad716d13d8dd3624e42e5a19cc2a2f1acc74b30c2c13f15cb61a6", size = 100476 }, + { url = "https://files.pythonhosted.org/packages/a7/b1/0bb11e29aa5139d90b770ebbfa167267b1fc548d2302c30c8f7572851738/wrapt-1.17.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4c82b8785d98cdd9fed4cac84d765d234ed3251bd6afe34cb7ac523cb93e8b4f", 
size = 106377 }, + { url = "https://files.pythonhosted.org/packages/6a/e1/0122853035b40b3f333bbb25f1939fc1045e21dd518f7f0922b60c156f7c/wrapt-1.17.2-cp313-cp313t-win32.whl", hash = "sha256:13e6afb7fe71fe7485a4550a8844cc9ffbe263c0f1a1eea569bc7091d4898555", size = 37986 }, + { url = "https://files.pythonhosted.org/packages/09/5e/1655cf481e079c1f22d0cabdd4e51733679932718dc23bf2db175f329b76/wrapt-1.17.2-cp313-cp313t-win_amd64.whl", hash = "sha256:eaf675418ed6b3b31c7a989fd007fa7c3be66ce14e5c3b27336383604c9da85c", size = 40750 }, { url = "https://files.pythonhosted.org/packages/2d/82/f56956041adef78f849db6b289b282e72b55ab8045a75abad81898c28d19/wrapt-1.17.2-py3-none-any.whl", hash = "sha256:b18f2d1533a71f069c7f82d524a52599053d4c7166e9dd374ae2136b7f40f7c8", size = 23594 }, ] @@ -4962,6 +5390,21 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/78/e3/dd76659b2811b3fd06892a8beb850e1996b63e9235af5a86ea348f053e9e/xxhash-3.5.0-cp312-cp312-win32.whl", hash = "sha256:f7b58d1fd3551b8c80a971199543379be1cee3d0d409e1f6d8b01c1a2eebf1f8", size = 30170 }, { url = "https://files.pythonhosted.org/packages/d9/6b/1c443fe6cfeb4ad1dcf231cdec96eb94fb43d6498b4469ed8b51f8b59a37/xxhash-3.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:fa0cafd3a2af231b4e113fba24a65d7922af91aeb23774a8b78228e6cd785e3e", size = 30040 }, { url = "https://files.pythonhosted.org/packages/0f/eb/04405305f290173acc0350eba6d2f1a794b57925df0398861a20fbafa415/xxhash-3.5.0-cp312-cp312-win_arm64.whl", hash = "sha256:586886c7e89cb9828bcd8a5686b12e161368e0064d040e225e72607b43858ba2", size = 26796 }, + { url = "https://files.pythonhosted.org/packages/c9/b8/e4b3ad92d249be5c83fa72916c9091b0965cb0faeff05d9a0a3870ae6bff/xxhash-3.5.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:37889a0d13b0b7d739cfc128b1c902f04e32de17b33d74b637ad42f1c55101f6", size = 31795 }, + { url = 
"https://files.pythonhosted.org/packages/fc/d8/b3627a0aebfbfa4c12a41e22af3742cf08c8ea84f5cc3367b5de2d039cce/xxhash-3.5.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:97a662338797c660178e682f3bc180277b9569a59abfb5925e8620fba00b9fc5", size = 30792 }, + { url = "https://files.pythonhosted.org/packages/c3/cc/762312960691da989c7cd0545cb120ba2a4148741c6ba458aa723c00a3f8/xxhash-3.5.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f85e0108d51092bdda90672476c7d909c04ada6923c14ff9d913c4f7dc8a3bc", size = 220950 }, + { url = "https://files.pythonhosted.org/packages/fe/e9/cc266f1042c3c13750e86a535496b58beb12bf8c50a915c336136f6168dc/xxhash-3.5.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd2fd827b0ba763ac919440042302315c564fdb797294d86e8cdd4578e3bc7f3", size = 199980 }, + { url = "https://files.pythonhosted.org/packages/bf/85/a836cd0dc5cc20376de26b346858d0ac9656f8f730998ca4324921a010b9/xxhash-3.5.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:82085c2abec437abebf457c1d12fccb30cc8b3774a0814872511f0f0562c768c", size = 428324 }, + { url = "https://files.pythonhosted.org/packages/b4/0e/15c243775342ce840b9ba34aceace06a1148fa1630cd8ca269e3223987f5/xxhash-3.5.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07fda5de378626e502b42b311b049848c2ef38784d0d67b6f30bb5008642f8eb", size = 194370 }, + { url = "https://files.pythonhosted.org/packages/87/a1/b028bb02636dfdc190da01951d0703b3d904301ed0ef6094d948983bef0e/xxhash-3.5.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c279f0d2b34ef15f922b77966640ade58b4ccdfef1c4d94b20f2a364617a493f", size = 207911 }, + { url = "https://files.pythonhosted.org/packages/80/d5/73c73b03fc0ac73dacf069fdf6036c9abad82de0a47549e9912c955ab449/xxhash-3.5.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:89e66ceed67b213dec5a773e2f7a9e8c58f64daeb38c7859d8815d2c89f39ad7", 
size = 216352 }, + { url = "https://files.pythonhosted.org/packages/b6/2a/5043dba5ddbe35b4fe6ea0a111280ad9c3d4ba477dd0f2d1fe1129bda9d0/xxhash-3.5.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:bcd51708a633410737111e998ceb3b45d3dbc98c0931f743d9bb0a209033a326", size = 203410 }, + { url = "https://files.pythonhosted.org/packages/a2/b2/9a8ded888b7b190aed75b484eb5c853ddd48aa2896e7b59bbfbce442f0a1/xxhash-3.5.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3ff2c0a34eae7df88c868be53a8dd56fbdf592109e21d4bfa092a27b0bf4a7bf", size = 210322 }, + { url = "https://files.pythonhosted.org/packages/98/62/440083fafbc917bf3e4b67c2ade621920dd905517e85631c10aac955c1d2/xxhash-3.5.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:4e28503dccc7d32e0b9817aa0cbfc1f45f563b2c995b7a66c4c8a0d232e840c7", size = 414725 }, + { url = "https://files.pythonhosted.org/packages/75/db/009206f7076ad60a517e016bb0058381d96a007ce3f79fa91d3010f49cc2/xxhash-3.5.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a6c50017518329ed65a9e4829154626f008916d36295b6a3ba336e2458824c8c", size = 192070 }, + { url = "https://files.pythonhosted.org/packages/1f/6d/c61e0668943a034abc3a569cdc5aeae37d686d9da7e39cf2ed621d533e36/xxhash-3.5.0-cp313-cp313-win32.whl", hash = "sha256:53a068fe70301ec30d868ece566ac90d873e3bb059cf83c32e76012c889b8637", size = 30172 }, + { url = "https://files.pythonhosted.org/packages/96/14/8416dce965f35e3d24722cdf79361ae154fa23e2ab730e5323aa98d7919e/xxhash-3.5.0-cp313-cp313-win_amd64.whl", hash = "sha256:80babcc30e7a1a484eab952d76a4f4673ff601f54d5142c26826502740e70b43", size = 30041 }, + { url = "https://files.pythonhosted.org/packages/27/ee/518b72faa2073f5aa8e3262408d284892cb79cf2754ba0c3a5870645ef73/xxhash-3.5.0-cp313-cp313-win_arm64.whl", hash = "sha256:4811336f1ce11cac89dcbd18f3a25c527c16311709a89313c3acaf771def2d4b", size = 26801 }, ] [[package]] @@ -5007,6 +5450,22 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/97/8a/568d07c5d4964da5b02621a517532adb8ec5ba181ad1687191fffeda0ab6/yarl-1.18.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ce1af883b94304f493698b00d0f006d56aea98aeb49d75ec7d98cd4a777e9285", size = 357861 }, { url = "https://files.pythonhosted.org/packages/7d/e3/924c3f64b6b3077889df9a1ece1ed8947e7b61b0a933f2ec93041990a677/yarl-1.18.3-cp312-cp312-win32.whl", hash = "sha256:f91c4803173928a25e1a55b943c81f55b8872f0018be83e3ad4938adffb77dd2", size = 84097 }, { url = "https://files.pythonhosted.org/packages/34/45/0e055320daaabfc169b21ff6174567b2c910c45617b0d79c68d7ab349b02/yarl-1.18.3-cp312-cp312-win_amd64.whl", hash = "sha256:7e2ee16578af3b52ac2f334c3b1f92262f47e02cc6193c598502bd46f5cd1477", size = 90399 }, + { url = "https://files.pythonhosted.org/packages/30/c7/c790513d5328a8390be8f47be5d52e141f78b66c6c48f48d241ca6bd5265/yarl-1.18.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:90adb47ad432332d4f0bc28f83a5963f426ce9a1a8809f5e584e704b82685dcb", size = 140789 }, + { url = "https://files.pythonhosted.org/packages/30/aa/a2f84e93554a578463e2edaaf2300faa61c8701f0898725842c704ba5444/yarl-1.18.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:913829534200eb0f789d45349e55203a091f45c37a2674678744ae52fae23efa", size = 94144 }, + { url = "https://files.pythonhosted.org/packages/c6/fc/d68d8f83714b221a85ce7866832cba36d7c04a68fa6a960b908c2c84f325/yarl-1.18.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:ef9f7768395923c3039055c14334ba4d926f3baf7b776c923c93d80195624782", size = 91974 }, + { url = "https://files.pythonhosted.org/packages/56/4e/d2563d8323a7e9a414b5b25341b3942af5902a2263d36d20fb17c40411e2/yarl-1.18.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88a19f62ff30117e706ebc9090b8ecc79aeb77d0b1f5ec10d2d27a12bc9f66d0", size = 333587 }, + { url = 
"https://files.pythonhosted.org/packages/25/c9/cfec0bc0cac8d054be223e9f2c7909d3e8442a856af9dbce7e3442a8ec8d/yarl-1.18.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e17c9361d46a4d5addf777c6dd5eab0715a7684c2f11b88c67ac37edfba6c482", size = 344386 }, + { url = "https://files.pythonhosted.org/packages/ab/5d/4c532190113b25f1364d25f4c319322e86232d69175b91f27e3ebc2caf9a/yarl-1.18.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1a74a13a4c857a84a845505fd2d68e54826a2cd01935a96efb1e9d86c728e186", size = 345421 }, + { url = "https://files.pythonhosted.org/packages/23/d1/6cdd1632da013aa6ba18cee4d750d953104a5e7aac44e249d9410a972bf5/yarl-1.18.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41f7ce59d6ee7741af71d82020346af364949314ed3d87553763a2df1829cc58", size = 339384 }, + { url = "https://files.pythonhosted.org/packages/9a/c4/6b3c39bec352e441bd30f432cda6ba51681ab19bb8abe023f0d19777aad1/yarl-1.18.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f52a265001d830bc425f82ca9eabda94a64a4d753b07d623a9f2863fde532b53", size = 326689 }, + { url = "https://files.pythonhosted.org/packages/23/30/07fb088f2eefdc0aa4fc1af4e3ca4eb1a3aadd1ce7d866d74c0f124e6a85/yarl-1.18.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:82123d0c954dc58db301f5021a01854a85bf1f3bb7d12ae0c01afc414a882ca2", size = 345453 }, + { url = "https://files.pythonhosted.org/packages/63/09/d54befb48f9cd8eec43797f624ec37783a0266855f4930a91e3d5c7717f8/yarl-1.18.3-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:2ec9bbba33b2d00999af4631a3397d1fd78290c48e2a3e52d8dd72db3a067ac8", size = 341872 }, + { url = "https://files.pythonhosted.org/packages/91/26/fd0ef9bf29dd906a84b59f0cd1281e65b0c3e08c6aa94b57f7d11f593518/yarl-1.18.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:fbd6748e8ab9b41171bb95c6142faf068f5ef1511935a0aa07025438dd9a9bc1", size = 347497 }, + { url = 
"https://files.pythonhosted.org/packages/d9/b5/14ac7a256d0511b2ac168d50d4b7d744aea1c1aa20c79f620d1059aab8b2/yarl-1.18.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:877d209b6aebeb5b16c42cbb377f5f94d9e556626b1bfff66d7b0d115be88d0a", size = 359981 }, + { url = "https://files.pythonhosted.org/packages/ca/b3/d493221ad5cbd18bc07e642894030437e405e1413c4236dd5db6e46bcec9/yarl-1.18.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:b464c4ab4bfcb41e3bfd3f1c26600d038376c2de3297760dfe064d2cb7ea8e10", size = 366229 }, + { url = "https://files.pythonhosted.org/packages/04/56/6a3e2a5d9152c56c346df9b8fb8edd2c8888b1e03f96324d457e5cf06d34/yarl-1.18.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8d39d351e7faf01483cc7ff7c0213c412e38e5a340238826be7e0e4da450fdc8", size = 360383 }, + { url = "https://files.pythonhosted.org/packages/fd/b7/4b3c7c7913a278d445cc6284e59b2e62fa25e72758f888b7a7a39eb8423f/yarl-1.18.3-cp313-cp313-win32.whl", hash = "sha256:61ee62ead9b68b9123ec24bc866cbef297dd266175d53296e2db5e7f797f902d", size = 310152 }, + { url = "https://files.pythonhosted.org/packages/f5/d5/688db678e987c3e0fb17867970700b92603cadf36c56e5fb08f23e822a0c/yarl-1.18.3-cp313-cp313-win_amd64.whl", hash = "sha256:578e281c393af575879990861823ef19d66e2b1d0098414855dd367e234f5b3c", size = 315723 }, { url = "https://files.pythonhosted.org/packages/f5/4b/a06e0ec3d155924f77835ed2d167ebd3b211a7b0853da1cf8d8414d784ef/yarl-1.18.3-py3-none-any.whl", hash = "sha256:b57f4f58099328dfb26c6a771d09fb20dbbae81d20cfb66141251ea063bd101b", size = 45109 }, ] @@ -5030,4 +5489,4 @@ source = { registry = "https://pypi.org/simple" } sdist = { url = "https://files.pythonhosted.org/packages/3f/50/bad581df71744867e9468ebd0bcd6505de3b275e06f202c2cb016e3ff56f/zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4", size = 24545 } wheels = [ { url = 
"https://files.pythonhosted.org/packages/b7/1a/7e4798e9339adc931158c9d69ecc34f5e6791489d469f5e50ec15e35f458/zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931", size = 9630 }, -] +] \ No newline at end of file From 23bb0d927f6924c0b5f5a89140fdfbf4bdb1694c Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Wed, 5 Mar 2025 18:10:24 -0800 Subject: [PATCH 214/623] chore: format --- backend/open_webui/static/site.webmanifest | 40 +++++++++++----------- backend/open_webui/utils/auth.py | 2 +- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/backend/open_webui/static/site.webmanifest b/backend/open_webui/static/site.webmanifest index 2b74733fa23..95915ae2bca 100644 --- a/backend/open_webui/static/site.webmanifest +++ b/backend/open_webui/static/site.webmanifest @@ -1,21 +1,21 @@ { - "name": "Open WebUI", - "short_name": "WebUI", - "icons": [ - { - "src": "/static/web-app-manifest-192x192.png", - "sizes": "192x192", - "type": "image/png", - "purpose": "maskable" - }, - { - "src": "/static/web-app-manifest-512x512.png", - "sizes": "512x512", - "type": "image/png", - "purpose": "maskable" - } - ], - "theme_color": "#ffffff", - "background_color": "#ffffff", - "display": "standalone" -} + "name": "Open WebUI", + "short_name": "WebUI", + "icons": [ + { + "src": "/static/web-app-manifest-192x192.png", + "sizes": "192x192", + "type": "image/png", + "purpose": "maskable" + }, + { + "src": "/static/web-app-manifest-512x512.png", + "sizes": "512x512", + "type": "image/png", + "purpose": "maskable" + } + ], + "theme_color": "#ffffff", + "background_color": "#ffffff", + "display": "standalone" +} \ No newline at end of file diff --git a/backend/open_webui/utils/auth.py b/backend/open_webui/utils/auth.py index d0c02a569c3..6dd3234b061 100644 --- a/backend/open_webui/utils/auth.py +++ b/backend/open_webui/utils/auth.py @@ -72,7 +72,7 @@ def get_license_data(app, key): if key: try: res = requests.post( - 
"https://api.openwebui.com/api/v1/license", + "https://api.openwebui.com/api/v1/license/", json={"key": key, "version": "1"}, timeout=5, ) From f9b44c6bf2834fa36c5b54cd6706ab699553a384 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Wed, 5 Mar 2025 18:39:32 -0800 Subject: [PATCH 215/623] refac --- src/lib/components/chat/Chat.svelte | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib/components/chat/Chat.svelte b/src/lib/components/chat/Chat.svelte index 74f57f564f4..338ff75d007 100644 --- a/src/lib/components/chat/Chat.svelte +++ b/src/lib/components/chat/Chat.svelte @@ -1937,7 +1937,7 @@ - {#if $banners.length > 0 && !history.currentId && !$chatId && selectedModels.length <= 1} + {#if ($banners.length > 0 && !history.currentId && !$chatId && selectedModels.length <= 1) || ($config?.license_metadata?.type ?? null) === 'trial' || (($config?.license_metadata?.seats ?? null) !== null && $config?.user_count > $config?.license_metadata?.seats)}
{#if ($config?.license_metadata?.type ?? null) === 'trial'} From 5e78a5c510c959625120445aaebd279d1731efc2 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Wed, 5 Mar 2025 19:06:28 -0800 Subject: [PATCH 216/623] enh: enable_code_execution toggle Co-Authored-By: recrudesce <6450799+recrudesce@users.noreply.github.com> --- backend/open_webui/config.py | 9 ++++++++- backend/open_webui/main.py | 3 +++ backend/open_webui/routers/configs.py | 5 +++++ src/lib/components/admin/Settings/CodeExecution.svelte | 10 ++++++++++ src/lib/components/chat/Chat.svelte | 2 +- src/lib/components/chat/Messages/CodeBlock.svelte | 2 +- 6 files changed, 28 insertions(+), 3 deletions(-) diff --git a/backend/open_webui/config.py b/backend/open_webui/config.py index b2a1dc00b55..9b8e865fc27 100644 --- a/backend/open_webui/config.py +++ b/backend/open_webui/config.py @@ -1377,6 +1377,11 @@ class BannerModel(BaseModel): # Code Interpreter #################################### +ENABLE_CODE_EXECUTION = PersistentConfig( + "ENABLE_CODE_EXECUTION", + "code_execution.enable", + os.environ.get("ENABLE_CODE_EXECUTION", "True").lower() == "true", +) CODE_EXECUTION_ENGINE = PersistentConfig( "CODE_EXECUTION_ENGINE", @@ -1553,7 +1558,9 @@ class BannerModel(BaseModel): ELASTICSEARCH_PASSWORD = os.environ.get("ELASTICSEARCH_PASSWORD", None) ELASTICSEARCH_CLOUD_ID = os.environ.get("ELASTICSEARCH_CLOUD_ID", None) SSL_ASSERT_FINGERPRINT = os.environ.get("SSL_ASSERT_FINGERPRINT", None) -ELASTICSEARCH_INDEX_PREFIX = os.environ.get("ELASTICSEARCH_INDEX_PREFIX", "open_webui_collections") +ELASTICSEARCH_INDEX_PREFIX = os.environ.get( + "ELASTICSEARCH_INDEX_PREFIX", "open_webui_collections" +) # Pgvector PGVECTOR_DB_URL = os.environ.get("PGVECTOR_DB_URL", DATABASE_URL) if VECTOR_DB == "pgvector" and not PGVECTOR_DB_URL.startswith("postgres"): diff --git a/backend/open_webui/main.py b/backend/open_webui/main.py index 779fcec2b6c..416460837ea 100644 --- a/backend/open_webui/main.py +++ 
b/backend/open_webui/main.py @@ -105,6 +105,7 @@ # Direct Connections ENABLE_DIRECT_CONNECTIONS, # Code Execution + ENABLE_CODE_EXECUTION, CODE_EXECUTION_ENGINE, CODE_EXECUTION_JUPYTER_URL, CODE_EXECUTION_JUPYTER_AUTH, @@ -660,6 +661,7 @@ async def lifespan(app: FastAPI): # ######################################## +app.state.config.ENABLE_CODE_EXECUTION = ENABLE_CODE_EXECUTION app.state.config.CODE_EXECUTION_ENGINE = CODE_EXECUTION_ENGINE app.state.config.CODE_EXECUTION_JUPYTER_URL = CODE_EXECUTION_JUPYTER_URL app.state.config.CODE_EXECUTION_JUPYTER_AUTH = CODE_EXECUTION_JUPYTER_AUTH @@ -1173,6 +1175,7 @@ async def get_app_config(request: Request): "enable_direct_connections": app.state.config.ENABLE_DIRECT_CONNECTIONS, "enable_channels": app.state.config.ENABLE_CHANNELS, "enable_web_search": app.state.config.ENABLE_RAG_WEB_SEARCH, + "enable_code_execution": app.state.config.ENABLE_CODE_EXECUTION, "enable_code_interpreter": app.state.config.ENABLE_CODE_INTERPRETER, "enable_image_generation": app.state.config.ENABLE_IMAGE_GENERATION, "enable_autocomplete_generation": app.state.config.ENABLE_AUTOCOMPLETE_GENERATION, diff --git a/backend/open_webui/routers/configs.py b/backend/open_webui/routers/configs.py index 388c44f9c61..2a4c651f2a4 100644 --- a/backend/open_webui/routers/configs.py +++ b/backend/open_webui/routers/configs.py @@ -70,6 +70,7 @@ async def set_direct_connections_config( # CodeInterpreterConfig ############################ class CodeInterpreterConfigForm(BaseModel): + ENABLE_CODE_EXECUTION: bool CODE_EXECUTION_ENGINE: str CODE_EXECUTION_JUPYTER_URL: Optional[str] CODE_EXECUTION_JUPYTER_AUTH: Optional[str] @@ -89,6 +90,7 @@ class CodeInterpreterConfigForm(BaseModel): @router.get("/code_execution", response_model=CodeInterpreterConfigForm) async def get_code_execution_config(request: Request, user=Depends(get_admin_user)): return { + "ENABLE_CODE_EXECUTION": request.app.state.config.ENABLE_CODE_EXECUTION, "CODE_EXECUTION_ENGINE": 
request.app.state.config.CODE_EXECUTION_ENGINE, "CODE_EXECUTION_JUPYTER_URL": request.app.state.config.CODE_EXECUTION_JUPYTER_URL, "CODE_EXECUTION_JUPYTER_AUTH": request.app.state.config.CODE_EXECUTION_JUPYTER_AUTH, @@ -111,6 +113,8 @@ async def set_code_execution_config( request: Request, form_data: CodeInterpreterConfigForm, user=Depends(get_admin_user) ): + request.app.state.config.ENABLE_CODE_EXECUTION = form_data.ENABLE_CODE_EXECUTION + request.app.state.config.CODE_EXECUTION_ENGINE = form_data.CODE_EXECUTION_ENGINE request.app.state.config.CODE_EXECUTION_JUPYTER_URL = ( form_data.CODE_EXECUTION_JUPYTER_URL @@ -153,6 +157,7 @@ async def set_code_execution_config( ) return { + "ENABLE_CODE_EXECUTION": request.app.state.config.ENABLE_CODE_EXECUTION, "CODE_EXECUTION_ENGINE": request.app.state.config.CODE_EXECUTION_ENGINE, "CODE_EXECUTION_JUPYTER_URL": request.app.state.config.CODE_EXECUTION_JUPYTER_URL, "CODE_EXECUTION_JUPYTER_AUTH": request.app.state.config.CODE_EXECUTION_JUPYTER_AUTH, diff --git a/src/lib/components/admin/Settings/CodeExecution.svelte b/src/lib/components/admin/Settings/CodeExecution.svelte index c8353745512..6050fb26bb3 100644 --- a/src/lib/components/admin/Settings/CodeExecution.svelte +++ b/src/lib/components/admin/Settings/CodeExecution.svelte @@ -45,6 +45,16 @@
+
+
+
+ {$i18n.t('Enable Code Execution')} +
+ + +
+
+
{$i18n.t('Code Execution Engine')}
diff --git a/src/lib/components/chat/Chat.svelte b/src/lib/components/chat/Chat.svelte index 338ff75d007..ca766c9f765 100644 --- a/src/lib/components/chat/Chat.svelte +++ b/src/lib/components/chat/Chat.svelte @@ -1937,7 +1937,7 @@ - {#if ($banners.length > 0 && !history.currentId && !$chatId && selectedModels.length <= 1) || ($config?.license_metadata?.type ?? null) === 'trial' || (($config?.license_metadata?.seats ?? null) !== null && $config?.user_count > $config?.license_metadata?.seats)} + {#if !history.currentId && !$chatId && selectedModels.length <= 1 && ($banners.length > 0 || ($config?.license_metadata?.type ?? null) === 'trial' || (($config?.license_metadata?.seats ?? null) !== null && $config?.user_count > $config?.license_metadata?.seats))}
{#if ($config?.license_metadata?.type ?? null) === 'trial'} diff --git a/src/lib/components/chat/Messages/CodeBlock.svelte b/src/lib/components/chat/Messages/CodeBlock.svelte index 64fa1d973c8..c6a4f0328a0 100644 --- a/src/lib/components/chat/Messages/CodeBlock.svelte +++ b/src/lib/components/chat/Messages/CodeBlock.svelte @@ -439,7 +439,7 @@
- {#if lang.toLowerCase() === 'python' || lang.toLowerCase() === 'py' || (lang === '' && checkPythonCode(code))} + {#if ($config?.features?.enable_code_execution ?? true) && (lang.toLowerCase() === 'python' || lang.toLowerCase() === 'py' || (lang === '' && checkPythonCode(code)))} {#if executing}
Running
{:else if run} From 6eccf8668e19ef05424351b6e3a5a9da977eca8c Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Wed, 5 Mar 2025 19:14:43 -0800 Subject: [PATCH 217/623] fix: non-root container issue --- backend/open_webui/config.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/backend/open_webui/config.py b/backend/open_webui/config.py index 9b8e865fc27..1e265f2ce70 100644 --- a/backend/open_webui/config.py +++ b/backend/open_webui/config.py @@ -593,7 +593,10 @@ def oidc_oauth_register(client): (FRONTEND_BUILD_DIR / "static") ) target_path.parent.mkdir(parents=True, exist_ok=True) - shutil.copyfile(file_path, target_path) + try: + shutil.copyfile(file_path, target_path) + except Exception as e: + logging.error(f"An error occurred: {e}") frontend_favicon = FRONTEND_BUILD_DIR / "static" / "favicon.png" From aaaebfabbe6679071502fa5587ec2e94ce9037dd Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Wed, 5 Mar 2025 19:16:59 -0800 Subject: [PATCH 218/623] chore: format --- package-lock.json | 4 ++-- package.json | 2 +- src/lib/i18n/locales/ar-BH/translation.json | 1 + src/lib/i18n/locales/bg-BG/translation.json | 1 + src/lib/i18n/locales/bn-BD/translation.json | 1 + src/lib/i18n/locales/ca-ES/translation.json | 1 + src/lib/i18n/locales/ceb-PH/translation.json | 1 + src/lib/i18n/locales/cs-CZ/translation.json | 1 + src/lib/i18n/locales/da-DK/translation.json | 1 + src/lib/i18n/locales/de-DE/translation.json | 1 + src/lib/i18n/locales/dg-DG/translation.json | 1 + src/lib/i18n/locales/el-GR/translation.json | 1 + src/lib/i18n/locales/en-GB/translation.json | 1 + src/lib/i18n/locales/en-US/translation.json | 1 + src/lib/i18n/locales/es-ES/translation.json | 1 + src/lib/i18n/locales/eu-ES/translation.json | 1 + src/lib/i18n/locales/fa-IR/translation.json | 1 + src/lib/i18n/locales/fi-FI/translation.json | 1 + src/lib/i18n/locales/fr-CA/translation.json | 1 + src/lib/i18n/locales/fr-FR/translation.json | 1 + 
src/lib/i18n/locales/he-IL/translation.json | 1 + src/lib/i18n/locales/hi-IN/translation.json | 1 + src/lib/i18n/locales/hr-HR/translation.json | 1 + src/lib/i18n/locales/hu-HU/translation.json | 1 + src/lib/i18n/locales/id-ID/translation.json | 1 + src/lib/i18n/locales/ie-GA/translation.json | 1 + src/lib/i18n/locales/it-IT/translation.json | 1 + src/lib/i18n/locales/ja-JP/translation.json | 1 + src/lib/i18n/locales/ka-GE/translation.json | 1 + src/lib/i18n/locales/ko-KR/translation.json | 1 + src/lib/i18n/locales/lt-LT/translation.json | 1 + src/lib/i18n/locales/ms-MY/translation.json | 1 + src/lib/i18n/locales/nb-NO/translation.json | 1 + src/lib/i18n/locales/nl-NL/translation.json | 1 + src/lib/i18n/locales/pa-IN/translation.json | 1 + src/lib/i18n/locales/pl-PL/translation.json | 1 + src/lib/i18n/locales/pt-BR/translation.json | 1 + src/lib/i18n/locales/pt-PT/translation.json | 1 + src/lib/i18n/locales/ro-RO/translation.json | 1 + src/lib/i18n/locales/ru-RU/translation.json | 1 + src/lib/i18n/locales/sk-SK/translation.json | 1 + src/lib/i18n/locales/sr-RS/translation.json | 1 + src/lib/i18n/locales/sv-SE/translation.json | 1 + src/lib/i18n/locales/th-TH/translation.json | 1 + src/lib/i18n/locales/tk-TW/translation.json | 1 + src/lib/i18n/locales/tr-TR/translation.json | 1 + src/lib/i18n/locales/uk-UA/translation.json | 1 + src/lib/i18n/locales/ur-PK/translation.json | 1 + src/lib/i18n/locales/vi-VN/translation.json | 1 + src/lib/i18n/locales/zh-CN/translation.json | 1 + src/lib/i18n/locales/zh-TW/translation.json | 1 + 51 files changed, 52 insertions(+), 3 deletions(-) diff --git a/package-lock.json b/package-lock.json index 719e8718d74..e98b0968cc5 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "open-webui", - "version": "0.5.19", + "version": "0.5.20", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "open-webui", - "version": "0.5.19", + "version": "0.5.20", "dependencies": { "@azure/msal-browser": 
"^4.5.0", "@codemirror/lang-javascript": "^6.2.2", diff --git a/package.json b/package.json index 63d7a49c9d8..2e4b905b199 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "open-webui", - "version": "0.5.19", + "version": "0.5.20", "private": true, "scripts": { "dev": "npm run pyodide:fetch && vite dev --host", diff --git a/src/lib/i18n/locales/ar-BH/translation.json b/src/lib/i18n/locales/ar-BH/translation.json index eb627f62624..76befab8d6b 100644 --- a/src/lib/i18n/locales/ar-BH/translation.json +++ b/src/lib/i18n/locales/ar-BH/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "تم تعيين نموذج التضمين على \"{{embedding_model}}\"", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "تمكين مشاركة المجتمع", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", diff --git a/src/lib/i18n/locales/bg-BG/translation.json b/src/lib/i18n/locales/bg-BG/translation.json index 8eda67acfad..58c8df7c7f3 100644 --- a/src/lib/i18n/locales/bg-BG/translation.json +++ b/src/lib/i18n/locales/bg-BG/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Модел за вграждане е настроен на \"{{embedding_model}}\"", "Enable API Key": "Активиране на API ключ", "Enable autocomplete generation for chat messages": "Активиране на автоматично довършване за съобщения в чата", + "Enable Code Execution": "", "Enable Code Interpreter": "Активиране на интерпретатор на код", "Enable Community Sharing": "Разрешаване на споделяне в общност", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. 
This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Активиране на заключване на паметта (mlock), за да се предотврати изваждането на данните на модела от RAM. Тази опция заключва работния набор от страници на модела в RAM, гарантирайки, че няма да бъдат изхвърлени на диска. Това може да помогне за поддържане на производителността, като се избягват грешки в страниците и се осигурява бърз достъп до данните.", diff --git a/src/lib/i18n/locales/bn-BD/translation.json b/src/lib/i18n/locales/bn-BD/translation.json index c132db62b02..a887df62bdd 100644 --- a/src/lib/i18n/locales/bn-BD/translation.json +++ b/src/lib/i18n/locales/bn-BD/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "ইমেজ ইমেবডিং মডেল সেট করা হয়েছে - \"{{embedding_model}}\"", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "সম্প্রদায় শেয়ারকরণ সক্ষম করুন", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. 
This can help maintain performance by avoiding page faults and ensuring fast data access.": "", diff --git a/src/lib/i18n/locales/ca-ES/translation.json b/src/lib/i18n/locales/ca-ES/translation.json index 6581e4d4e1d..b4c6d4e5e5e 100644 --- a/src/lib/i18n/locales/ca-ES/translation.json +++ b/src/lib/i18n/locales/ca-ES/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Model d'incrustació configurat a \"{{embedding_model}}\"", "Enable API Key": "Activar la Clau API", "Enable autocomplete generation for chat messages": "Activar la generació automàtica per als missatges del xat", + "Enable Code Execution": "", "Enable Code Interpreter": "Activar l'intèrpret de codi", "Enable Community Sharing": "Activar l'ús compartit amb la comunitat", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Activar el bloqueig de memòria (mlock) per evitar que les dades del model s'intercanviïn fora de la memòria RAM. Aquesta opció bloqueja el conjunt de pàgines de treball del model a la memòria RAM, assegurant-se que no s'intercanviaran al disc. Això pot ajudar a mantenir el rendiment evitant errors de pàgina i garantint un accés ràpid a les dades.", diff --git a/src/lib/i18n/locales/ceb-PH/translation.json b/src/lib/i18n/locales/ceb-PH/translation.json index 2ae99a9290b..95e65eab66f 100644 --- a/src/lib/i18n/locales/ceb-PH/translation.json +++ b/src/lib/i18n/locales/ceb-PH/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. 
This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", diff --git a/src/lib/i18n/locales/cs-CZ/translation.json b/src/lib/i18n/locales/cs-CZ/translation.json index 6ee02804ebf..7839d00ab44 100644 --- a/src/lib/i18n/locales/cs-CZ/translation.json +++ b/src/lib/i18n/locales/cs-CZ/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Model vkládání nastaven na \"{{embedding_model}}\"", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "Povolit sdílení komunity", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", diff --git a/src/lib/i18n/locales/da-DK/translation.json b/src/lib/i18n/locales/da-DK/translation.json index 598213dcc09..c5cff11ebe2 100644 --- a/src/lib/i18n/locales/da-DK/translation.json +++ b/src/lib/i18n/locales/da-DK/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Embedding model sat til \"{{embedding_model}}\"", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "Aktiver deling til Community", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. 
This can help maintain performance by avoiding page faults and ensuring fast data access.": "", diff --git a/src/lib/i18n/locales/de-DE/translation.json b/src/lib/i18n/locales/de-DE/translation.json index a7da3d47dd5..7e721e838f5 100644 --- a/src/lib/i18n/locales/de-DE/translation.json +++ b/src/lib/i18n/locales/de-DE/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Embedding-Modell auf \"{{embedding_model}}\" gesetzt", "Enable API Key": "API-Schlüssel aktivieren", "Enable autocomplete generation for chat messages": "Automatische Vervollständigung für Chat-Nachrichten aktivieren", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "Community-Freigabe aktivieren", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Aktiviere Memory Locking (mlock), um zu verhindern, dass Modelldaten aus dem RAM ausgelagert werden. Diese Option sperrt die Arbeitsseiten des Modells im RAM, um sicherzustellen, dass sie nicht auf die Festplatte ausgelagert werden. Dies kann die Leistung verbessern, indem Page Faults vermieden und ein schneller Datenzugriff sichergestellt werden.", diff --git a/src/lib/i18n/locales/dg-DG/translation.json b/src/lib/i18n/locales/dg-DG/translation.json index 5c9dc67749b..6da9d7fd776 100644 --- a/src/lib/i18n/locales/dg-DG/translation.json +++ b/src/lib/i18n/locales/dg-DG/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. 
This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", diff --git a/src/lib/i18n/locales/el-GR/translation.json b/src/lib/i18n/locales/el-GR/translation.json index 0a5b2393e53..38b33d0b065 100644 --- a/src/lib/i18n/locales/el-GR/translation.json +++ b/src/lib/i18n/locales/el-GR/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Το μοντέλο ενσωμάτωσης έχει οριστεί σε \"{{embedding_model}}\"", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "Ενεργοποίηση Κοινοτικής Κοινής Χρήσης", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Ενεργοποίηση Κλείδωσης Μνήμης (mlock) για την αποτροπή της ανταλλαγής δεδομένων του μοντέλου από τη μνήμη RAM. Αυτή η επιλογή κλειδώνει το σύνολο εργασίας των σελίδων του μοντέλου στη μνήμη RAM, διασφαλίζοντας ότι δεν θα ανταλλαχθούν στο δίσκο. 
Αυτό μπορεί να βοηθήσει στη διατήρηση της απόδοσης αποφεύγοντας σφάλματα σελίδων και διασφαλίζοντας γρήγορη πρόσβαση στα δεδομένα.", diff --git a/src/lib/i18n/locales/en-GB/translation.json b/src/lib/i18n/locales/en-GB/translation.json index f1aa076c94e..e1f06f335c7 100644 --- a/src/lib/i18n/locales/en-GB/translation.json +++ b/src/lib/i18n/locales/en-GB/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", diff --git a/src/lib/i18n/locales/en-US/translation.json b/src/lib/i18n/locales/en-US/translation.json index f1aa076c94e..e1f06f335c7 100644 --- a/src/lib/i18n/locales/en-US/translation.json +++ b/src/lib/i18n/locales/en-US/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. 
This can help maintain performance by avoiding page faults and ensuring fast data access.": "", diff --git a/src/lib/i18n/locales/es-ES/translation.json b/src/lib/i18n/locales/es-ES/translation.json index c7817e8b31d..00f6e18d152 100644 --- a/src/lib/i18n/locales/es-ES/translation.json +++ b/src/lib/i18n/locales/es-ES/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Modelo de Embedding configurado a \"{{embedding_model}}\"", "Enable API Key": "Habilitar clave de API", "Enable autocomplete generation for chat messages": "Habilitar generación de autocompletado para mensajes de chat", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "Habilitar el uso compartido de la comunidad", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Habilitar bloqueo de memoria (mlock) para evitar que los datos del modelo se intercambien fuera de la RAM. Esta opción bloquea el conjunto de páginas de trabajo del modelo en la RAM, asegurando que no se intercambiarán fuera del disco. 
Esto puede ayudar a mantener el rendimiento evitando fallos de página y asegurando un acceso rápido a los datos.", diff --git a/src/lib/i18n/locales/eu-ES/translation.json b/src/lib/i18n/locales/eu-ES/translation.json index f9c1d9f10f1..5759a474764 100644 --- a/src/lib/i18n/locales/eu-ES/translation.json +++ b/src/lib/i18n/locales/eu-ES/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Embedding eredua \"{{embedding_model}}\"-ra ezarri da", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "Gaitu Komunitatearen Partekatzea", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Gaitu Memoria Blokeatzea (mlock) ereduaren datuak RAM memoriatik kanpo ez trukatzeko. Aukera honek ereduaren lan-orri multzoa RAMean blokatzen du, diskora ez direla trukatuko ziurtatuz. Honek errendimendua mantentzen lagun dezake, orri-hutsegiteak saihestuz eta datuen sarbide azkarra bermatuz.", diff --git a/src/lib/i18n/locales/fa-IR/translation.json b/src/lib/i18n/locales/fa-IR/translation.json index 937cfbcdbab..3ce451d2cf2 100644 --- a/src/lib/i18n/locales/fa-IR/translation.json +++ b/src/lib/i18n/locales/fa-IR/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "مدل پیدائش را به \"{{embedding_model}}\" تنظیم کنید", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "فعالسازی اشتراک انجمن", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. 
This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", diff --git a/src/lib/i18n/locales/fi-FI/translation.json b/src/lib/i18n/locales/fi-FI/translation.json index 4b9ff557aed..6fea592da5d 100644 --- a/src/lib/i18n/locales/fi-FI/translation.json +++ b/src/lib/i18n/locales/fi-FI/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "\"{{embedding_model}}\" valittu upotusmalliksi", "Enable API Key": "Ota API -avain käyttöön", "Enable autocomplete generation for chat messages": "Ota automaattinen täydennys käyttöön keskusteluviesteissä", + "Enable Code Execution": "", "Enable Code Interpreter": "Ota ohjelmatulkki käyttöön", "Enable Community Sharing": "Ota yhteisön jakaminen käyttöön", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Ota Memory Locking (mlock) käyttöön estääksesi mallidatan vaihtamisen pois RAM-muistista. Tämä lukitsee mallin työsivut RAM-muistiin, varmistaen että niitä ei vaihdeta levylle. 
Tämä voi parantaa suorituskykyä välttämällä sivuvikoja ja varmistamalla nopean tietojen käytön.", diff --git a/src/lib/i18n/locales/fr-CA/translation.json b/src/lib/i18n/locales/fr-CA/translation.json index 769c95787ca..7a08c832179 100644 --- a/src/lib/i18n/locales/fr-CA/translation.json +++ b/src/lib/i18n/locales/fr-CA/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Modèle d'encodage défini sur « {{embedding_model}} »", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "Activer le partage communautaire", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", diff --git a/src/lib/i18n/locales/fr-FR/translation.json b/src/lib/i18n/locales/fr-FR/translation.json index 9cba643358e..5116d65bc93 100644 --- a/src/lib/i18n/locales/fr-FR/translation.json +++ b/src/lib/i18n/locales/fr-FR/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Modèle d'embedding défini sur « {{embedding_model}} »", "Enable API Key": "Activer la clé API", "Enable autocomplete generation for chat messages": "Activer la génération des suggestions pour les messages", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "Activer le partage communautaire", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. 
This can help maintain performance by avoiding page faults and ensuring fast data access.": "Activer le verrouillage de la mémoire (mlock) pour empêcher les données du modèle d'être échangées de la RAM. Cette option verrouille l'ensemble de pages de travail du modèle en RAM, garantissant qu'elles ne seront pas échangées vers le disque. Cela peut aider à maintenir les performances en évitant les défauts de page et en assurant un accès rapide aux données.", diff --git a/src/lib/i18n/locales/he-IL/translation.json b/src/lib/i18n/locales/he-IL/translation.json index e5af652c94e..6021462e158 100644 --- a/src/lib/i18n/locales/he-IL/translation.json +++ b/src/lib/i18n/locales/he-IL/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "מודל ההטמעה הוגדר ל-\"{{embedding_model}}\"", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "הפיכת שיתוף קהילה לזמין", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", diff --git a/src/lib/i18n/locales/hi-IN/translation.json b/src/lib/i18n/locales/hi-IN/translation.json index 2f25632d9eb..47584b852a1 100644 --- a/src/lib/i18n/locales/hi-IN/translation.json +++ b/src/lib/i18n/locales/hi-IN/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "एम्बेडिंग मॉडल को \"{{embedding_model}}\" पर सेट किया गया", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "समुदाय साझाकरण सक्षम करें", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. 
This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", diff --git a/src/lib/i18n/locales/hr-HR/translation.json b/src/lib/i18n/locales/hr-HR/translation.json index ffd87f83d7c..d36aac01992 100644 --- a/src/lib/i18n/locales/hr-HR/translation.json +++ b/src/lib/i18n/locales/hr-HR/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Embedding model postavljen na \"{{embedding_model}}\"", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "Omogući zajedničko korištenje zajednice", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", diff --git a/src/lib/i18n/locales/hu-HU/translation.json b/src/lib/i18n/locales/hu-HU/translation.json index 306ec737f19..ce86bb1318f 100644 --- a/src/lib/i18n/locales/hu-HU/translation.json +++ b/src/lib/i18n/locales/hu-HU/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Beágyazási modell beállítva: \"{{embedding_model}}\"", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "Közösségi megosztás engedélyezése", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. 
This can help maintain performance by avoiding page faults and ensuring fast data access.": "", diff --git a/src/lib/i18n/locales/id-ID/translation.json b/src/lib/i18n/locales/id-ID/translation.json index 43f4c58fc90..2b5ec88f712 100644 --- a/src/lib/i18n/locales/id-ID/translation.json +++ b/src/lib/i18n/locales/id-ID/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Model penyematan diatur ke \"{{embedding_model}}\"", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "Aktifkan Berbagi Komunitas", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", diff --git a/src/lib/i18n/locales/ie-GA/translation.json b/src/lib/i18n/locales/ie-GA/translation.json index 41b803ff1e1..bfa7bd63c10 100644 --- a/src/lib/i18n/locales/ie-GA/translation.json +++ b/src/lib/i18n/locales/ie-GA/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Múnla leabaithe socraithe go \"{{embedding_model}}\"", "Enable API Key": "Cumasaigh Eochair API", "Enable autocomplete generation for chat messages": "Cumasaigh giniúint uathchríochnaithe le haghaidh teachtaireachtaí comhrá", + "Enable Code Execution": "", "Enable Code Interpreter": "Cumasaigh Ateangaire Cóid", "Enable Community Sharing": "Cumasaigh Comhroinnt Pobail", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Cumasaigh Glasáil Cuimhne (mlock) chun sonraí samhaltaithe a chosc ó RAM. 
Glasálann an rogha seo sraith oibre leathanaigh an mhúnla isteach i RAM, ag cinntiú nach ndéanfar iad a mhalartú go diosca. Is féidir leis seo cabhrú le feidhmíocht a choinneáil trí lochtanna leathanaigh a sheachaint agus rochtain tapa ar shonraí a chinntiú.", diff --git a/src/lib/i18n/locales/it-IT/translation.json b/src/lib/i18n/locales/it-IT/translation.json index 2499e366c07..17aa0dedef4 100644 --- a/src/lib/i18n/locales/it-IT/translation.json +++ b/src/lib/i18n/locales/it-IT/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Modello di embedding impostato su \"{{embedding_model}}\"", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "Abilita la condivisione della community", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", diff --git a/src/lib/i18n/locales/ja-JP/translation.json b/src/lib/i18n/locales/ja-JP/translation.json index 60ebb353046..78cee236f28 100644 --- a/src/lib/i18n/locales/ja-JP/translation.json +++ b/src/lib/i18n/locales/ja-JP/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "埋め込みモデルを\"{{embedding_model}}\"に設定しました", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "コミュニティ共有を有効にする", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. 
This can help maintain performance by avoiding page faults and ensuring fast data access.": "", diff --git a/src/lib/i18n/locales/ka-GE/translation.json b/src/lib/i18n/locales/ka-GE/translation.json index 980a1bec8bb..f64fa449998 100644 --- a/src/lib/i18n/locales/ka-GE/translation.json +++ b/src/lib/i18n/locales/ka-GE/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "ჩაშენებული მოდელი დაყენებულია მნიშვნელობაზე \"{{embedding_model}}\"", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "საზოგადოების გაზიარების ჩართვა", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", diff --git a/src/lib/i18n/locales/ko-KR/translation.json b/src/lib/i18n/locales/ko-KR/translation.json index bed205601bd..649eae61b28 100644 --- a/src/lib/i18n/locales/ko-KR/translation.json +++ b/src/lib/i18n/locales/ko-KR/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "임베딩 모델을 \"{{embedding_model}}\"로 설정함", "Enable API Key": "API 키 활성화", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "커뮤니티 공유 활성화", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. 
This can help maintain performance by avoiding page faults and ensuring fast data access.": "", diff --git a/src/lib/i18n/locales/lt-LT/translation.json b/src/lib/i18n/locales/lt-LT/translation.json index ef85c677e8e..031fc412d81 100644 --- a/src/lib/i18n/locales/lt-LT/translation.json +++ b/src/lib/i18n/locales/lt-LT/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Embedding modelis nustatytas kaip\"{{embedding_model}}\"", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "Leisti dalinimąsi su bendruomene", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", diff --git a/src/lib/i18n/locales/ms-MY/translation.json b/src/lib/i18n/locales/ms-MY/translation.json index 8a1b1e021e5..9d6501c725b 100644 --- a/src/lib/i18n/locales/ms-MY/translation.json +++ b/src/lib/i18n/locales/ms-MY/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Model Benamkan ditetapkan kepada \"{{embedding_model}}\"", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "Benarkan Perkongsian Komuniti", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. 
This can help maintain performance by avoiding page faults and ensuring fast data access.": "", diff --git a/src/lib/i18n/locales/nb-NO/translation.json b/src/lib/i18n/locales/nb-NO/translation.json index 4e97236ff03..7aa4c97205f 100644 --- a/src/lib/i18n/locales/nb-NO/translation.json +++ b/src/lib/i18n/locales/nb-NO/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Innbyggingsmodell angitt til \"{{embedding_model}}\"", "Enable API Key": "Aktiver ", "Enable autocomplete generation for chat messages": "Aktiver automatisk utfylling av chatmeldinger", + "Enable Code Execution": "", "Enable Code Interpreter": "Aktiver kodetolker", "Enable Community Sharing": "Aktiver deling i fellesskap", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Aktiver Memory Locking (mlock) for å forhindre at modelldata byttes ut av RAM. Dette alternativet låser modellens arbeidssett med sider i RAM-minnet, slik at de ikke byttes ut til disk. Dette kan bidra til å opprettholde ytelsen ved å unngå sidefeil og sikre rask datatilgang.", diff --git a/src/lib/i18n/locales/nl-NL/translation.json b/src/lib/i18n/locales/nl-NL/translation.json index 25ffe027c75..f3909e9e873 100644 --- a/src/lib/i18n/locales/nl-NL/translation.json +++ b/src/lib/i18n/locales/nl-NL/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Embedding model ingesteld op \"{{embedding_model}}\"", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "Delen via de community inschakelen", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. 
This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Schakel Memory Locking (mlock) in om te voorkomen dat modelgegevens uit het RAM worden verwisseld. Deze optie vergrendelt de werkset pagina's van het model in het RAM, zodat ze niet naar de schijf worden uitgewisseld. Dit kan helpen om de prestaties op peil te houden door paginafouten te voorkomen en snelle gegevenstoegang te garanderen.", diff --git a/src/lib/i18n/locales/pa-IN/translation.json b/src/lib/i18n/locales/pa-IN/translation.json index e473441e1fa..e4197505eb5 100644 --- a/src/lib/i18n/locales/pa-IN/translation.json +++ b/src/lib/i18n/locales/pa-IN/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "ਐਮਬੈੱਡਿੰਗ ਮਾਡਲ ਨੂੰ \"{{embedding_model}}\" 'ਤੇ ਸੈੱਟ ਕੀਤਾ ਗਿਆ", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "ਕਮਿਊਨਿਟੀ ਸ਼ੇਅਰਿੰਗ ਨੂੰ ਸਮਰੱਥ ਕਰੋ", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. 
This can help maintain performance by avoiding page faults and ensuring fast data access.": "", diff --git a/src/lib/i18n/locales/pl-PL/translation.json b/src/lib/i18n/locales/pl-PL/translation.json index f3e1be87d1b..cbba5009b8b 100644 --- a/src/lib/i18n/locales/pl-PL/translation.json +++ b/src/lib/i18n/locales/pl-PL/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Model osadzania ustawiony na '{{embedding_model}}'", "Enable API Key": "Włącz klucz API", "Enable autocomplete generation for chat messages": "Włącz generowanie autouzupełniania dla wiadomości czatu", + "Enable Code Execution": "", "Enable Code Interpreter": "Włącz interpreter kodu", "Enable Community Sharing": "Włączanie udostępniania społecznościowego", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Włącz blokowanie pamięci (mlock), aby zapobiec swappingowi danych modelu z RAM. Ta opcja blokuje zbiór stron roboczych modelu w RAM, co gwarantuje, że nie będą one wymieniane na dysk. 
Może to pomóc w utrzymaniu wydajności poprzez unikanie błędów strony i zapewnienie szybkiego dostępu do danych.", diff --git a/src/lib/i18n/locales/pt-BR/translation.json b/src/lib/i18n/locales/pt-BR/translation.json index 9dc5c4d602d..c3cf9ea7f3b 100644 --- a/src/lib/i18n/locales/pt-BR/translation.json +++ b/src/lib/i18n/locales/pt-BR/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Modelo de embedding definido para \"{{embedding_model}}\"", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "Ativar Compartilhamento com a Comunidade", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Habilite o bloqueio de memória (mlock) para evitar que os dados do modelo sejam transferidos da RAM para a área de troca (swap). Essa opção bloqueia o conjunto de páginas em uso pelo modelo na RAM, garantindo que elas não sejam transferidas para o disco. 
Isso pode ajudar a manter o desempenho, evitando falhas de página e garantindo acesso rápido aos dados.", diff --git a/src/lib/i18n/locales/pt-PT/translation.json b/src/lib/i18n/locales/pt-PT/translation.json index 12799885db4..9bcb3b7ff27 100644 --- a/src/lib/i18n/locales/pt-PT/translation.json +++ b/src/lib/i18n/locales/pt-PT/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Modelo de Embedding definido como \"{{embedding_model}}\"", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "Active a Partilha da Comunidade", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", diff --git a/src/lib/i18n/locales/ro-RO/translation.json b/src/lib/i18n/locales/ro-RO/translation.json index 309e1e309f2..dce026085fa 100644 --- a/src/lib/i18n/locales/ro-RO/translation.json +++ b/src/lib/i18n/locales/ro-RO/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Modelul de încapsulare setat la \"{{embedding_model}}\"", "Enable API Key": "Activează cheia API", "Enable autocomplete generation for chat messages": "Activează generarea automată pentru mesajele de chat", + "Enable Code Execution": "", "Enable Code Interpreter": "Activează interpretul de cod", "Enable Community Sharing": "Activează Partajarea Comunitară", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. 
This can help maintain performance by avoiding page faults and ensuring fast data access.": "", diff --git a/src/lib/i18n/locales/ru-RU/translation.json b/src/lib/i18n/locales/ru-RU/translation.json index 2b6349ed642..6213f55a759 100644 --- a/src/lib/i18n/locales/ru-RU/translation.json +++ b/src/lib/i18n/locales/ru-RU/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Модель встраивания установлена в \"{{embedding_model}}\"", "Enable API Key": "", "Enable autocomplete generation for chat messages": "Включить генерацию автозаполнения для сообщений чата", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "Включить совместное использование", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Включите блокировку памяти (mlock), чтобы предотвратить выгрузку данных модели из ОЗУ. Эта опция блокирует рабочий набор страниц модели в оперативной памяти, гарантируя, что они не будут выгружены на диск. Это может помочь поддерживать производительность, избегая ошибок страниц и обеспечивая быстрый доступ к данным.", diff --git a/src/lib/i18n/locales/sk-SK/translation.json b/src/lib/i18n/locales/sk-SK/translation.json index 549a9bdf43b..3ca7cac5324 100644 --- a/src/lib/i18n/locales/sk-SK/translation.json +++ b/src/lib/i18n/locales/sk-SK/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Model vkladania nastavený na \"{{embedding_model}}\"", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "Povoliť zdieľanie komunity", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. 
This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", diff --git a/src/lib/i18n/locales/sr-RS/translation.json b/src/lib/i18n/locales/sr-RS/translation.json index eb41f50b3c7..abe8167154f 100644 --- a/src/lib/i18n/locales/sr-RS/translation.json +++ b/src/lib/i18n/locales/sr-RS/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Модел уградње подешен на \"{{embedding_model}}\"", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "Омогући дељење заједнице", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", diff --git a/src/lib/i18n/locales/sv-SE/translation.json b/src/lib/i18n/locales/sv-SE/translation.json index 2feb7f338e6..d3c4ebb994b 100644 --- a/src/lib/i18n/locales/sv-SE/translation.json +++ b/src/lib/i18n/locales/sv-SE/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Inbäddningsmodell inställd på \"{{embedding_model}}\"", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "Aktivera community-delning", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. 
This can help maintain performance by avoiding page faults and ensuring fast data access.": "", diff --git a/src/lib/i18n/locales/th-TH/translation.json b/src/lib/i18n/locales/th-TH/translation.json index 7beaed61283..63594bfef2f 100644 --- a/src/lib/i18n/locales/th-TH/translation.json +++ b/src/lib/i18n/locales/th-TH/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "ตั้งค่าโมเดลการฝังเป็น \"{{embedding_model}}\"", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "เปิดใช้งานการแชร์ในชุมชน", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", diff --git a/src/lib/i18n/locales/tk-TW/translation.json b/src/lib/i18n/locales/tk-TW/translation.json index f1aa076c94e..e1f06f335c7 100644 --- a/src/lib/i18n/locales/tk-TW/translation.json +++ b/src/lib/i18n/locales/tk-TW/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. 
This can help maintain performance by avoiding page faults and ensuring fast data access.": "", diff --git a/src/lib/i18n/locales/tr-TR/translation.json b/src/lib/i18n/locales/tr-TR/translation.json index e62e1f52a69..e7082300f2c 100644 --- a/src/lib/i18n/locales/tr-TR/translation.json +++ b/src/lib/i18n/locales/tr-TR/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Gömme modeli \"{{embedding_model}}\" olarak ayarlandı", "Enable API Key": "API Anahtarını Etkinleştir", "Enable autocomplete generation for chat messages": "Sohbet mesajları için otomatik tamamlama üretimini etkinleştir", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "Topluluk Paylaşımını Etkinleştir", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", diff --git a/src/lib/i18n/locales/uk-UA/translation.json b/src/lib/i18n/locales/uk-UA/translation.json index 4efc20a7a2b..5e9c7e07f71 100644 --- a/src/lib/i18n/locales/uk-UA/translation.json +++ b/src/lib/i18n/locales/uk-UA/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Встановлена модель вбудовування \"{{embedding_model}}\"", "Enable API Key": "Увімкнути ключ API", "Enable autocomplete generation for chat messages": "Увімкнути генерацію автозаповнення для повідомлень чату", + "Enable Code Execution": "", "Enable Code Interpreter": "Увімкнути інтерпретатор коду", "Enable Community Sharing": "Увімкнути спільний доступ", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. 
This can help maintain performance by avoiding page faults and ensuring fast data access.": "Увімкнути блокування пам'яті (mlock), щоб запобігти виведенню даних моделі з оперативної пам'яті. Цей параметр блокує робочий набір сторінок моделі в оперативній пам'яті, гарантуючи, що вони не будуть виведені на диск. Це може допомогти підтримувати продуктивність, уникати помилок сторінок та забезпечувати швидкий доступ до даних.", diff --git a/src/lib/i18n/locales/ur-PK/translation.json b/src/lib/i18n/locales/ur-PK/translation.json index ab255fc34fb..ffd539aa3ed 100644 --- a/src/lib/i18n/locales/ur-PK/translation.json +++ b/src/lib/i18n/locales/ur-PK/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "ایمبیڈنگ ماڈل \"{{embedding_model}}\" پر سیٹ کیا گیا ہے", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "کمیونٹی شیئرنگ فعال کریں", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", diff --git a/src/lib/i18n/locales/vi-VN/translation.json b/src/lib/i18n/locales/vi-VN/translation.json index 3f40b10b566..52c7a8201bf 100644 --- a/src/lib/i18n/locales/vi-VN/translation.json +++ b/src/lib/i18n/locales/vi-VN/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Mô hình embedding đã được thiết lập thành \"{{embedding_model}}\"", "Enable API Key": "", "Enable autocomplete generation for chat messages": "", + "Enable Code Execution": "", "Enable Code Interpreter": "", "Enable Community Sharing": "Cho phép Chia sẻ Cộng đồng", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. 
This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "", diff --git a/src/lib/i18n/locales/zh-CN/translation.json b/src/lib/i18n/locales/zh-CN/translation.json index eb848fc91c8..ebb53a1b535 100644 --- a/src/lib/i18n/locales/zh-CN/translation.json +++ b/src/lib/i18n/locales/zh-CN/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "语义向量模型设置为 \"{{embedding_model}}\"", "Enable API Key": "启用 API 密钥", "Enable autocomplete generation for chat messages": "启用聊天消息的输入框内容猜测补全", + "Enable Code Execution": "", "Enable Code Interpreter": "启用代码解释器", "Enable Community Sharing": "启用分享至社区", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "启用内存锁定(mlock)以防止模型数据被交换出RAM。此选项将模型的工作集页面锁定在RAM中,确保它们不会被交换到磁盘。这可以通过避免页面错误和确保快速数据访问来帮助维持性能。", diff --git a/src/lib/i18n/locales/zh-TW/translation.json b/src/lib/i18n/locales/zh-TW/translation.json index cba9e6291ad..884e5a19cc3 100644 --- a/src/lib/i18n/locales/zh-TW/translation.json +++ b/src/lib/i18n/locales/zh-TW/translation.json @@ -359,6 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "嵌入模型已設定為 \"{{embedding_model}}\"", "Enable API Key": "啟用 API 金鑰", "Enable autocomplete generation for chat messages": "啟用聊天訊息的自動完成生成", + "Enable Code Execution": "", "Enable Code Interpreter": "啟用程式碼解釋器", "Enable Community Sharing": "啟用社群分享", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. 
This can help maintain performance by avoiding page faults and ensuring fast data access.": "啟用記憶體鎖定(mlock)以防止模型資料被換出 RAM。此選項會將模型的工作頁面集鎖定在 RAM 中,確保它們不會被換出到磁碟。這可以透過避免頁面錯誤和確保快速資料存取來維持效能。", From d4fca9dabf084bd205473386e37d94e13cb9b487 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Wed, 5 Mar 2025 19:17:41 -0800 Subject: [PATCH 219/623] chore: format --- .../retrieval/vector/dbs/elasticsearch.py | 183 ++++++++---------- 1 file changed, 81 insertions(+), 102 deletions(-) diff --git a/backend/open_webui/retrieval/vector/dbs/elasticsearch.py b/backend/open_webui/retrieval/vector/dbs/elasticsearch.py index a558e1fb0c3..c896284946e 100644 --- a/backend/open_webui/retrieval/vector/dbs/elasticsearch.py +++ b/backend/open_webui/retrieval/vector/dbs/elasticsearch.py @@ -1,30 +1,28 @@ from elasticsearch import Elasticsearch, BadRequestError from typing import Optional import ssl -from elasticsearch.helpers import bulk,scan +from elasticsearch.helpers import bulk, scan from open_webui.retrieval.vector.main import VectorItem, SearchResult, GetResult from open_webui.config import ( ELASTICSEARCH_URL, - ELASTICSEARCH_CA_CERTS, + ELASTICSEARCH_CA_CERTS, ELASTICSEARCH_API_KEY, ELASTICSEARCH_USERNAME, - ELASTICSEARCH_PASSWORD, + ELASTICSEARCH_PASSWORD, ELASTICSEARCH_CLOUD_ID, ELASTICSEARCH_INDEX_PREFIX, SSL_ASSERT_FINGERPRINT, - ) - - class ElasticsearchClient: """ Important: - in order to reduce the number of indexes and since the embedding vector length is fixed, we avoid creating - an index for each file but store it as a text field, while seperating to different index + in order to reduce the number of indexes and since the embedding vector length is fixed, we avoid creating + an index for each file but store it as a text field, while seperating to different index baesd on the embedding length. 
""" + def __init__(self): self.index_prefix = ELASTICSEARCH_INDEX_PREFIX self.client = Elasticsearch( @@ -32,15 +30,19 @@ def __init__(self): ca_certs=ELASTICSEARCH_CA_CERTS, api_key=ELASTICSEARCH_API_KEY, cloud_id=ELASTICSEARCH_CLOUD_ID, - basic_auth=(ELASTICSEARCH_USERNAME,ELASTICSEARCH_PASSWORD) if ELASTICSEARCH_USERNAME and ELASTICSEARCH_PASSWORD else None, - ssl_assert_fingerprint=SSL_ASSERT_FINGERPRINT - + basic_auth=( + (ELASTICSEARCH_USERNAME, ELASTICSEARCH_PASSWORD) + if ELASTICSEARCH_USERNAME and ELASTICSEARCH_PASSWORD + else None + ), + ssl_assert_fingerprint=SSL_ASSERT_FINGERPRINT, ) - #Status: works - def _get_index_name(self,dimension:int)->str: + + # Status: works + def _get_index_name(self, dimension: int) -> str: return f"{self.index_prefix}_d{str(dimension)}" - - #Status: works + + # Status: works def _scan_result_to_get_result(self, result) -> GetResult: if not result: return None @@ -55,7 +57,7 @@ def _scan_result_to_get_result(self, result) -> GetResult: return GetResult(ids=[ids], documents=[documents], metadatas=[metadatas]) - #Status: works + # Status: works def _result_to_get_result(self, result) -> GetResult: if not result["hits"]["hits"]: return None @@ -70,7 +72,7 @@ def _result_to_get_result(self, result) -> GetResult: return GetResult(ids=[ids], documents=[documents], metadatas=[metadatas]) - #Status: works + # Status: works def _result_to_search_result(self, result) -> SearchResult: ids = [] distances = [] @@ -84,19 +86,21 @@ def _result_to_search_result(self, result) -> SearchResult: metadatas.append(hit["_source"].get("metadata")) return SearchResult( - ids=[ids], distances=[distances], documents=[documents], metadatas=[metadatas] + ids=[ids], + distances=[distances], + documents=[documents], + metadatas=[metadatas], ) - #Status: works + + # Status: works def _create_index(self, dimension: int): body = { "mappings": { "dynamic_templates": [ { - "strings": { - "match_mapping_type": "string", - "mapping": { - "type": "keyword" - } + 
"strings": { + "match_mapping_type": "string", + "mapping": {"type": "keyword"}, } } ], @@ -111,68 +115,52 @@ def _create_index(self, dimension: int): }, "text": {"type": "text"}, "metadata": {"type": "object"}, - } + }, } } self.client.indices.create(index=self._get_index_name(dimension), body=body) - #Status: works + + # Status: works def _create_batches(self, items: list[VectorItem], batch_size=100): for i in range(0, len(items), batch_size): - yield items[i : min(i + batch_size,len(items))] + yield items[i : min(i + batch_size, len(items))] - #Status: works - def has_collection(self,collection_name) -> bool: + # Status: works + def has_collection(self, collection_name) -> bool: query_body = {"query": {"bool": {"filter": []}}} - query_body["query"]["bool"]["filter"].append({"term": {"collection": collection_name}}) + query_body["query"]["bool"]["filter"].append( + {"term": {"collection": collection_name}} + ) try: - result = self.client.count( - index=f"{self.index_prefix}*", - body=query_body - ) - - return result.body["count"]>0 + result = self.client.count(index=f"{self.index_prefix}*", body=query_body) + + return result.body["count"] > 0 except Exception as e: return None - - def delete_collection(self, collection_name: str): - query = { - "query": { - "term": {"collection": collection_name} - } - } + query = {"query": {"term": {"collection": collection_name}}} self.client.delete_by_query(index=f"{self.index_prefix}*", body=query) - #Status: works + + # Status: works def search( self, collection_name: str, vectors: list[list[float]], limit: int ) -> Optional[SearchResult]: query = { "size": limit, - "_source": [ - "text", - "metadata" - ], + "_source": ["text", "metadata"], "query": { "script_score": { "query": { - "bool": { - "filter": [ - { - "term": { - "collection": collection_name - } - } - ] - } + "bool": {"filter": [{"term": {"collection": collection_name}}]} }, "script": { "source": "cosineSimilarity(params.vector, 'vector') + 1.0", "params": { 
"vector": vectors[0] - }, # Assuming single query vector + }, # Assuming single query vector }, } }, @@ -183,7 +171,8 @@ def search( ) return self._result_to_search_result(result) - #Status: only tested halfwat + + # Status: only tested halfwat def query( self, collection_name: str, filter: dict, limit: Optional[int] = None ) -> Optional[GetResult]: @@ -197,7 +186,9 @@ def query( for field, value in filter.items(): query_body["query"]["bool"]["filter"].append({"term": {field: value}}) - query_body["query"]["bool"]["filter"].append({"term": {"collection": collection_name}}) + query_body["query"]["bool"]["filter"].append( + {"term": {"collection": collection_name}} + ) size = limit if limit else 10 try: @@ -206,59 +197,53 @@ def query( body=query_body, size=size, ) - + return self._result_to_get_result(result) except Exception as e: return None - #Status: works - def _has_index(self,dimension:int): - return self.client.indices.exists(index=self._get_index_name(dimension=dimension)) + # Status: works + def _has_index(self, dimension: int): + return self.client.indices.exists( + index=self._get_index_name(dimension=dimension) + ) def get_or_create_index(self, dimension: int): if not self._has_index(dimension=dimension): self._create_index(dimension=dimension) - #Status: works + + # Status: works def get(self, collection_name: str) -> Optional[GetResult]: # Get all the items in the collection. 
query = { - "query": { - "bool": { - "filter": [ - { - "term": { - "collection": collection_name - } - } - ] - } - }, "_source": ["text", "metadata"]} + "query": {"bool": {"filter": [{"term": {"collection": collection_name}}]}}, + "_source": ["text", "metadata"], + } results = list(scan(self.client, index=f"{self.index_prefix}*", query=query)) - + return self._scan_result_to_get_result(results) - #Status: works + # Status: works def insert(self, collection_name: str, items: list[VectorItem]): if not self._has_index(dimension=len(items[0]["vector"])): self._create_index(dimension=len(items[0]["vector"])) - for batch in self._create_batches(items): actions = [ - { - "_index":self._get_index_name(dimension=len(items[0]["vector"])), - "_id": item["id"], - "_source": { - "collection": collection_name, - "vector": item["vector"], - "text": item["text"], - "metadata": item["metadata"], - }, - } + { + "_index": self._get_index_name(dimension=len(items[0]["vector"])), + "_id": item["id"], + "_source": { + "collection": collection_name, + "vector": item["vector"], + "text": item["text"], + "metadata": item["metadata"], + }, + } for item in batch ] - bulk(self.client,actions) + bulk(self.client, actions) # Upsert documents using the update API with doc_as_upsert=True. def upsert(self, collection_name: str, items: list[VectorItem]): @@ -280,8 +265,7 @@ def upsert(self, collection_name: str, items: list[VectorItem]): } for item in batch ] - bulk(self.client,actions) - + bulk(self.client, actions) # Delete specific documents from a collection by filtering on both collection and document IDs. 
def delete( @@ -292,21 +276,16 @@ def delete( ): query = { - "query": { - "bool": { - "filter": [ - {"term": {"collection": collection_name}} - ] - } - } + "query": {"bool": {"filter": [{"term": {"collection": collection_name}}]}} } - #logic based on chromaDB + # logic based on chromaDB if ids: query["query"]["bool"]["filter"].append({"terms": {"_id": ids}}) elif filter: for field, value in filter.items(): - query["query"]["bool"]["filter"].append({"term": {f"metadata.{field}": value}}) - + query["query"]["bool"]["filter"].append( + {"term": {f"metadata.{field}": value}} + ) self.client.delete_by_query(index=f"{self.index_prefix}*", body=query) From c0ecff9d709f371d383c46a72f63e10ba6e7750b Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Wed, 5 Mar 2025 19:36:30 -0800 Subject: [PATCH 220/623] fix: pinyin keyboard --- src/lib/components/chat/MessageInput.svelte | 14 ++++++++++++++ src/lib/components/common/RichTextInput.svelte | 11 +++++++++++ 2 files changed, 25 insertions(+) diff --git a/src/lib/components/chat/MessageInput.svelte b/src/lib/components/chat/MessageInput.svelte index ae7717a11b4..7db31010b63 100644 --- a/src/lib/components/chat/MessageInput.svelte +++ b/src/lib/components/chat/MessageInput.svelte @@ -85,6 +85,8 @@ let loaded = false; let recording = false; + let isComposing = false; + let chatInputContainerElement; let chatInputElement; @@ -707,6 +709,8 @@ console.log(res); return res; }} + oncompositionstart={() => (isComposing = true)} + oncompositionend={() => (isComposing = false)} on:keydown={async (e) => { e = e.detail.event; @@ -806,6 +810,10 @@ navigator.msMaxTouchPoints > 0 ) ) { + if (isComposing) { + return; + } + // Uses keyCode '13' for Enter key for chinese/japanese keyboards. // // Depending on the user's settings, it will send the message @@ -882,6 +890,8 @@ class="scrollbar-hidden bg-transparent dark:text-gray-100 outline-hidden w-full pt-3 px-1 resize-none" placeholder={placeholder ? 
placeholder : $i18n.t('Send a Message')} bind:value={prompt} + on:compositionstart={() => (isComposing = true)} + on:compositionend={() => (isComposing = false)} on:keydown={async (e) => { const isCtrlPressed = e.ctrlKey || e.metaKey; // metaKey is for Cmd key on Mac @@ -983,6 +993,10 @@ navigator.msMaxTouchPoints > 0 ) ) { + if (isComposing) { + return; + } + console.log('keypress', e); // Prevent Enter key from creating a new line const isCtrlPressed = e.ctrlKey || e.metaKey; diff --git a/src/lib/components/common/RichTextInput.svelte b/src/lib/components/common/RichTextInput.svelte index 80f92694cd2..3a30eb646c3 100644 --- a/src/lib/components/common/RichTextInput.svelte +++ b/src/lib/components/common/RichTextInput.svelte @@ -27,6 +27,9 @@ import { PASTED_TEXT_CHARACTER_LIMIT } from '$lib/constants'; + export let oncompositionstart = (e) => {}; + export let oncompositionend = (e) => {}; + // create a lowlight instance with all languages loaded const lowlight = createLowlight(all); @@ -226,6 +229,14 @@ editorProps: { attributes: { id }, handleDOMEvents: { + compositionstart: (view, event) => { + oncompositionstart(event); + return false; + }, + compositionend: (view, event) => { + oncompositionend(event); + return false; + }, focus: (view, event) => { eventDispatch('focus', { event }); return false; From 1173459eee8a1c46a77883bd898f7eea082160cd Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Wed, 5 Mar 2025 19:42:41 -0800 Subject: [PATCH 221/623] doc: changelog --- CHANGELOG.md | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7e11228706a..da4046e73fb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,19 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
-## [0.5.19] - 2024-03-04 +## [0.5.20] - 2025-03-05 + +### Added + +- **⚡ Toggle Code Execution On/Off**: You can now enable or disable code execution, providing more control over security, ensuring a safer and more customizable experience. + +### Fixed + +- **📜 Pinyin Keyboard Enter Key Now Works Properly**: Resolved an issue where the Enter key for Pinyin keyboards was not functioning as expected, ensuring seamless input for Chinese users. +- **🖼️ Web Manifest Loading Issue Fixed**: Addressed inconsistencies with 'site.webmanifest', guaranteeing proper loading and representation of the app across different browsers and devices. +- **📦 Non-Root Container Issue Resolved**: Fixed a critical issue where the UI failed to load correctly in non-root containers, ensuring reliable deployment in various environments. + +## [0.5.19] - 2025-03-04 ### Added From 41a4cf7106639b46b68a3ef4117129e14b54c633 Mon Sep 17 00:00:00 2001 From: Marko Henning Date: Thu, 6 Mar 2025 10:47:57 +0100 Subject: [PATCH 222/623] Added new k_reranker parameter --- backend/open_webui/config.py | 5 +++++ backend/open_webui/main.py | 2 ++ backend/open_webui/retrieval/utils.py | 7 ++++++- backend/open_webui/routers/retrieval.py | 8 ++++++++ backend/open_webui/utils/middleware.py | 1 + .../components/admin/Settings/Documents.svelte | 18 ++++++++++++++++++ 6 files changed, 40 insertions(+), 1 deletion(-) diff --git a/backend/open_webui/config.py b/backend/open_webui/config.py index 1e265f2ce70..c832b88a29f 100644 --- a/backend/open_webui/config.py +++ b/backend/open_webui/config.py @@ -1646,6 +1646,11 @@ class BannerModel(BaseModel): RAG_TOP_K = PersistentConfig( "RAG_TOP_K", "rag.top_k", int(os.environ.get("RAG_TOP_K", "3")) ) +RAG_TOP_K_RERANKER = PersistentConfig( + "RAG_TOP_K_RERANKER", + "rag.top_k_reranker", + int(os.environ.get("RAG_TOP_K_RERANKER", "3")) +) RAG_RELEVANCE_THRESHOLD = PersistentConfig( "RAG_RELEVANCE_THRESHOLD", "rag.relevance_threshold", diff --git a/backend/open_webui/main.py 
b/backend/open_webui/main.py index 416460837ea..3c83aba114a 100644 --- a/backend/open_webui/main.py +++ b/backend/open_webui/main.py @@ -189,6 +189,7 @@ DOCUMENT_INTELLIGENCE_ENDPOINT, DOCUMENT_INTELLIGENCE_KEY, RAG_TOP_K, + RAG_TOP_K_RERANKER, RAG_TEXT_SPLITTER, TIKTOKEN_ENCODING_NAME, PDF_EXTRACT_IMAGES, @@ -535,6 +536,7 @@ async def lifespan(app: FastAPI): app.state.config.TOP_K = RAG_TOP_K +app.state.config.TOP_K_RERANKER = RAG_TOP_K_RERANKER app.state.config.RELEVANCE_THRESHOLD = RAG_RELEVANCE_THRESHOLD app.state.config.FILE_MAX_SIZE = RAG_FILE_MAX_SIZE app.state.config.FILE_MAX_COUNT = RAG_FILE_MAX_COUNT diff --git a/backend/open_webui/retrieval/utils.py b/backend/open_webui/retrieval/utils.py index 029a33a56cd..965b49b8807 100644 --- a/backend/open_webui/retrieval/utils.py +++ b/backend/open_webui/retrieval/utils.py @@ -106,6 +106,7 @@ def query_doc_with_hybrid_search( embedding_function, k: int, reranking_function, + k_reranker: int, r: float, ) -> dict: try: @@ -128,7 +129,7 @@ def query_doc_with_hybrid_search( ) compressor = RerankCompressor( embedding_function=embedding_function, - top_n=k, + top_n=k_reranker, reranking_function=reranking_function, r_score=r, ) @@ -267,6 +268,7 @@ def query_collection_with_hybrid_search( embedding_function, k: int, reranking_function, + k_reranker: int, r: float, ) -> dict: results = [] @@ -280,6 +282,7 @@ def query_collection_with_hybrid_search( embedding_function=embedding_function, k=k, reranking_function=reranking_function, + k_reranker=k_reranker, r=r, ) results.append(result) @@ -345,6 +348,7 @@ def get_sources_from_files( embedding_function, k, reranking_function, + k_reranker, r, hybrid_search, full_context=False, @@ -461,6 +465,7 @@ def get_sources_from_files( embedding_function=embedding_function, k=k, reranking_function=reranking_function, + k_reranker=k_reranker, r=r, ) except Exception as e: diff --git a/backend/open_webui/routers/retrieval.py b/backend/open_webui/routers/retrieval.py index 
ac38c236e55..9ab28fd39b7 100644 --- a/backend/open_webui/routers/retrieval.py +++ b/backend/open_webui/routers/retrieval.py @@ -713,6 +713,7 @@ async def get_query_settings(request: Request, user=Depends(get_admin_user)): "status": True, "template": request.app.state.config.RAG_TEMPLATE, "k": request.app.state.config.TOP_K, + "k_reranker": request.app.state.config.TOP_K_RERANKER, "r": request.app.state.config.RELEVANCE_THRESHOLD, "hybrid": request.app.state.config.ENABLE_RAG_HYBRID_SEARCH, } @@ -720,6 +721,7 @@ async def get_query_settings(request: Request, user=Depends(get_admin_user)): class QuerySettingsForm(BaseModel): k: Optional[int] = None + k_reranker: Optional[int] = None r: Optional[float] = None template: Optional[str] = None hybrid: Optional[bool] = None @@ -731,6 +733,7 @@ async def update_query_settings( ): request.app.state.config.RAG_TEMPLATE = form_data.template request.app.state.config.TOP_K = form_data.k if form_data.k else 4 + request.app.state.config.TOP_K_RERANKER = form_data.k_reranker or 4 request.app.state.config.RELEVANCE_THRESHOLD = form_data.r if form_data.r else 0.0 request.app.state.config.ENABLE_RAG_HYBRID_SEARCH = ( @@ -741,6 +744,7 @@ async def update_query_settings( "status": True, "template": request.app.state.config.RAG_TEMPLATE, "k": request.app.state.config.TOP_K, + "k_reranker": request.app.state.config.TOP_K_RERANKER, "r": request.app.state.config.RELEVANCE_THRESHOLD, "hybrid": request.app.state.config.ENABLE_RAG_HYBRID_SEARCH, } @@ -1488,6 +1492,7 @@ class QueryDocForm(BaseModel): collection_name: str query: str k: Optional[int] = None + k_reranker: Optional[int] = None r: Optional[float] = None hybrid: Optional[bool] = None @@ -1508,6 +1513,7 @@ def query_doc_handler( ), k=form_data.k if form_data.k else request.app.state.config.TOP_K, reranking_function=request.app.state.rf, + k_reranker=form_data.k_reranker or request.app.state.config.TOP_K_RERANKER, r=( form_data.r if form_data.r @@ -1536,6 +1542,7 @@ class 
QueryCollectionsForm(BaseModel): collection_names: list[str] query: str k: Optional[int] = None + k_reranker: Optional[int] = None r: Optional[float] = None hybrid: Optional[bool] = None @@ -1556,6 +1563,7 @@ def query_collection_handler( ), k=form_data.k if form_data.k else request.app.state.config.TOP_K, reranking_function=request.app.state.rf, + k_reranker=form_data.k_reranker or request.app.state.config.TOP_K_RERANKER, r=( form_data.r if form_data.r diff --git a/backend/open_webui/utils/middleware.py b/backend/open_webui/utils/middleware.py index 289d887dfdb..0ec034b8fb5 100644 --- a/backend/open_webui/utils/middleware.py +++ b/backend/open_webui/utils/middleware.py @@ -567,6 +567,7 @@ async def chat_completion_files_handler( ), k=request.app.state.config.TOP_K, reranking_function=request.app.state.rf, + k_reranker=request.app.state.config.TOP_K_RERANKER, r=request.app.state.config.RELEVANCE_THRESHOLD, hybrid_search=request.app.state.config.ENABLE_RAG_HYBRID_SEARCH, full_context=request.app.state.config.RAG_FULL_CONTEXT, diff --git a/src/lib/components/admin/Settings/Documents.svelte b/src/lib/components/admin/Settings/Documents.svelte index 0d911af898a..1835f330a17 100644 --- a/src/lib/components/admin/Settings/Documents.svelte +++ b/src/lib/components/admin/Settings/Documents.svelte @@ -74,6 +74,7 @@ template: '', r: 0.0, k: 4, + k_reranker: 4, hybrid: false }; @@ -738,6 +739,23 @@
+ {#if querySettings.hybrid === true} +
+
{$i18n.t('Top K Reranker')}
+
+ +
+
+ {/if} + + {#if querySettings.hybrid === true}
From 9cc9df301836cb9a10781dbae80f194839a2632e Mon Sep 17 00:00:00 2001 From: Perry Li Date: Thu, 6 Mar 2025 10:10:53 +0000 Subject: [PATCH 223/623] fix(chat): resolve duplicate collapsible IDs causing citation modal failures Fix an issue where clicking inline citations in subsequent chat messages failed to open the citation modal when multiple collapsible sections are present. The root cause was duplicate "collapsible-sources" IDs assigned to all Collapsible components. This led document.getElementById() to always return the first instance, preventing subsequent messages from opening their CitationModal. Changes: - Modify Collapsible ID generation in Citations.svelte to use unique IDs with "collapsible-${message.id}" pattern - Update ResponseMessage.svelte's onSourceClick handler to reference the dynamic collapsible IDs - Ensure proper citation modal binding for each chat message's sources Affected components: - Collapsible (expandable content sections) - CitationsModal (citation detail popup) This ensures each chat message's sources are independently collapsible and maintains proper citation modal binding throughout message history. --- src/lib/components/chat/Messages/Citations.svelte | 2 +- src/lib/components/chat/Messages/ResponseMessage.svelte | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib/components/chat/Messages/Citations.svelte b/src/lib/components/chat/Messages/Citations.svelte index 893a64608b2..4a836133864 100644 --- a/src/lib/components/chat/Messages/Citations.svelte +++ b/src/lib/components/chat/Messages/Citations.svelte @@ -124,7 +124,7 @@
{:else} { console.log(id, idx); let sourceButton = document.getElementById(`source-${message.id}-${idx}`); - const sourcesCollapsible = document.getElementById(`collapsible-sources`); + const sourcesCollapsible = document.getElementById(`collapsible-${message.id}`); if (sourceButton) { sourceButton.click(); From 561f4d5d69a8435db2e1cd9754609882c1809deb Mon Sep 17 00:00:00 2001 From: Panda Date: Thu, 6 Mar 2025 11:26:04 +0100 Subject: [PATCH 224/623] i18n: zh-cn --- src/lib/i18n/locales/zh-CN/translation.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/lib/i18n/locales/zh-CN/translation.json b/src/lib/i18n/locales/zh-CN/translation.json index ebb53a1b535..1c8f523c3dc 100644 --- a/src/lib/i18n/locales/zh-CN/translation.json +++ b/src/lib/i18n/locales/zh-CN/translation.json @@ -359,7 +359,7 @@ "Embedding model set to \"{{embedding_model}}\"": "语义向量模型设置为 \"{{embedding_model}}\"", "Enable API Key": "启用 API 密钥", "Enable autocomplete generation for chat messages": "启用聊天消息的输入框内容猜测补全", - "Enable Code Execution": "", + "Enable Code Execution": "启用代码执行", "Enable Code Interpreter": "启用代码解释器", "Enable Community Sharing": "启用分享至社区", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "启用内存锁定(mlock)以防止模型数据被交换出RAM。此选项将模型的工作集页面锁定在RAM中,确保它们不会被交换到磁盘。这可以通过避免页面错误和确保快速数据访问来帮助维持性能。", @@ -451,7 +451,7 @@ "Example: mail": "例如:mail", "Example: ou=users,dc=foo,dc=example": "例如:ou=users,dc=foo,dc=example", "Example: sAMAccountName or uid or userPrincipalName": "例如:sAMAccountName 或 uid 或 userPrincipalName", - "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", + "Exceeded the number of seats in your license. 
Please contact support to increase the number of seats.": "超出了许可证中的席位数量。请联系支持人员以增加席位数量。", "Exclude": "排除", "Execute code for analysis": "执行代码进行分析", "Expand": "展开", @@ -1159,7 +1159,7 @@ "Write your model template content here": "在此写入模型模板内容", "Yesterday": "昨天", "You": "你", - "You are currently using a trial license. Please contact support to upgrade your license.": "", + "You are currently using a trial license. Please contact support to upgrade your license.": "您目前正在使用试用许可证。请联系支持人员升级您的许可证。", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "每次对话最多仅能附上 {{maxCount}} 个文件。", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "通过点击下方的“管理”按钮,你可以添加记忆,以个性化大语言模型的互动,使其更有用,更符合你的需求。", "You cannot upload an empty file.": "请勿上传空文件。", From 98376fbbce34d856122046724a25f7dfb80c5fe7 Mon Sep 17 00:00:00 2001 From: Panda Date: Thu, 6 Mar 2025 13:08:35 +0100 Subject: [PATCH 225/623] i18n: zh-cn --- src/lib/i18n/locales/zh-CN/translation.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/lib/i18n/locales/zh-CN/translation.json b/src/lib/i18n/locales/zh-CN/translation.json index 1c8f523c3dc..f821101c6d6 100644 --- a/src/lib/i18n/locales/zh-CN/translation.json +++ b/src/lib/i18n/locales/zh-CN/translation.json @@ -451,7 +451,7 @@ "Example: mail": "例如:mail", "Example: ou=users,dc=foo,dc=example": "例如:ou=users,dc=foo,dc=example", "Example: sAMAccountName or uid or userPrincipalName": "例如:sAMAccountName 或 uid 或 userPrincipalName", - "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "超出了许可证中的席位数量。请联系支持人员以增加席位数量。", + "Exceeded the number of seats in your license. 
Please contact support to increase the number of seats.": "已达到最大授权人数,请联系支持人员提升授权人数。", "Exclude": "排除", "Execute code for analysis": "执行代码进行分析", "Expand": "展开", @@ -640,7 +640,7 @@ "Local Models": "本地模型", "Location access not allowed": "不允许访问位置信息", "Logit Bias": "Logit 偏置", - "Lost": "丢失", + "Lost": "落败", "LTR": "从左至右", "Made by Open WebUI Community": "由 OpenWebUI 社区制作", "Make sure to enclose them with": "确保将它们包含在内", @@ -1159,7 +1159,7 @@ "Write your model template content here": "在此写入模型模板内容", "Yesterday": "昨天", "You": "你", - "You are currently using a trial license. Please contact support to upgrade your license.": "您目前正在使用试用许可证。请联系支持人员升级您的许可证。", + "You are currently using a trial license. Please contact support to upgrade your license.": "当前为试用许可证,请联系支持人员升级许可证。", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "每次对话最多仅能附上 {{maxCount}} 个文件。", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "通过点击下方的“管理”按钮,你可以添加记忆,以个性化大语言模型的互动,使其更有用,更符合你的需求。", "You cannot upload an empty file.": "请勿上传空文件。", From 92fb1109b6e53bef38ec7fe433e162de83a4cb7a Mon Sep 17 00:00:00 2001 From: orenzhang Date: Thu, 6 Mar 2025 20:16:34 +0800 Subject: [PATCH 226/623] i18n(common): add i18n translation --- src/lib/components/AddConnectionModal.svelte | 2 +- src/lib/components/admin/Settings/Documents.svelte | 8 ++++---- src/lib/components/admin/Settings/WebSearch.svelte | 4 ++-- src/lib/components/channel/Messages.svelte | 13 +++++++++---- src/lib/components/chat/Messages/CodeBlock.svelte | 2 +- src/lib/components/common/FileItemModal.svelte | 4 ++-- .../workspace/common/AccessControl.svelte | 4 ++-- src/lib/i18n/locales/ar-BH/translation.json | 6 ++++++ src/lib/i18n/locales/bg-BG/translation.json | 6 ++++++ src/lib/i18n/locales/bn-BD/translation.json | 6 ++++++ src/lib/i18n/locales/ca-ES/translation.json | 6 ++++++ src/lib/i18n/locales/ceb-PH/translation.json | 6 ++++++ 
src/lib/i18n/locales/cs-CZ/translation.json | 6 ++++++ src/lib/i18n/locales/da-DK/translation.json | 6 ++++++ src/lib/i18n/locales/de-DE/translation.json | 6 ++++++ src/lib/i18n/locales/dg-DG/translation.json | 6 ++++++ src/lib/i18n/locales/el-GR/translation.json | 6 ++++++ src/lib/i18n/locales/en-GB/translation.json | 6 ++++++ src/lib/i18n/locales/en-US/translation.json | 6 ++++++ src/lib/i18n/locales/es-ES/translation.json | 6 ++++++ src/lib/i18n/locales/eu-ES/translation.json | 6 ++++++ src/lib/i18n/locales/fa-IR/translation.json | 6 ++++++ src/lib/i18n/locales/fi-FI/translation.json | 6 ++++++ src/lib/i18n/locales/fr-CA/translation.json | 6 ++++++ src/lib/i18n/locales/fr-FR/translation.json | 6 ++++++ src/lib/i18n/locales/he-IL/translation.json | 6 ++++++ src/lib/i18n/locales/hi-IN/translation.json | 6 ++++++ src/lib/i18n/locales/hr-HR/translation.json | 6 ++++++ src/lib/i18n/locales/hu-HU/translation.json | 6 ++++++ src/lib/i18n/locales/id-ID/translation.json | 6 ++++++ src/lib/i18n/locales/ie-GA/translation.json | 6 ++++++ src/lib/i18n/locales/it-IT/translation.json | 6 ++++++ src/lib/i18n/locales/ja-JP/translation.json | 6 ++++++ src/lib/i18n/locales/ka-GE/translation.json | 6 ++++++ src/lib/i18n/locales/ko-KR/translation.json | 6 ++++++ src/lib/i18n/locales/lt-LT/translation.json | 6 ++++++ src/lib/i18n/locales/ms-MY/translation.json | 6 ++++++ src/lib/i18n/locales/nb-NO/translation.json | 6 ++++++ src/lib/i18n/locales/nl-NL/translation.json | 6 ++++++ src/lib/i18n/locales/pa-IN/translation.json | 6 ++++++ src/lib/i18n/locales/pl-PL/translation.json | 6 ++++++ src/lib/i18n/locales/pt-BR/translation.json | 6 ++++++ src/lib/i18n/locales/pt-PT/translation.json | 6 ++++++ src/lib/i18n/locales/ro-RO/translation.json | 6 ++++++ src/lib/i18n/locales/ru-RU/translation.json | 6 ++++++ src/lib/i18n/locales/sk-SK/translation.json | 6 ++++++ src/lib/i18n/locales/sr-RS/translation.json | 6 ++++++ src/lib/i18n/locales/sv-SE/translation.json | 6 ++++++ 
src/lib/i18n/locales/th-TH/translation.json | 6 ++++++ src/lib/i18n/locales/tk-TW/translation.json | 6 ++++++ src/lib/i18n/locales/tr-TR/translation.json | 6 ++++++ src/lib/i18n/locales/uk-UA/translation.json | 6 ++++++ src/lib/i18n/locales/ur-PK/translation.json | 6 ++++++ src/lib/i18n/locales/vi-VN/translation.json | 6 ++++++ src/lib/i18n/locales/zh-CN/translation.json | 6 ++++++ src/lib/i18n/locales/zh-TW/translation.json | 6 ++++++ 56 files changed, 315 insertions(+), 16 deletions(-) diff --git a/src/lib/components/AddConnectionModal.svelte b/src/lib/components/AddConnectionModal.svelte index cbd90b68daf..f3132640a08 100644 --- a/src/lib/components/AddConnectionModal.svelte +++ b/src/lib/components/AddConnectionModal.svelte @@ -179,7 +179,7 @@
- + + + + + {#each tags as tag} - {#if !isFirstMessage && !readOnly} + {#if !readOnly && siblings.length > 1}
- {#if $user.role === 'admin' || $user?.permissions.chat?.controls} -
- {#if chatFiles.length > 0} - -
- {#each chatFiles as file, fileIdx} - { - // Remove the file from the chatFiles array +
+ {#if chatFiles.length > 0} + +
+ {#each chatFiles as file, fileIdx} + { + // Remove the file from the chatFiles array - chatFiles.splice(fileIdx, 1); - chatFiles = chatFiles; - }} - on:click={() => { - console.log(file); - }} - /> - {/each} -
-
- -
- {/if} - - -
- + chatFiles.splice(fileIdx, 1); + chatFiles = chatFiles; + }} + on:click={() => { + console.log(file); + }} + /> + {/each}

+ {/if} + + +
+ +
+
+ + {#if $user.role === 'admin' || $user?.permissions.chat?.controls} +
@@ -90,10 +90,6 @@
-
- {:else} -
- {$i18n.t('You do not have permission to access this feature.')} -
- {/if} + {/if} +
diff --git a/src/lib/components/chat/ModelSelector/Selector.svelte b/src/lib/components/chat/ModelSelector/Selector.svelte index 46710787da0..4ac937121e7 100644 --- a/src/lib/components/chat/ModelSelector/Selector.svelte +++ b/src/lib/components/chat/ModelSelector/Selector.svelte @@ -350,7 +350,7 @@ selectedTag = ''; }} > - {$i18n.t('Ollama')} + {$i18n.t('Local')} + + {#each tags as tag} diff --git a/src/lib/components/chat/Navbar.svelte b/src/lib/components/chat/Navbar.svelte index 5040cdd9057..890d2369ba0 100644 --- a/src/lib/components/chat/Navbar.svelte +++ b/src/lib/components/chat/Navbar.svelte @@ -130,21 +130,19 @@ {/if} - {#if !$mobile && ($user.role === 'admin' || $user?.permissions?.chat?.controls)} - - - - {/if} + + +
{:else if token.type === 'blockquote'} -
- -
+ {@const alert = alertComponent(token)} + {#if alert} + + {:else} +
+ +
+ {/if} {:else if token.type === 'list'} {#if token.ordered}
    From dbf051ff4ec998a784bd8e095ea94fa8207c64ee Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Sat, 15 Mar 2025 01:56:04 +0000 Subject: [PATCH 315/623] enh: always expand details option --- .../Messages/Markdown/MarkdownTokens.svelte | 2 ++ .../components/chat/Settings/Interface.svelte | 27 +++++++++++++++++++ 2 files changed, 29 insertions(+) diff --git a/src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte b/src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte index 8de359a26c7..f49533f6a1e 100644 --- a/src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte +++ b/src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte @@ -18,6 +18,7 @@ import Tooltip from '$lib/components/common/Tooltip.svelte'; import ArrowDownTray from '$lib/components/icons/ArrowDownTray.svelte'; import Source from './Source.svelte'; + import { settings } from '$lib/stores'; const dispatch = createEventDispatcher(); @@ -242,6 +243,7 @@ {:else if token.type === 'details'} { + expandDetails = !expandDetails; + saveSettings({ expandDetails }); + }; + const toggleSplitLargeChunks = async () => { splitLargeChunks = !splitLargeChunks; saveSettings({ splitLargeChunks: splitLargeChunks }); @@ -570,6 +577,26 @@
+
+
+
{$i18n.t('Always Expand Details')}
+ + +
+
+
From 7723705707da90393ccb7860a39b22e129b693ab Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Sat, 15 Mar 2025 02:01:59 +0000 Subject: [PATCH 316/623] enh: always collapse code block --- .../components/chat/Messages/CodeBlock.svelte | 2 +- .../Messages/Markdown/MarkdownTokens.svelte | 1 + .../components/chat/Settings/Interface.svelte | 33 +++++++++++++++++-- 3 files changed, 33 insertions(+), 3 deletions(-) diff --git a/src/lib/components/chat/Messages/CodeBlock.svelte b/src/lib/components/chat/Messages/CodeBlock.svelte index 40103102ae5..ca663f61de6 100644 --- a/src/lib/components/chat/Messages/CodeBlock.svelte +++ b/src/lib/components/chat/Messages/CodeBlock.svelte @@ -27,6 +27,7 @@ export let save = false; export let run = true; + export let collapsed = false; export let token; export let lang = ''; @@ -60,7 +61,6 @@ let result = null; let files = null; - let collapsed = false; let copied = false; let saved = false; diff --git a/src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte b/src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte index f49533f6a1e..95546e97d99 100644 --- a/src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte +++ b/src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte @@ -85,6 +85,7 @@ {#if token.raw.includes('```')} { + const toggleExpandDetails = () => { expandDetails = !expandDetails; saveSettings({ expandDetails }); }; + const toggleCollapseCodeBlocks = () => { + collapseCodeBlocks = !collapseCodeBlocks; + saveSettings({ collapseCodeBlocks }); + }; + const toggleSplitLargeChunks = async () => { splitLargeChunks = !splitLargeChunks; saveSettings({ splitLargeChunks: splitLargeChunks }); @@ -234,6 +240,9 @@ richTextInput = $settings.richTextInput ?? true; largeTextAsFile = $settings.largeTextAsFile ?? false; + collapseCodeBlocks = $settings.collapseCodeBlocks ?? false; + expandDetails = $settings.expandDetails ?? false; + landingPageMode = $settings.landingPageMode ?? 
''; chatBubble = $settings.chatBubble ?? true; widescreenMode = $settings.widescreenMode ?? false; @@ -577,6 +586,26 @@
+
+
+
{$i18n.t('Always Collapse Code Blocks')}
+ + +
+
+
{$i18n.t('Always Expand Details')}
@@ -584,7 +613,7 @@
- {#if !readOnly && siblings.length > 1} + {#if !readOnly && (!isFirstMessage || siblings.length > 1)}
diff --git a/src/lib/components/chat/Messages/Markdown/AlertRenderer.svelte b/src/lib/components/chat/Messages/Markdown/AlertRenderer.svelte index aa0cfbe0fbc..874c639bcae 100644 --- a/src/lib/components/chat/Messages/Markdown/AlertRenderer.svelte +++ b/src/lib/components/chat/Messages/Markdown/AlertRenderer.svelte @@ -1,82 +1,82 @@
-

- - {alert.type} -

- +

+ + {alert.type} +

+
diff --git a/src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte b/src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte index 1f7b889e221..678caf7eca3 100644 --- a/src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte +++ b/src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte @@ -176,7 +176,7 @@ {:else if token.type === 'blockquote'} {@const alert = alertComponent(token)} {#if alert} - + {:else}
diff --git a/src/lib/components/common/Checkbox.svelte b/src/lib/components/common/Checkbox.svelte index 9d5f8b54e59..feae33cd253 100644 --- a/src/lib/components/common/Checkbox.svelte +++ b/src/lib/components/common/Checkbox.svelte @@ -15,10 +15,12 @@ class=" outline -outline-offset-1 outline-[1.5px] outline-gray-200 dark:outline-gray-600 {state !== 'unchecked' ? 'bg-black outline-black ' - : 'hover:outline-gray-500 hover:bg-gray-50 dark:hover:bg-gray-800'} text-white transition-all rounded-sm inline-block w-3.5 h-3.5 relative {disabled ? 'opacity-50 cursor-not-allowed' : ''}" + : 'hover:outline-gray-500 hover:bg-gray-50 dark:hover:bg-gray-800'} text-white transition-all rounded-sm inline-block w-3.5 h-3.5 relative {disabled + ? 'opacity-50 cursor-not-allowed' + : ''}" on:click={() => { if (disabled) return; - + if (_state === 'unchecked') { _state = 'checked'; dispatch('change', _state); diff --git a/src/lib/components/common/FileItem.svelte b/src/lib/components/common/FileItem.svelte index 476bd9c1056..772b078584d 100644 --- a/src/lib/components/common/FileItem.svelte +++ b/src/lib/components/common/FileItem.svelte @@ -101,7 +101,11 @@
{:else} - +
{#if loading} diff --git a/src/lib/components/workspace/Models/FiltersSelector.svelte b/src/lib/components/workspace/Models/FiltersSelector.svelte index 30a4c88fde8..fa595d6f822 100644 --- a/src/lib/components/workspace/Models/FiltersSelector.svelte +++ b/src/lib/components/workspace/Models/FiltersSelector.svelte @@ -39,7 +39,11 @@
{ if (!_filters[filter].is_global) { diff --git a/src/lib/i18n/locales/ar-BH/translation.json b/src/lib/i18n/locales/ar-BH/translation.json index 9ac6412f852..2d83300d98f 100644 --- a/src/lib/i18n/locales/ar-BH/translation.json +++ b/src/lib/i18n/locales/ar-BH/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "هل تملك حساب ؟", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "", "an assistant": "مساعد", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "", "Description": "وصف", "Didn't fully follow instructions": "لم أتبع التعليمات بشكل كامل", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "", "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", + "Docling": "", + "Docling Server URL required.": "", "Document": "المستند", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "أدخل Chunk الحجم", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "مطالبات التصدير", "Export to CSV": "", "Export Tools": "", + "External": "", 
"External Models": "", "Failed to add file.": "", "Failed to create API Key.": "فشل في إنشاء مفتاح API.", @@ -983,6 +990,7 @@ "System": "النظام", "System Instructions": "", "System Prompt": "محادثة النظام", + "Tags": "", "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "", "You cannot upload an empty file.": "", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "", "You have no archived conversations.": "لا تملك محادثات محفوظه", diff --git a/src/lib/i18n/locales/bg-BG/translation.json b/src/lib/i18n/locales/bg-BG/translation.json index ea9d097ea43..99522767ccc 100644 --- a/src/lib/i18n/locales/bg-BG/translation.json +++ b/src/lib/i18n/locales/bg-BG/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "Вече имате акаунт?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "Винаги", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "Невероятно", "an assistant": "асистент", "Analyzed": "Анализирано", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "Опишете вашата база от знания и цели", "Description": "Описание", "Didn't fully follow instructions": "Не следва напълно инструкциите", + "Direct": "", "Direct Connections": "Директни връзки", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "Директните връзки позволяват на потребителите да се свързват със собствени OpenAI съвместими API крайни точки.", "Direct Connections settings updated": "Настройките за директни връзки са актуализирани", @@ -315,6 +318,8 @@ "Dive into knowledge": "Потопете се в знанието", "Do not install functions from sources you do not fully trust.": "Не инсталирайте функции от източници, на които не се доверявате напълно.", "Do not install tools from sources you do not fully trust.": "Не инсталирайте инструменти от източници, на които не се доверявате напълно.", + "Docling": "", + "Docling Server URL required.": "", "Document": "Документ", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "Въведете размер на чънк", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "Въведете описание", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "Въведете домейни, разделени със запетаи (напр. 
example.com,site.org)", @@ -472,6 +478,7 @@ "Export Prompts": "Експортване на промптове", "Export to CSV": "Експортиране в CSV", "Export Tools": "Експортиране на инструменти", + "External": "", "External Models": "Външни модели", "Failed to add file.": "Неуспешно добавяне на файл.", "Failed to create API Key.": "Неуспешно създаване на API ключ.", @@ -983,6 +990,7 @@ "System": "Система", "System Instructions": "Системни инструкции", "System Prompt": "Системен Промпт", + "Tags": "", "Tags Generation": "Генериране на тагове", "Tags Generation Prompt": "Промпт за генериране на тагове", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Можете да чатите с максимум {{maxCount}} файл(а) наведнъж.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Можете да персонализирате взаимодействията си с LLM-и, като добавите спомени чрез бутона 'Управление' по-долу, правейки ги по-полезни и съобразени с вас.", "You cannot upload an empty file.": "Не можете да качите празен файл.", - "You do not have permission to access this feature.": "Нямате разрешение за достъп до тази функция.", "You do not have permission to upload files": "Нямате разрешение да качвате файлове", "You do not have permission to upload files.": "Нямате разрешение да качвате файлове.", "You have no archived conversations.": "Нямате архивирани разговори.", diff --git a/src/lib/i18n/locales/bn-BD/translation.json b/src/lib/i18n/locales/bn-BD/translation.json index b3087a64562..e921c061e67 100644 --- a/src/lib/i18n/locales/bn-BD/translation.json +++ b/src/lib/i18n/locales/bn-BD/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "আগে থেকেই একাউন্ট আছে?", "Alternative to 
the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "", "an assistant": "একটা এসিস্ট্যান্ট", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "", "Description": "বিবরণ", "Didn't fully follow instructions": "ইনস্ট্রাকশন সম্পূর্ণ অনুসরণ করা হয়নি", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "", "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", + "Docling": "", + "Docling Server URL required.": "", "Document": "ডকুমেন্ট", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "চাংক সাইজ লিখুন", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "প্রম্পটগুলো একপোর্ট করুন", "Export to CSV": "", "Export Tools": "", + "External": "", "External Models": "", "Failed to add file.": "", "Failed to create API Key.": "API Key তৈরি করা যায়নি।", @@ -983,6 +990,7 @@ "System": "সিস্টেম", "System Instructions": "", "System Prompt": "সিস্টেম প্রম্পট", + "Tags": "", "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of 
less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "", "You cannot upload an empty file.": "", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "", "You have no archived conversations.": "আপনার কোনও আর্কাইভ করা কথোপকথন নেই।", diff --git a/src/lib/i18n/locales/ca-ES/translation.json b/src/lib/i18n/locales/ca-ES/translation.json index 94ad1f47050..89e95b55b6a 100644 --- a/src/lib/i18n/locales/ca-ES/translation.json +++ b/src/lib/i18n/locales/ca-ES/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "Ja tens un compte?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "Alternativa al top_p, i pretén garantir un equilibri de qualitat i varietat. El paràmetre p representa la probabilitat mínima que es consideri un token, en relació amb la probabilitat del token més probable. 
Per exemple, amb p=0,05 i el token més probable amb una probabilitat de 0,9, es filtren els logits amb un valor inferior a 0,045.", "Always": "Sempre", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "Al·lucinant", "an assistant": "un assistent", "Analyzed": "Analitzat", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "Descriu la teva base de coneixement i objectius", "Description": "Descripció", "Didn't fully follow instructions": "No s'han seguit les instruccions completament", + "Direct": "", "Direct Connections": "Connexions directes", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "Les connexions directes permeten als usuaris connectar-se als seus propis endpoints d'API compatibles amb OpenAI.", "Direct Connections settings updated": "Configuració de les connexions directes actualitzada", @@ -315,6 +318,8 @@ "Dive into knowledge": "Aprofundir en el coneixement", "Do not install functions from sources you do not fully trust.": "No instal·lis funcions de fonts en què no confiïs plenament.", "Do not install tools from sources you do not fully trust.": "No instal·lis eines de fonts en què no confiïs plenament.", + "Docling": "", + "Docling Server URL required.": "", "Document": "Document", "Document Intelligence": "Document Intelligence", "Document Intelligence endpoint and key required.": "Fa falta un punt de connexió i una clau per a Document Intelligence.", @@ -385,6 +390,7 @@ "Enter Chunk Size": "Introdueix la mida del bloc", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "Introdueix parelles de \"token:valor de biaix\" separats per comes (exemple: 5432:100, 413:-100)", "Enter description": "Introdueix la descripció", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "Introdueix el punt de connexió de Document Intelligence", "Enter Document Intelligence Key": "Introdueix la clau de Document Intelligence", 
"Enter domains separated by commas (e.g., example.com,site.org)": "Introdueix els dominis separats per comes (p. ex. example.com,site.org)", @@ -472,6 +478,7 @@ "Export Prompts": "Exportar les indicacions", "Export to CSV": "Exportar a CSV", "Export Tools": "Exportar les eines", + "External": "", "External Models": "Models externs", "Failed to add file.": "No s'ha pogut afegir l'arxiu.", "Failed to create API Key.": "No s'ha pogut crear la clau API.", @@ -983,6 +990,7 @@ "System": "Sistema", "System Instructions": "Instruccions de sistema", "System Prompt": "Indicació del Sistema", + "Tags": "", "Tags Generation": "Generació d'etiquetes", "Tags Generation Prompt": "Indicació per a la generació d'etiquetes", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "El mostreig sense cua s'utilitza per reduir l'impacte de tokens menys probables de la sortida. Un valor més alt (p. 
ex., 2,0) reduirà més l'impacte, mentre que un valor d'1,0 desactiva aquesta configuració.", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Només pots xatejar amb un màxim de {{maxCount}} fitxers alhora.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Pots personalitzar les teves interaccions amb els models de llenguatge afegint memòries mitjançant el botó 'Gestiona' que hi ha a continuació, fent-les més útils i adaptades a tu.", "You cannot upload an empty file.": "No es pot pujar un ariux buit.", - "You do not have permission to access this feature.": "No tens permís per accedir a aquesta funcionalitat", "You do not have permission to upload files": "No tens permisos per pujar arxius", "You do not have permission to upload files.": "No tens permisos per pujar arxius.", "You have no archived conversations.": "No tens converses arxivades.", diff --git a/src/lib/i18n/locales/ceb-PH/translation.json b/src/lib/i18n/locales/ceb-PH/translation.json index 9196af8b0d3..7062ee1a216 100644 --- a/src/lib/i18n/locales/ceb-PH/translation.json +++ b/src/lib/i18n/locales/ceb-PH/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "Naa na kay account ?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "", "an assistant": "usa ka katabang", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "", "Description": "Deskripsyon", "Didn't fully follow instructions": "", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "", "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", + "Docling": "", + "Docling Server URL required.": "", "Document": "Dokumento", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "Isulod ang block size", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "Export prompts", "Export to CSV": "", "Export Tools": "", + "External": "", "External Models": "", "Failed to add file.": "", "Failed to create API Key.": "", @@ -983,6 +990,7 @@ "System": "Sistema", "System Instructions": "", "System Prompt": "Madasig nga Sistema", + "Tags": "", "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "", "You cannot upload an empty file.": "", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "", "You have no archived conversations.": "", diff --git a/src/lib/i18n/locales/cs-CZ/translation.json b/src/lib/i18n/locales/cs-CZ/translation.json index 8fdbdf9672f..9375bb61f55 100644 --- a/src/lib/i18n/locales/cs-CZ/translation.json +++ b/src/lib/i18n/locales/cs-CZ/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "Už máte účet?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "", "an assistant": "asistent", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "", "Description": "Popis", "Didn't fully follow instructions": "Nenásledovali jste přesně všechny instrukce.", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "", "Do not install functions from sources you do not fully trust.": "Neinstalujte funkce ze zdrojů, kterým plně nedůvěřujete.", "Do not install tools from sources you do not fully trust.": "Neinstalujte nástroje ze zdrojů, kterým plně nedůvěřujete.", + "Docling": "", + "Docling Server URL required.": "", "Document": "Dokument", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "Zadejte velikost bloku", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "Zadejte popis", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "Exportovat prompty", "Export to CSV": "", "Export Tools": "Exportní nástroje", + "External": "", "External Models": "Externí modely", "Failed to add file.": "Nepodařilo se přidat soubor.", "Failed to create API Key.": "Nepodařilo se vytvořit API klíč.", @@ -983,6 +990,7 @@ "System": "System", "System Instructions": "", "System Prompt": "Systémový prompt", + "Tags": "", "Tags Generation": "", "Tags Generation Prompt": "Prompt pro generování značek", "Tail free sampling is used to 
reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Můžete komunikovat pouze s maximálně {{maxCount}} soubor(y) najednou.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Můžete personalizovat své interakce s LLM pomocí přidávání vzpomínek prostřednictvím tlačítka 'Spravovat' níže, což je učiní pro vás užitečnějšími a lépe přizpůsobenými.", "You cannot upload an empty file.": "Nemůžete nahrát prázdný soubor.", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "", "You have no archived conversations.": "Nemáte žádné archivované konverzace.", diff --git a/src/lib/i18n/locales/da-DK/translation.json b/src/lib/i18n/locales/da-DK/translation.json index 9c6eeb5e379..422ab6569a9 100644 --- a/src/lib/i18n/locales/da-DK/translation.json +++ b/src/lib/i18n/locales/da-DK/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "Har du allerede en profil?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "", "an assistant": "en assistent", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "", "Description": "Beskrivelse", "Didn't fully follow instructions": "Fulgte ikke instruktioner", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "", "Do not install functions from sources you do not fully trust.": "Lad være med at installere funktioner fra kilder, som du ikke stoler på.", "Do not install tools from sources you do not fully trust.": "Lad være med at installere værktøjer fra kilder, som du ikke stoler på.", + "Docling": "", + "Docling Server URL required.": "", "Document": "Dokument", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "Indtast størrelse af tekststykker", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "Eksportér prompts", "Export to CSV": "", "Export Tools": "Eksportér værktøjer", + "External": "", "External Models": "Eksterne modeller", "Failed to add file.": "", "Failed to create API Key.": "Kunne ikke oprette API-nøgle.", @@ -983,6 +990,7 @@ "System": "System", "System Instructions": "", "System Prompt": "Systemprompt", + "Tags": "", "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens 
from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Du kan kun chatte med maksimalt {{maxCount}} fil(er) ad gangen.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Du kan personliggøre dine interaktioner med LLM'er ved at tilføje minder via knappen 'Administrer' nedenfor, hvilket gør dem mere nyttige og skræddersyet til dig.", "You cannot upload an empty file.": "", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "", "You have no archived conversations.": "Du har ingen arkiverede samtaler.", diff --git a/src/lib/i18n/locales/de-DE/translation.json b/src/lib/i18n/locales/de-DE/translation.json index 2b273dc80dc..221d8a20111 100644 --- a/src/lib/i18n/locales/de-DE/translation.json +++ b/src/lib/i18n/locales/de-DE/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "Haben Sie bereits einen Account?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "Immer", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "Fantastisch", "an assistant": "ein Assistent", "Analyzed": "Analysiert", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "Beschreibe deinen Wissensspeicher und deine Ziele", "Description": "Beschreibung", "Didn't fully follow instructions": "Nicht genau den Answeisungen gefolgt", + "Direct": "", "Direct Connections": "Direktverbindungen", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "Direktverbindungen ermöglichen es Benutzern, sich mit ihren eigenen OpenAI-kompatiblen API-Endpunkten zu verbinden.", "Direct Connections settings updated": "Direktverbindungs-Einstellungen aktualisiert", @@ -315,6 +318,8 @@ "Dive into knowledge": "Tauchen Sie in das Wissen ein", "Do not install functions from sources you do not fully trust.": "Installieren Sie keine Funktionen aus Quellen, denen Sie nicht vollständig vertrauen.", "Do not install tools from sources you do not fully trust.": "Installieren Sie keine Werkzeuge aus Quellen, denen Sie nicht vollständig vertrauen.", + "Docling": "", + "Docling Server URL required.": "", "Document": "Dokument", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "Geben Sie die Blockgröße ein", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "Geben Sie eine Beschreibung ein", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "Geben Sie die Domains durch Kommas separiert ein (z.B. 
example.com,site.org)", @@ -472,6 +478,7 @@ "Export Prompts": "Prompts exportieren", "Export to CSV": "Als CSV exportieren", "Export Tools": "Werkzeuge exportieren", + "External": "", "External Models": "Externe Modelle", "Failed to add file.": "Fehler beim Hinzufügen der Datei.", "Failed to create API Key.": "Fehler beim Erstellen des API-Schlüssels.", @@ -983,6 +990,7 @@ "System": "System", "System Instructions": "Systemanweisungen", "System Prompt": "System-Prompt", + "Tags": "", "Tags Generation": "Tag-Generierung", "Tags Generation Prompt": "Prompt für Tag-Generierung", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "Tail-Free Sampling wird verwendet, um den Einfluss weniger wahrscheinlicher Tokens auf die Ausgabe zu reduzieren. Ein höherer Wert (z.B. 2.0) reduziert den Einfluss stärker, während ein Wert von 1.0 diese Einstellung deaktiviert. 
(Standard: 1)", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Sie können nur mit maximal {{maxCount}} Datei(en) gleichzeitig chatten.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Personalisieren Sie Interaktionen mit LLMs, indem Sie über die Schaltfläche \"Verwalten\" Erinnerungen hinzufügen.", "You cannot upload an empty file.": "Sie können keine leere Datei hochladen.", - "You do not have permission to access this feature.": "Sie haben keine Berechtigung, auf diese Funktion zuzugreifen.", "You do not have permission to upload files": "Sie haben keine Berechtigung, Dateien hochzuladen", "You do not have permission to upload files.": "Sie haben keine Berechtigung zum Hochladen von Dateien.", "You have no archived conversations.": "Du hast keine archivierten Unterhaltungen.", diff --git a/src/lib/i18n/locales/dg-DG/translation.json b/src/lib/i18n/locales/dg-DG/translation.json index 497089c38c0..f4f9d99af56 100644 --- a/src/lib/i18n/locales/dg-DG/translation.json +++ b/src/lib/i18n/locales/dg-DG/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "Such account exists?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "", "an assistant": "such assistant", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "", "Description": "Description", "Didn't fully follow instructions": "", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "", "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", + "Docling": "", + "Docling Server URL required.": "", "Document": "Document", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "Enter Size of Chunk", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "Export Promptos", "Export to CSV": "", "Export Tools": "", + "External": "", "External Models": "", "Failed to add file.": "", "Failed to create API Key.": "", @@ -983,6 +990,7 @@ "System": "System very system", "System Instructions": "", "System Prompt": "System Prompt much prompt", + "Tags": "", "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "", "You cannot upload an empty file.": "", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "", "You have no archived conversations.": "", diff --git a/src/lib/i18n/locales/el-GR/translation.json b/src/lib/i18n/locales/el-GR/translation.json index 8668100b51a..cc0f4a221b2 100644 --- a/src/lib/i18n/locales/el-GR/translation.json +++ b/src/lib/i18n/locales/el-GR/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "Έχετε ήδη λογαριασμό;", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "Καταπληκτικό", "an assistant": "ένας βοηθός", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "Περιγράψτε τη βάση γνώσης και τους στόχους σας", "Description": "Περιγραφή", "Didn't fully follow instructions": "Δεν ακολούθησε πλήρως τις οδηγίες", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "Βυθιστείτε στη γνώση", "Do not install functions from sources you do not fully trust.": "Μην εγκαθιστάτε λειτουργίες από πηγές που δεν εμπιστεύεστε πλήρως.", "Do not install tools from sources you do not fully trust.": "Μην εγκαθιστάτε εργαλεία από πηγές που δεν εμπιστεύεστε πλήρως.", + "Docling": "", + "Docling Server URL required.": "", "Document": "Έγγραφο", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "Εισάγετε το Μέγεθος Τμημάτων", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "Εισάγετε την περιγραφή", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "Εξαγωγή Προτροπών", "Export to CSV": "Εξαγωγή σε CSV", "Export Tools": "Εξαγωγή Εργαλείων", + "External": "", "External Models": "Εξωτερικά Μοντέλα", "Failed to add file.": "Αποτυχία προσθήκης αρχείου.", "Failed to create API Key.": "Αποτυχία δημιουργίας Κλειδιού API.", @@ -983,6 +990,7 @@ "System": "Σύστημα", "System Instructions": "Οδηγίες Συστήματος", "System Prompt": 
"Προτροπή Συστήματος", + "Tags": "", "Tags Generation": "", "Tags Generation Prompt": "Προτροπή Γενιάς Ετικετών", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Μπορείτε να συνομιλήσετε μόνο με μέγιστο αριθμό {{maxCount}} αρχείου(-ων) ταυτόχρονα.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Μπορείτε να προσωποποιήσετε τις αλληλεπιδράσεις σας με τα LLMs προσθέτοντας αναμνήσεις μέσω του κουμπιού 'Διαχείριση' παρακάτω, κάνοντάς τα πιο χρήσιμα και προσαρμοσμένα σε εσάς.", "You cannot upload an empty file.": "Δεν μπορείτε να ανεβάσετε ένα κενό αρχείο.", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "Δεν έχετε άδεια να ανεβάσετε αρχεία.", "You have no archived conversations.": "Δεν έχετε αρχειοθετημένες συνομιλίες.", diff --git a/src/lib/i18n/locales/en-GB/translation.json b/src/lib/i18n/locales/en-GB/translation.json index aff1b821c15..41d481530dd 100644 --- a/src/lib/i18n/locales/en-GB/translation.json +++ b/src/lib/i18n/locales/en-GB/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "", "an assistant": "", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "", "Description": "", "Didn't fully follow instructions": "", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "", "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", + "Docling": "", + "Docling Server URL required.": "", "Document": "", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "", "Export to CSV": "", "Export Tools": "", + "External": "", "External Models": "", "Failed to add file.": "", "Failed to create API Key.": "", @@ -983,6 +990,7 @@ "System": "", "System Instructions": "", "System Prompt": "", + "Tags": "", "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "", "You cannot upload an empty file.": "", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "", "You have no archived conversations.": "", diff --git a/src/lib/i18n/locales/en-US/translation.json b/src/lib/i18n/locales/en-US/translation.json index aff1b821c15..41d481530dd 100644 --- a/src/lib/i18n/locales/en-US/translation.json +++ b/src/lib/i18n/locales/en-US/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "", "an assistant": "", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "", "Description": "", "Didn't fully follow instructions": "", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "", "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", + "Docling": "", + "Docling Server URL required.": "", "Document": "", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "", "Export to CSV": "", "Export Tools": "", + "External": "", "External Models": "", "Failed to add file.": "", "Failed to create API Key.": "", @@ -983,6 +990,7 @@ "System": "", "System Instructions": "", "System Prompt": "", + "Tags": "", "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "", "You cannot upload an empty file.": "", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "", "You have no archived conversations.": "", diff --git a/src/lib/i18n/locales/es-ES/translation.json b/src/lib/i18n/locales/es-ES/translation.json index ebbf201ed55..8ddee513cb3 100644 --- a/src/lib/i18n/locales/es-ES/translation.json +++ b/src/lib/i18n/locales/es-ES/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "¿Ya tienes una cuenta?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "Siempre", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "Sorprendente", "an assistant": "un asistente", "Analyzed": "Analizado", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "Describe tu base de conocimientos y objetivos", "Description": "Descripción", "Didn't fully follow instructions": "No siguió las instrucciones", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "Sumérgete en el conocimiento", "Do not install functions from sources you do not fully trust.": "No instale funciones desde fuentes que no confíe totalmente.", "Do not install tools from sources you do not fully trust.": "No instale herramientas desde fuentes que no confíe totalmente.", + "Docling": "", + "Docling Server URL required.": "", "Document": "Documento", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "Ingrese el tamaño del fragmento", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "Ingrese la descripción", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "Exportar Prompts", "Export to CSV": "Exportar a CSV", "Export Tools": "Exportar Herramientas", + "External": "", "External Models": "Modelos Externos", "Failed to add file.": "No se pudo agregar el archivo.", "Failed to create API Key.": "No se pudo crear la clave API.", @@ -983,6 +990,7 @@ "System": "Sistema", "System Instructions": "Instrucciones del 
sistema", "System Prompt": "Prompt del sistema", + "Tags": "", "Tags Generation": "Generación de etiquetas", "Tags Generation Prompt": "Prompt de generación de etiquetas", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Solo puede chatear con un máximo de {{maxCount}} archivo(s) a la vez.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Puede personalizar sus interacciones con LLMs añadiendo memorias a través del botón 'Gestionar' debajo, haciendo que sean más útiles y personalizados para usted.", "You cannot upload an empty file.": "No puede subir un archivo vacío.", - "You do not have permission to access this feature.": "No tiene permiso para acceder a esta función.", "You do not have permission to upload files": "No tiene permiso para subir archivos", "You do not have permission to upload files.": "No tiene permiso para subir archivos.", "You have no archived conversations.": "No tiene conversaciones archivadas.", diff --git a/src/lib/i18n/locales/eu-ES/translation.json b/src/lib/i18n/locales/eu-ES/translation.json index 5c9f674f22d..b70c45f62d0 100644 --- a/src/lib/i18n/locales/eu-ES/translation.json +++ b/src/lib/i18n/locales/eu-ES/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "Baduzu kontu bat?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "Harrigarria", "an assistant": "laguntzaile bat", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "Deskribatu zure ezagutza-basea eta helburuak", "Description": "Deskribapena", "Didn't fully follow instructions": "Ez ditu jarraibideak guztiz jarraitu", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "Murgildu ezagutzan", "Do not install functions from sources you do not fully trust.": "Ez instalatu guztiz fidagarriak ez diren iturrietatik datozen funtzioak.", "Do not install tools from sources you do not fully trust.": "Ez instalatu guztiz fidagarriak ez diren iturrietatik datozen tresnak.", + "Docling": "", + "Docling Server URL required.": "", "Document": "Dokumentua", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "Sartu Zati Tamaina", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "Sartu deskribapena", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "Esportatu Promptak", "Export to CSV": "Esportatu CSVra", "Export Tools": "Esportatu Tresnak", + "External": "", "External Models": "Kanpoko Ereduak", "Failed to add file.": "Huts egin du fitxategia gehitzean.", "Failed to create API Key.": "Huts egin du API Gakoa sortzean.", @@ -983,6 +990,7 @@ "System": "Sistema", "System Instructions": "Sistema jarraibideak", "System 
Prompt": "Sistema prompta", + "Tags": "", "Tags Generation": "", "Tags Generation Prompt": "Etiketa sortzeko prompta", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Gehienez {{maxCount}} fitxategirekin txateatu dezakezu aldi berean.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "LLMekin dituzun interakzioak pertsonalizatu ditzakezu memoriak gehituz beheko 'Kudeatu' botoiaren bidez, lagungarriagoak eta zuretzat egokituagoak eginez.", "You cannot upload an empty file.": "Ezin duzu fitxategi huts bat kargatu.", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "Ez duzu fitxategiak kargatzeko baimenik.", "You have no archived conversations.": "Ez duzu artxibatutako elkarrizketarik.", diff --git a/src/lib/i18n/locales/fa-IR/translation.json b/src/lib/i18n/locales/fa-IR/translation.json index 14a80d3b670..d6198386ecd 100644 --- a/src/lib/i18n/locales/fa-IR/translation.json +++ b/src/lib/i18n/locales/fa-IR/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "از قبل حساب کاربری دارید؟", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "", "an assistant": "یک دستیار", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "", "Description": "توضیحات", "Didn't fully follow instructions": "نمی تواند دستورالعمل را کامل پیگیری کند", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "", "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", + "Docling": "", + "Docling Server URL required.": "", "Document": "سند", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "مقدار Chunk Size را وارد کنید", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "برون\u200cریزی پرامپت\u200cها", "Export to CSV": "", "Export Tools": "برون\u200cریزی ابزارها", + "External": "", "External Models": "مدل\u200cهای بیرونی", "Failed to add file.": "خطا در افزودن پرونده", "Failed to create API Key.": "ایجاد کلید API با خطا مواجه شد.", @@ -983,6 +990,7 @@ "System": "سیستم", "System Instructions": "", "System Prompt": "پرامپت سیستم", + "Tags": "", "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "شما در هر زمان نهایتا می\u200cتوانید با {{maxCount}} پرونده گفتگو کنید.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "", "You cannot upload an empty file.": "", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "", "You have no archived conversations.": "شما هیچ گفتگوی ذخیره شده ندارید.", diff --git a/src/lib/i18n/locales/fi-FI/translation.json b/src/lib/i18n/locales/fi-FI/translation.json index 7d2307a3739..1baca51c27c 100644 --- a/src/lib/i18n/locales/fi-FI/translation.json +++ b/src/lib/i18n/locales/fi-FI/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "Onko sinulla jo tili?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "Aina", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "Hämmästyttävä", "an assistant": "avustaja", "Analyzed": "Analysoitu", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "Kuvaa tietokantasi ja tavoitteesi", "Description": "Kuvaus", "Didn't fully follow instructions": "Ei noudattanut ohjeita täysin", + "Direct": "", "Direct Connections": "Suorat yhteydet", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "Suorat yhteydet mahdollistavat käyttäjien yhdistää omia OpenAI-yhteensopivia API-päätepisteitä.", "Direct Connections settings updated": "Suorien yhteyksien asetukset päivitetty", @@ -315,6 +318,8 @@ "Dive into knowledge": "Uppoudu tietoon", "Do not install functions from sources you do not fully trust.": "Älä asenna toimintoja lähteistä, joihin et luota täysin.", "Do not install tools from sources you do not fully trust.": "Älä asenna työkaluja lähteistä, joihin et luota täysin.", + "Docling": "", + "Docling Server URL required.": "", "Document": "Asiakirja", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "Syötä osien koko", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "Kirjoita kuvaus", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "Verkko-osoitteet erotetaan pilkulla (esim. 
esimerkki.com,sivu.org)", @@ -472,6 +478,7 @@ "Export Prompts": "Vie kehotteet", "Export to CSV": "Vie CSV-tiedostoon", "Export Tools": "Vie työkalut", + "External": "", "External Models": "Ulkoiset mallit", "Failed to add file.": "Tiedoston lisääminen epäonnistui.", "Failed to create API Key.": "API-avaimen luonti epäonnistui.", @@ -983,6 +990,7 @@ "System": "Järjestelmä", "System Instructions": "Järjestelmäohjeet", "System Prompt": "Järjestelmäkehote", + "Tags": "", "Tags Generation": "Tagien luonti", "Tags Generation Prompt": "Tagien luontikehote", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Voit keskustella enintään {{maxCount}} tiedoston kanssa kerralla.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Voit personoida vuorovaikutustasi LLM-ohjelmien kanssa lisäämällä muistoja 'Hallitse'-painikkeen kautta, jolloin ne ovat hyödyllisempiä ja räätälöityjä sinua varten.", "You cannot upload an empty file.": "Et voi ladata tyhjää tiedostoa.", - "You do not have permission to access this feature.": "Sinulla ei ole lupaa tähän ominaisuuteen.", "You do not have permission to upload files": "Sinulla ei ole lupaa ladata tiedostoja", "You do not have permission to upload files.": "Sinulla ei ole lupaa ladata tiedostoja.", "You have no archived conversations.": "Sinulla ei ole arkistoituja keskusteluja.", diff --git a/src/lib/i18n/locales/fr-CA/translation.json b/src/lib/i18n/locales/fr-CA/translation.json index 627293dbae4..69b0b0bbf7e 100644 --- a/src/lib/i18n/locales/fr-CA/translation.json +++ b/src/lib/i18n/locales/fr-CA/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "Avez-vous déjà un compte ?", "Alternative 
to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "", "an assistant": "un assistant", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "", "Description": "Description", "Didn't fully follow instructions": "N'a pas entièrement respecté les instructions", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "", "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", + "Docling": "", + "Docling Server URL required.": "", "Document": "Document", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "Entrez la taille de bloc", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "Exporter les Prompts", "Export to CSV": "", "Export Tools": "Outils d'exportation", + "External": "", "External Models": "Modèles externes", "Failed to add file.": "", "Failed to create API Key.": "Échec de la création de la clé API.", @@ -983,6 +990,7 @@ "System": "Système", "System Instructions": "", "System Prompt": "Prompt du système", + "Tags": "", "Tags Generation": "", "Tags Generation 
Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Vous pouvez personnaliser vos interactions avec les LLM en ajoutant des souvenirs via le bouton 'Gérer' ci-dessous, ce qui les rendra plus utiles et adaptés à vos besoins.", "You cannot upload an empty file.": "", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "", "You have no archived conversations.": "Vous n'avez aucune conversation archivée", diff --git a/src/lib/i18n/locales/fr-FR/translation.json b/src/lib/i18n/locales/fr-FR/translation.json index 2432c80c4f4..60105c712cc 100644 --- a/src/lib/i18n/locales/fr-FR/translation.json +++ b/src/lib/i18n/locales/fr-FR/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "Avez-vous déjà un compte ?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "Incroyable", "an assistant": "un assistant", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "Décrivez votre base de connaissances et vos objectifs", "Description": "Description", "Didn't fully follow instructions": "N'a pas entièrement respecté les instructions", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "Plonger dans les connaissances", "Do not install functions from sources you do not fully trust.": "N'installez pas de fonctions provenant de sources auxquelles vous ne faites pas entièrement confiance.", "Do not install tools from sources you do not fully trust.": "N'installez pas d'outils provenant de sources auxquelles vous ne faites pas entièrement confiance.", + "Docling": "", + "Docling Server URL required.": "", "Document": "Document", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "Entrez la taille des chunks", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "Entrez la description", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "Exporter des prompts", "Export to CSV": "Exporter en CSV", "Export Tools": "Exporter des outils", + "External": "", "External Models": "Modèles externes", "Failed to add file.": "Échec de l'ajout du fichier.", "Failed to create API Key.": "Échec de la création de la clé API.", @@ 
-983,6 +990,7 @@ "System": "Système", "System Instructions": "Instructions système", "System Prompt": "Prompt système", + "Tags": "", "Tags Generation": "Génération de tags", "Tags Generation Prompt": "Prompt de génération de tags", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Vous ne pouvez discuter qu'avec un maximum de {{maxCount}} fichier(s) à la fois.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Vous pouvez personnaliser vos interactions avec les LLM en ajoutant des mémoires à l'aide du bouton « Gérer » ci-dessous, ce qui les rendra plus utiles et mieux adaptées à vos besoins.", "You cannot upload an empty file.": "Vous ne pouvez pas envoyer un fichier vide.", - "You do not have permission to access this feature.": "Vous n'avez pas la permission d'accéder à cette fonctionnalité.", "You do not have permission to upload files": "", "You do not have permission to upload files.": "Vous n'avez pas la permission de télécharger des fichiers.", "You have no archived conversations.": "Vous n'avez aucune conversation archivée.", diff --git a/src/lib/i18n/locales/he-IL/translation.json b/src/lib/i18n/locales/he-IL/translation.json index e2622584e27..8df4b334c7e 100644 --- a/src/lib/i18n/locales/he-IL/translation.json +++ b/src/lib/i18n/locales/he-IL/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "כבר יש לך חשבון?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "", "an assistant": "עוזר", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "", "Description": "תיאור", "Didn't fully follow instructions": "לא עקב אחרי ההוראות באופן מלא", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "", "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", + "Docling": "", + "Docling Server URL required.": "", "Document": "מסמך", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "הזן גודל נתונים", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "ייצוא פקודות", "Export to CSV": "", "Export Tools": "", + "External": "", "External Models": "", "Failed to add file.": "", "Failed to create API Key.": "יצירת מפתח API נכשלה.", @@ -983,6 +990,7 @@ "System": "מערכת", "System Instructions": "", "System Prompt": "תגובת מערכת", + "Tags": "", "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "", "You cannot upload an empty file.": "", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "", "You have no archived conversations.": "אין לך שיחות בארכיון.", diff --git a/src/lib/i18n/locales/hi-IN/translation.json b/src/lib/i18n/locales/hi-IN/translation.json index 0bda78ca2db..47630882963 100644 --- a/src/lib/i18n/locales/hi-IN/translation.json +++ b/src/lib/i18n/locales/hi-IN/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "क्या आपके पास पहले से एक खाता मौजूद है?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "", "an assistant": "एक सहायक", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "", "Description": "विवरण", "Didn't fully follow instructions": "निर्देशों का पूरी तरह से पालन नहीं किया", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "", "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", + "Docling": "", + "Docling Server URL required.": "", "Document": "दस्तावेज़", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "खंड आकार दर्ज करें", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "प्रॉम्प्ट निर्यात करें", "Export to CSV": "", "Export Tools": "", + "External": "", "External Models": "", "Failed to add file.": "", "Failed to create API Key.": "एपीआई कुंजी बनाने में विफल.", @@ -983,6 +990,7 @@ "System": "सिस्टम", "System Instructions": "", "System Prompt": "सिस्टम प्रॉम्प्ट", + "Tags": "", "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "", "You cannot upload an empty file.": "", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "", "You have no archived conversations.": "आपको कोई अंकित चैट नहीं है।", diff --git a/src/lib/i18n/locales/hr-HR/translation.json b/src/lib/i18n/locales/hr-HR/translation.json index befeeff4669..db1d2ba4afc 100644 --- a/src/lib/i18n/locales/hr-HR/translation.json +++ b/src/lib/i18n/locales/hr-HR/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "Već imate račun?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "", "an assistant": "asistent", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "", "Description": "Opis", "Didn't fully follow instructions": "Nije u potpunosti slijedio upute", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "", "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", + "Docling": "", + "Docling Server URL required.": "", "Document": "Dokument", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "Unesite veličinu dijela", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "Izvoz prompta", "Export to CSV": "", "Export Tools": "Izvoz alata", + "External": "", "External Models": "Vanjski modeli", "Failed to add file.": "", "Failed to create API Key.": "Neuspješno stvaranje API ključa.", @@ -983,6 +990,7 @@ "System": "Sustav", "System Instructions": "", "System Prompt": "Sistemski prompt", + "Tags": "", "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Možete personalizirati svoje interakcije s LLM-ima dodavanjem uspomena putem gumba 'Upravljanje' u nastavku, čineći ih korisnijima i prilagođenijima vama.", "You cannot upload an empty file.": "", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "", "You have no archived conversations.": "Nemate arhiviranih razgovora.", diff --git a/src/lib/i18n/locales/hu-HU/translation.json b/src/lib/i18n/locales/hu-HU/translation.json index ecf16ca327b..107b9ba3110 100644 --- a/src/lib/i18n/locales/hu-HU/translation.json +++ b/src/lib/i18n/locales/hu-HU/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "Már van fiókod?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "", "an assistant": "egy asszisztens", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "", "Description": "Leírás", "Didn't fully follow instructions": "Nem követte teljesen az utasításokat", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "", "Do not install functions from sources you do not fully trust.": "Ne telepíts funkciókat olyan forrásokból, amelyekben nem bízol teljesen.", "Do not install tools from sources you do not fully trust.": "Ne telepíts eszközöket olyan forrásokból, amelyekben nem bízol teljesen.", + "Docling": "", + "Docling Server URL required.": "", "Document": "Dokumentum", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "Add meg a darab méretet", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "Add meg a leírást", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "Promptok exportálása", "Export to CSV": "", "Export Tools": "Eszközök exportálása", + "External": "", "External Models": "Külső modellek", "Failed to add file.": "Nem sikerült hozzáadni a fájlt.", "Failed to create API Key.": "Nem sikerült létrehozni az API kulcsot.", @@ -983,6 +990,7 @@ "System": "Rendszer", "System Instructions": "Rendszer utasítások", "System Prompt": "Rendszer prompt", + "Tags": "", "Tags Generation": "", "Tags Generation 
Prompt": "Címke generálási prompt", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Egyszerre maximum {{maxCount}} fájllal tud csevegni.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Az LLM-ekkel való interakcióit személyre szabhatja emlékek hozzáadásával a lenti 'Kezelés' gomb segítségével, így azok még hasznosabbak és személyre szabottabbak lesznek.", "You cannot upload an empty file.": "Nem tölthet fel üres fájlt.", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "", "You have no archived conversations.": "Nincsenek archivált beszélgetései.", diff --git a/src/lib/i18n/locales/id-ID/translation.json b/src/lib/i18n/locales/id-ID/translation.json index d028ec2b4f4..d7e2624c219 100644 --- a/src/lib/i18n/locales/id-ID/translation.json +++ b/src/lib/i18n/locales/id-ID/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "Sudah memiliki akun?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "", "an assistant": "asisten", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "", "Description": "Deskripsi", "Didn't fully follow instructions": "Tidak sepenuhnya mengikuti instruksi", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "", "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", + "Docling": "", + "Docling Server URL required.": "", "Document": "Dokumen", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "Masukkan Ukuran Potongan", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "Perintah Ekspor", "Export to CSV": "", "Export Tools": "Alat Ekspor", + "External": "", "External Models": "Model Eksternal", "Failed to add file.": "", "Failed to create API Key.": "Gagal membuat API Key.", @@ -983,6 +990,7 @@ "System": "Sistem", "System Instructions": "", "System Prompt": "Permintaan Sistem", + "Tags": "", "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Anda dapat mempersonalisasi interaksi Anda dengan LLM dengan menambahkan kenangan melalui tombol 'Kelola' di bawah ini, sehingga lebih bermanfaat dan disesuaikan untuk Anda.", "You cannot upload an empty file.": "", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "", "You have no archived conversations.": "Anda tidak memiliki percakapan yang diarsipkan.", diff --git a/src/lib/i18n/locales/ie-GA/translation.json b/src/lib/i18n/locales/ie-GA/translation.json index 5e3faf43b40..06aa65ae394 100644 --- a/src/lib/i18n/locales/ie-GA/translation.json +++ b/src/lib/i18n/locales/ie-GA/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "Tá cuntas agat cheana féin?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "I gcónaí", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "Iontach", "an assistant": "cúntóir", "Analyzed": "Anailísithe", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "Déan cur síos ar do bhunachar eolais agus do chuspóirí", "Description": "Cur síos", "Didn't fully follow instructions": "Níor lean sé treoracha go hiomlán", + "Direct": "", "Direct Connections": "Naisc Dhíreacha", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "Ligeann Connections Direct d’úsáideoirí ceangal lena gcríochphointí API féin atá comhoiriúnach le OpenAI.", "Direct Connections settings updated": "Nuashonraíodh socruithe Connections Direct", @@ -315,6 +318,8 @@ "Dive into knowledge": "Léim isteach eolas", "Do not install functions from sources you do not fully trust.": "Ná suiteáil feidhmeanna ó fhoinsí nach bhfuil muinín iomlán agat.", "Do not install tools from sources you do not fully trust.": "Ná suiteáil uirlisí ó fhoinsí nach bhfuil muinín iomlán agat.", + "Docling": "", + "Docling Server URL required.": "", "Document": "Doiciméad", "Document Intelligence": "Faisnéise Doiciméad", "Document Intelligence endpoint and key required.": "Críochphointe Faisnéise Doiciméad agus eochair ag teastáil.", @@ -385,6 +390,7 @@ "Enter Chunk Size": "Cuir isteach Méid an Smután", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "Iontráil cur síos", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "Iontráil Críochphointe Faisnéise Doiciméid", "Enter Document Intelligence Key": "Iontráil Eochair Faisnéise Doiciméad", "Enter domains separated by commas (e.g., example.com,site.org)": "Cuir isteach fearainn atá scartha le camóga (m.sh., example.com,site.org)", @@ -472,6 +478,7 @@ 
"Export Prompts": "Leideanna Easpórtála", "Export to CSV": "Easpórtáil go CSV", "Export Tools": "Uirlisí Easpór", + "External": "", "External Models": "Múnlaí Seachtracha", "Failed to add file.": "Theip ar an gcomhad a chur leis.", "Failed to create API Key.": "Theip ar an eochair API a chruthú.", @@ -983,6 +990,7 @@ "System": "Córas", "System Instructions": "Treoracha Córas", "System Prompt": "Córas Leid", + "Tags": "", "Tags Generation": "Giniúint Clibeanna", "Tags Generation Prompt": "Clibeanna Giniúint Leid", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "Úsáidtear sampláil saor ó eireabaill chun tionchar na n-chomharthaí ón aschur nach bhfuil chomh dóchúil céanna a laghdú. Laghdóidh luach níos airde (m.sh., 2.0) an tionchar níos mó, agus díchumasaíonn luach 1.0 an socrú seo. (réamhshocraithe: 1)", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Ní féidir leat comhrá a dhéanamh ach le comhad {{maxCount}} ar a mhéad ag an am.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Is féidir leat do chuid idirghníomhaíochtaí le LLManna a phearsantú ach cuimhní cinn a chur leis tríd an gcnaipe 'Bainistigh' thíos, rud a fhágann go mbeidh siad níos cabhrach agus níos oiriúnaí duit.", "You cannot upload an empty file.": "Ní féidir leat comhad folamh a uaslódáil.", - "You do not have permission to access this feature.": "Níl cead agat rochtain a fháil ar an ngné seo.", "You do not have permission to upload files": "Níl cead agat comhaid a uaslódáil", "You do not have permission to upload files.": "Níl cead agat comhaid a uaslódáil.", "You have no archived conversations.": "Níl aon chomhráite cartlainne agat.", diff --git a/src/lib/i18n/locales/it-IT/translation.json 
b/src/lib/i18n/locales/it-IT/translation.json index a189ca68da5..51918808acb 100644 --- a/src/lib/i18n/locales/it-IT/translation.json +++ b/src/lib/i18n/locales/it-IT/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "Hai già un account?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "", "an assistant": "un assistente", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "", "Description": "Descrizione", "Didn't fully follow instructions": "Non ha seguito completamente le istruzioni", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "", "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", + "Docling": "", + "Docling Server URL required.": "", "Document": "Documento", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "Inserisci la dimensione chunk", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "Esporta prompt", "Export to CSV": "", "Export Tools": "", + "External": "", "External Models": "", "Failed to add file.": "", 
"Failed to create API Key.": "Impossibile creare la chiave API.", @@ -983,6 +990,7 @@ "System": "Sistema", "System Instructions": "", "System Prompt": "Prompt di sistema", + "Tags": "", "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "", "You cannot upload an empty file.": "", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "", "You have no archived conversations.": "Non hai conversazioni archiviate.", diff --git a/src/lib/i18n/locales/ja-JP/translation.json b/src/lib/i18n/locales/ja-JP/translation.json index b0b9fc5f74f..11ac2023e6f 100644 --- a/src/lib/i18n/locales/ja-JP/translation.json +++ b/src/lib/i18n/locales/ja-JP/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "すでにアカウントをお持ちですか?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "", "an assistant": "アシスタント", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "", "Description": "説明", "Didn't fully follow instructions": "説明に沿って操作していませんでした", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "", "Do not install functions from sources you do not fully trust.": "信頼できないソースからFunctionをインストールしないでください。", "Do not install tools from sources you do not fully trust.": "信頼出来ないソースからツールをインストールしないでください。", + "Docling": "", + "Docling Server URL required.": "", "Document": "ドキュメント", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "チャンクサイズを入力してください", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "プロンプトをエクスポート", "Export to CSV": "", "Export Tools": "ツールのエクスポート", + "External": "", "External Models": "外部モデル", "Failed to add file.": "", "Failed to create API Key.": "APIキーの作成に失敗しました。", @@ -983,6 +990,7 @@ "System": "システム", "System Instructions": "", "System Prompt": "システムプロンプト", + "Tags": "", "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "", "You cannot upload an empty file.": "", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "", "You have no archived conversations.": "これまでにアーカイブされた会話はありません。", diff --git a/src/lib/i18n/locales/ka-GE/translation.json b/src/lib/i18n/locales/ka-GE/translation.json index 1e82b51b791..bb6e324282f 100644 --- a/src/lib/i18n/locales/ka-GE/translation.json +++ b/src/lib/i18n/locales/ka-GE/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "უკვე გაქვთ ანგარიში?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "ყოველთვის", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "გადასარევია", "an assistant": "დამხმარე", "Analyzed": "გაანაზლიებულია", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "", "Description": "აღწერა", "Didn't fully follow instructions": "ინსტრუქციებს სრულად არ მივყევი", + "Direct": "", "Direct Connections": "პირდაპირი მიერთება", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "", "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", + "Docling": "", + "Docling Server URL required.": "", "Document": "დოკუმენტი", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "შეიყვანე ფრაგმენტის ზომა", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "შეიყვანეთ აღწერა", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "მოთხოვნების გატანა", "Export to CSV": "CVS-ში გატანა", "Export Tools": "", + "External": "", "External Models": "", "Failed to add file.": "ფაილის დამატების შეცდომა.", "Failed to create API Key.": "API-ის გასაღების შექმნა ჩავარდა.", @@ -983,6 +990,7 @@ "System": "სისტემა", "System Instructions": "", "System Prompt": "სისტემური მოთხოვნა", + "Tags": "", "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "", "You cannot upload an empty file.": "", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "", "You have no archived conversations.": "დაარქივებული საუბრები არ გაქვთ.", diff --git a/src/lib/i18n/locales/ko-KR/translation.json b/src/lib/i18n/locales/ko-KR/translation.json index 545b9842771..3a8c33ff0cf 100644 --- a/src/lib/i18n/locales/ko-KR/translation.json +++ b/src/lib/i18n/locales/ko-KR/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "이미 계정이 있으신가요?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "놀라움", "an assistant": "어시스턴트", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "지식 기반에 대한 설명과 목적을 입력하세요", "Description": "설명", "Didn't fully follow instructions": "완전히 지침을 따르지 않음", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "", "Do not install functions from sources you do not fully trust.": "불분명한 출처를 가진 함수를 설치하지마세요", "Do not install tools from sources you do not fully trust.": "불분명한 출처를 가진 도구를 설치하지마세요", + "Docling": "", + "Docling Server URL required.": "", "Document": "문서", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "청크 크기 입력", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "설명 입력", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "프롬프트 내보내기", "Export to CSV": "", "Export Tools": "도구 내보내기", + "External": "", "External Models": "외부 모델", "Failed to add file.": "파일추가에 실패했습니다", "Failed to create API Key.": "API 키 생성에 실패했습니다.", @@ -983,6 +990,7 @@ "System": "시스템", "System Instructions": "시스템 설명서", "System Prompt": "시스템 프롬프트", + "Tags": "", "Tags Generation": "태그 생성", "Tags Generation Prompt": "태그 생성 프롬프트", "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "동시에 최대 {{maxCount}} 파일과만 대화할 수 있습니다 ", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "아래 '관리' 버튼으로 메모리를 추가하여 LLM들과의 상호작용을 개인화할 수 있습니다. 이를 통해 더 유용하고 맞춤화된 경험을 제공합니다.", "You cannot upload an empty file.": "빈 파일을 업로드 할 수 없습니다", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "", "You have no archived conversations.": "채팅을 보관한 적이 없습니다.", diff --git a/src/lib/i18n/locales/lt-LT/translation.json b/src/lib/i18n/locales/lt-LT/translation.json index 4fca2115ffe..9607dbcc377 100644 --- a/src/lib/i18n/locales/lt-LT/translation.json +++ b/src/lib/i18n/locales/lt-LT/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "Ar jau turite paskyrą?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "", "an assistant": "assistentas", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "", "Description": "Aprašymas", "Didn't fully follow instructions": "Pilnai nesekė instrukcijų", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "", "Do not install functions from sources you do not fully trust.": "Neinstaliuokite funkcijų iš nepatikimų šaltinių", "Do not install tools from sources you do not fully trust.": "Neinstaliuokite įrankių iš nepatikimų šaltinių", + "Docling": "", + "Docling Server URL required.": "", "Document": "Dokumentas", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "Įveskite blokų dydį", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "Eksportuoti užklausas", "Export to CSV": "", "Export Tools": "Eksportuoti įrankius", + "External": "", "External Models": "Išoriniai modeliai", "Failed to add file.": "", "Failed to create API Key.": "Nepavyko sukurti API rakto", @@ -983,6 +990,7 @@ "System": "Sistema", "System Instructions": "", "System Prompt": "Sistemos užklausa", + "Tags": "", "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Galite pagerinti modelių darbą suteikdami jiems atminties funkcionalumą.", "You cannot upload an empty file.": "", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "", "You have no archived conversations.": "Jūs neturite archyvuotų pokalbių", diff --git a/src/lib/i18n/locales/ms-MY/translation.json b/src/lib/i18n/locales/ms-MY/translation.json index 07290ec363d..a4810c0b9f9 100644 --- a/src/lib/i18n/locales/ms-MY/translation.json +++ b/src/lib/i18n/locales/ms-MY/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "Telah mempunyai akaun?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "", "an assistant": "seorang pembantu", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "", "Description": "Penerangan", "Didn't fully follow instructions": "Tidak mengikut arahan sepenuhnya", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "", "Do not install functions from sources you do not fully trust.": "Jangan pasang fungsi daripada sumber yang anda tidak percayai sepenuhnya.", "Do not install tools from sources you do not fully trust.": "Jangan pasang alat daripada sumber yang anda tidak percaya sepenuhnya.", + "Docling": "", + "Docling Server URL required.": "", "Document": "Dokumen", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "Masukkan Saiz 'Chunk'", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "Eksport Gesaan", "Export to CSV": "", "Export Tools": "Eksport Alat", + "External": "", "External Models": "Model Luaran", "Failed to add file.": "", "Failed to create API Key.": "Gagal mencipta kekunci API", @@ -983,6 +990,7 @@ "System": "Sistem", "System Instructions": "", "System Prompt": "Gesaan Sistem", + "Tags": "", "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Anda boleh memperibadikan interaksi anda dengan LLM dengan menambahkan memori melalui butang 'Urus' di bawah, menjadikannya lebih membantu dan disesuaikan dengan anda.", "You cannot upload an empty file.": "", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "", "You have no archived conversations.": "Anda tidak mempunyai perbualan yang diarkibkan", diff --git a/src/lib/i18n/locales/nb-NO/translation.json b/src/lib/i18n/locales/nb-NO/translation.json index d46a4dc15d5..977ffd89715 100644 --- a/src/lib/i18n/locales/nb-NO/translation.json +++ b/src/lib/i18n/locales/nb-NO/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "Har du allerede en konto?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "Alltid", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "Flott", "an assistant": "en assistent", "Analyzed": "Analysert", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "Beskriv kunnskapsbasen din og målene dine", "Description": "Beskrivelse", "Didn't fully follow instructions": "Fulgte ikke instruksjonene fullstendig", + "Direct": "", "Direct Connections": "Direkte koblinger", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "Med direkte koblinger kan brukerne koble til egne OpenAI-kompatible API-endepunkter.", "Direct Connections settings updated": "Innstillinger for direkte koblinger er oppdatert", @@ -315,6 +318,8 @@ "Dive into knowledge": "Bli kjent med kunnskap", "Do not install functions from sources you do not fully trust.": "Ikke installer funksjoner fra kilder du ikke stoler på.", "Do not install tools from sources you do not fully trust.": "Ikke installer verktøy fra kilder du ikke stoler på.", + "Docling": "", + "Docling Server URL required.": "", "Document": "Dokument", "Document Intelligence": "Intelligens i dokumenter", "Document Intelligence endpoint and key required.": "Det kreves et endepunkt og en nøkkel for Intelligens i dokumenter", @@ -385,6 +390,7 @@ "Enter Chunk Size": "Angi Chunk-størrelse", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "Angi beskrivelse", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "Angi endepunkt for Intelligens i dokumenter", "Enter Document Intelligence Key": "Angi nøkkel for Intelligens i dokumenter", "Enter domains separated by commas (e.g., example.com,site.org)": "Angi domener atskilt med komma (f.eks. 
eksempel.com, side.org)", @@ -472,6 +478,7 @@ "Export Prompts": "Eksporter ledetekster", "Export to CSV": "Eksporter til CSV", "Export Tools": "Eksporter verktøy", + "External": "", "External Models": "Eksterne modeller", "Failed to add file.": "Kan ikke legge til filen.", "Failed to create API Key.": "Kan ikke opprette en API-nøkkel.", @@ -983,6 +990,7 @@ "System": "System", "System Instructions": "Systeminstruksjoner", "System Prompt": "Systemledetekst", + "Tags": "", "Tags Generation": "Genering av etiketter", "Tags Generation Prompt": "Ledetekst for genering av etikett", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Du kan bare chatte med maksimalt {{maxCount}} fil(er) om gangen.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Du kan tilpasse interaksjonene dine med språkmodeller ved å legge til minner gjennom Administrer-knappen nedenfor, slik at de blir mer til nyttige og tilpasset deg.", "You cannot upload an empty file.": "Du kan ikke laste opp en tom fil.", - "You do not have permission to access this feature.": "Du har ikke tillatelse til å bruke denne funksjonen.", "You do not have permission to upload files": "Du har ikke tillatelse til å laste opp filer", "You do not have permission to upload files.": "Du har ikke tillatelse til å laste opp filer.", "You have no archived conversations.": "Du har ingen arkiverte samtaler.", diff --git a/src/lib/i18n/locales/nl-NL/translation.json b/src/lib/i18n/locales/nl-NL/translation.json index 3bf608008de..5c2bb304103 100644 --- a/src/lib/i18n/locales/nl-NL/translation.json +++ b/src/lib/i18n/locales/nl-NL/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "Heb 
je al een account?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "Geweldig", "an assistant": "een assistent", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "Beschrijf je kennisbasis en doelstellingen", "Description": "Beschrijving", "Didn't fully follow instructions": "Heeft niet alle instructies gevolgt", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "Duik in kennis", "Do not install functions from sources you do not fully trust.": "Installeer geen functies vanuit bronnen die je niet volledig vertrouwt", "Do not install tools from sources you do not fully trust.": "Installeer geen tools vanuit bronnen die je niet volledig vertrouwt.", + "Docling": "", + "Docling Server URL required.": "", "Document": "Document", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "Voeg Chunk Size toe", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "Voer beschrijving in", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "Exporteer Prompts", "Export to CSV": "Exporteer naar CSV", "Export Tools": "Exporteer gereedschappen", + "External": "", "External Models": "Externe 
modules", "Failed to add file.": "Het is niet gelukt om het bestand toe te voegen.", "Failed to create API Key.": "Kan API Key niet aanmaken.", @@ -983,6 +990,7 @@ "System": "Systeem", "System Instructions": "Systeem instructies", "System Prompt": "Systeem prompt", + "Tags": "", "Tags Generation": "", "Tags Generation Prompt": "Prompt voor taggeneratie", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Je kunt slechts met maximaal {{maxCount}} bestand(en) tegelijk chatten", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Je kunt je interacties met LLM's personaliseren door herinneringen toe te voegen via de 'Beheer'-knop hieronder, waardoor ze nuttiger en voor jou op maat gemaakt worden.", "You cannot upload an empty file.": "Je kunt een leeg bestand niet uploaden.", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "Je hebt geen toestemming om bestanden up te loaden", "You have no archived conversations.": "Je hebt geen gearchiveerde gesprekken.", diff --git a/src/lib/i18n/locales/pa-IN/translation.json b/src/lib/i18n/locales/pa-IN/translation.json index a46aff4465d..43908e2cdb9 100644 --- a/src/lib/i18n/locales/pa-IN/translation.json +++ b/src/lib/i18n/locales/pa-IN/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "ਪਹਿਲਾਂ ਹੀ ਖਾਤਾ ਹੈ?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "", "an assistant": "ਇੱਕ ਸਹਾਇਕ", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "", "Description": "ਵਰਣਨਾ", "Didn't fully follow instructions": "ਹਦਾਇਤਾਂ ਨੂੰ ਪੂਰੀ ਤਰ੍ਹਾਂ ਫਾਲੋ ਨਹੀਂ ਕੀਤਾ", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "", "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", + "Docling": "", + "Docling Server URL required.": "", "Document": "ਡਾਕੂਮੈਂਟ", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "ਚੰਕ ਆਕਾਰ ਦਰਜ ਕਰੋ", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "ਪ੍ਰੰਪਟ ਨਿਰਯਾਤ ਕਰੋ", "Export to CSV": "", "Export Tools": "", + "External": "", "External Models": "", "Failed to add file.": "", "Failed to create API Key.": "API ਕੁੰਜੀ ਬਣਾਉਣ ਵਿੱਚ ਅਸਫਲ।", @@ -983,6 +990,7 @@ "System": "ਸਿਸਟਮ", "System Instructions": "", "System Prompt": "ਸਿਸਟਮ ਪ੍ਰੰਪਟ", + "Tags": "", "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "", "You cannot upload an empty file.": "", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "", "You have no archived conversations.": "ਤੁਹਾਡੇ ਕੋਲ ਕੋਈ ਆਰਕਾਈਵ ਕੀਤੀਆਂ ਗੱਲਾਂ ਨਹੀਂ ਹਨ।", diff --git a/src/lib/i18n/locales/pl-PL/translation.json b/src/lib/i18n/locales/pl-PL/translation.json index ab0f6cc95a3..649cb5ae21a 100644 --- a/src/lib/i18n/locales/pl-PL/translation.json +++ b/src/lib/i18n/locales/pl-PL/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "Czy masz już konto?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "Zawsze", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "Niesamowite", "an assistant": "asystent", "Analyzed": "Przeanalizowane", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "Opisz swoją bazę wiedzy i cele", "Description": "Opis", "Didn't fully follow instructions": "Nie wykonał w pełni instrukcji", + "Direct": "", "Direct Connections": "Połączenia bezpośrednie", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "Połączenia bezpośrednie umożliwiają użytkownikom łączenie się z własnymi końcówkami API kompatybilnymi z OpenAI.", "Direct Connections settings updated": "Ustawienia połączeń bezpośrednich zaktualizowane", @@ -315,6 +318,8 @@ "Dive into knowledge": "Zanurz się w wiedzy", "Do not install functions from sources you do not fully trust.": "Nie instaluj funkcji ze źródeł, którym nie ufasz w pełni.", "Do not install tools from sources you do not fully trust.": "Nie instaluj narzędzi ze źródeł, którym nie ufasz w pełni.", + "Docling": "", + "Docling Server URL required.": "", "Document": "Dokument", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "Wprowadź wielkość bloku", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "Wprowadź opis", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "Wprowadź domeny oddzielone przecinkami (np. 
example.com, site.org)", @@ -472,6 +478,7 @@ "Export Prompts": "Eksportuj prompty", "Export to CSV": "Eksport do CSV", "Export Tools": "Eksportuj narzędzia", + "External": "", "External Models": "Zewnętrzne modele", "Failed to add file.": "Nie udało się dodać pliku.", "Failed to create API Key.": "Nie udało się wygenerować klucza API.", @@ -983,6 +990,7 @@ "System": "System", "System Instructions": "Instrukcje systemowe", "System Prompt": "Podpowiedź systemowa", + "Tags": "", "Tags Generation": "Generowanie tagów", "Tags Generation Prompt": "Podpowiedź do generowania tagów", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Możesz rozmawiać jednocześnie maksymalnie z {{maxCount}} plikiem(i).", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Możesz spersonalizować swoje interakcje z LLM, dodając wspomnienia za pomocą przycisku 'Zarządzaj' poniżej, dzięki czemu będą one bardziej pomocne i dostosowane do Ciebie.", "You cannot upload an empty file.": "Nie możesz przesłać pustego pliku.", - "You do not have permission to access this feature.": "Nie masz uprawnień do korzystania z tej funkcji.", "You do not have permission to upload files": "Nie masz uprawnień do przesyłania plików.", "You do not have permission to upload files.": "Nie masz uprawnień do przesyłania plików.", "You have no archived conversations.": "Nie posiadasz zarchiwizowanych konwersacji.", diff --git a/src/lib/i18n/locales/pt-BR/translation.json b/src/lib/i18n/locales/pt-BR/translation.json index 3466df15184..8629a2f3a23 100644 --- a/src/lib/i18n/locales/pt-BR/translation.json +++ b/src/lib/i18n/locales/pt-BR/translation.json @@ -68,6 +68,8 @@ "Already have an 
account?": "Já tem uma conta?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "Incrível", "an assistant": "um assistente", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "Descreva sua base de conhecimento e objetivos", "Description": "Descrição", "Didn't fully follow instructions": "Não seguiu completamente as instruções", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "Explorar base de conhecimento", "Do not install functions from sources you do not fully trust.": "Não instale funções de fontes que você não confia totalmente.", "Do not install tools from sources you do not fully trust.": "Não instale ferramentas de fontes que você não confia totalmente.", + "Docling": "", + "Docling Server URL required.": "", "Document": "Documento", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "Digite o Tamanho do Chunk", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "Digite a descrição", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "Exportar Prompts", "Export to CSV": "Exportar para CSV", "Export Tools": "Exportar Ferramentas", + "External": "", "External 
Models": "Modelos Externos", "Failed to add file.": "Falha ao adicionar arquivo.", "Failed to create API Key.": "Falha ao criar a Chave API.", @@ -983,6 +990,7 @@ "System": "Sistema", "System Instructions": "Instruções do sistema", "System Prompt": "Prompt do Sistema", + "Tags": "", "Tags Generation": "", "Tags Generation Prompt": "Prompt para geração de Tags", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Você só pode conversar com no máximo {{maxCount}} arquivo(s) de cada vez.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Você pode personalizar suas interações com LLMs adicionando memórias através do botão 'Gerenciar' abaixo, tornando-as mais úteis e adaptadas a você.", "You cannot upload an empty file.": "Você não pode carregar um arquivo vazio.", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "Você não tem permissão para fazer upload de arquivos.", "You have no archived conversations.": "Você não tem conversas arquivadas.", diff --git a/src/lib/i18n/locales/pt-PT/translation.json b/src/lib/i18n/locales/pt-PT/translation.json index afd18a20a5a..6b19c3743bc 100644 --- a/src/lib/i18n/locales/pt-PT/translation.json +++ b/src/lib/i18n/locales/pt-PT/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "Já tem uma conta?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "", "an assistant": "um assistente", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "", "Description": "Descrição", "Didn't fully follow instructions": "Não seguiu instruções com precisão", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "", "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", + "Docling": "", + "Docling Server URL required.": "", "Document": "Documento", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "Escreva o Tamanho do Fragmento", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "Exportar Prompts", "Export to CSV": "", "Export Tools": "", + "External": "", "External Models": "Modelos Externos", "Failed to add file.": "", "Failed to create API Key.": "Falha ao criar a Chave da API.", @@ -983,6 +990,7 @@ "System": "Sistema", "System Instructions": "", "System Prompt": "Prompt do Sistema", + "Tags": "", "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Você pode personalizar as suas interações com LLMs adicionando memórias através do botão ‘Gerir’ abaixo, tornando-as mais úteis e personalizadas para você.", "You cannot upload an empty file.": "", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "", "You have no archived conversations.": "Você não tem conversas arquivadas.", diff --git a/src/lib/i18n/locales/ro-RO/translation.json b/src/lib/i18n/locales/ro-RO/translation.json index 4413efed4fd..9bd24f105db 100644 --- a/src/lib/i18n/locales/ro-RO/translation.json +++ b/src/lib/i18n/locales/ro-RO/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "Deja ai un cont?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "Întotdeauna", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "Uimitor", "an assistant": "un asistent", "Analyzed": "Analizat", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "", "Description": "Descriere", "Didn't fully follow instructions": "Nu a urmat complet instrucțiunile", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "", "Do not install functions from sources you do not fully trust.": "Nu instalați funcții din surse în care nu aveți încredere completă.", "Do not install tools from sources you do not fully trust.": "Nu instalați instrumente din surse în care nu aveți încredere completă.", + "Docling": "", + "Docling Server URL required.": "", "Document": "Document", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "Introduceți Dimensiunea Blocului", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "Introduceți descrierea", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "Exportă Prompturile", "Export to CSV": "", "Export Tools": "Exportă Instrumentele", + "External": "", "External Models": "Modele Externe", "Failed to add file.": "Eșec la adăugarea fișierului.", "Failed to create API Key.": "Crearea cheii API a eșuat.", @@ -983,6 +990,7 @@ "System": "Sistem", "System Instructions": "Instrucțiuni pentru sistem", "System Prompt": "Prompt de Sistem", + "Tags": "", "Tags Generation": 
"", "Tags Generation Prompt": "Generarea de Etichete Prompt", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Puteți discuta cu un număr maxim de {{maxCount}} fișier(e) simultan.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Puteți personaliza interacțiunile dvs. cu LLM-urile adăugând amintiri prin butonul 'Gestionează' de mai jos, făcându-le mai utile și adaptate la dvs.", "You cannot upload an empty file.": "Nu poți încărca un fișier gol.", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "", "You have no archived conversations.": "Nu aveți conversații arhivate.", diff --git a/src/lib/i18n/locales/ru-RU/translation.json b/src/lib/i18n/locales/ru-RU/translation.json index 2342a47b936..0db3741187f 100644 --- a/src/lib/i18n/locales/ru-RU/translation.json +++ b/src/lib/i18n/locales/ru-RU/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "У вас уже есть учетная запись?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "Альтернатива top_p и направлена на обеспечение баланса качества и разнообразия. Параметр p представляет минимальную вероятность того, что токен будет рассмотрен, по сравнению с вероятностью наиболее вероятного токена. 
Например, при p=0,05 и наиболее вероятном значении токена, имеющем вероятность 0,9, логиты со значением менее 0,045 отфильтровываются.", "Always": "Всегда", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "Удивительный", "an assistant": "ассистент", "Analyzed": "Проанализировано", @@ -270,6 +272,7 @@ "Default Prompt Suggestions": "Предложения промптов по умолчанию", "Default to 389 or 636 if TLS is enabled": "По умолчанию 389 или 636, если TLS включен.", "Default to ALL": "По умолчанию ВСЕ", + "Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "", "Default User Role": "Роль пользователя по умолчанию", "Delete": "Удалить", "Delete a model": "Удалить модель", @@ -292,6 +295,7 @@ "Describe your knowledge base and objectives": "Опишите свою базу знаний и цели", "Description": "Описание", "Didn't fully follow instructions": "Не полностью следует инструкциям", + "Direct": "", "Direct Connections": "Прямые подключения", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "Прямые подключения позволяют пользователям подключаться к своим собственным конечным точкам API, совместимым с OpenAI.", "Direct Connections settings updated": "Настройки прямых подключений обновлены", @@ -314,6 +318,8 @@ "Dive into knowledge": "Погрузитесь в знания", "Do not install functions from sources you do not fully trust.": "Не устанавливайте функции из источников, которым вы не полностью доверяете.", "Do not install tools from sources you do not fully trust.": "Не устанавливайте инструменты из источников, которым вы не полностью доверяете.", + "Docling": "", + "Docling Server URL required.": "", "Document": "Документ", "Document Intelligence": "Интеллектуальный анализ документов", "Document Intelligence endpoint and key required.": "Требуется энд-поинт анализа документов и ключ.", @@ -384,6 +390,7 @@ "Enter Chunk Size": "Введите размер фрагмента", "Enter 
comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "Введите пары \"token:bias_value\", разделенные запятыми (пример: 5432:100, 413:-100).", "Enter description": "Введите описание", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "Введите энд-поинт анализа документов", "Enter Document Intelligence Key": "Введите ключ для анализа документов", "Enter domains separated by commas (e.g., example.com,site.org)": "Введите домены, разделенные запятыми (например, example.com,site.org)", @@ -471,6 +478,7 @@ "Export Prompts": "Экспортировать промпты", "Export to CSV": "Экспортировать в CSV", "Export Tools": "Экспортировать инструменты", + "External": "", "External Models": "Внешние модели", "Failed to add file.": "Не удалось добавить файл.", "Failed to create API Key.": "Не удалось создать ключ API.", @@ -566,8 +574,7 @@ "Image Generation": "Генерация изображений", "Image Generation (Experimental)": "Генерация изображений (Экспериментально)", "Image Generation Engine": "Механизм генерации изображений", - "Image Max Compression Size": "Image Max Compression Size -Максимальный размер сжатия изображения", + "Image Max Compression Size": "Максимальный размер сжатия изображения", "Image Prompt Generation": "Генерация промпта к изображению", "Image Prompt Generation Prompt": "Промпт для создание промпта изображения", "Image Settings": "Настройки изображения", @@ -584,6 +591,7 @@ "Include `--api` flag when running stable-diffusion-webui": "Добавьте флаг `--api` при запуске stable-diffusion-webui", "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "Влияет на то, насколько быстро алгоритм реагирует на обратную связь из сгенерированного текста. 
Более низкая скорость обучения приведет к более медленной корректировке, в то время как более высокая скорость обучения сделает алгоритм более отзывчивым.", "Info": "Информация", + "Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "", "Input commands": "Введите команды", "Install from Github URL": "Установка с URL-адреса Github", "Instant Auto-Send After Voice Transcription": "Мгновенная автоматическая отправка после расшифровки голоса", @@ -807,6 +815,7 @@ "Presence Penalty": "Штраф за присутствие", "Previous 30 days": "Предыдущие 30 дней", "Previous 7 days": "Предыдущие 7 дней", + "Private": "", "Profile Image": "Изображение профиля", "Prompt": "Промпт", "Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Промпт (например, Расскажи мне интересный факт о Римской империи)", @@ -816,6 +825,7 @@ "Prompt updated successfully": "Промпт успешно обновлён", "Prompts": "Промпты", "Prompts Access": "Доступ к промптам", + "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "Загрузить \"{{searchValue}}\" с Ollama.com", "Pull a model from Ollama.com": "Загрузить модель с Ollama.com", "Query Generation Prompt": "Запрос на генерацию промпта", @@ -980,6 +990,7 @@ "System": "Система", "System Instructions": "Системные инструкции", "System Prompt": "Системный промпт", + "Tags": "", "Tags Generation": "Генерация тегов", "Tags Generation Prompt": "Промпт для генерации тегов", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "Выборка без хвостов используется для уменьшения влияния менее вероятных токенов на выходные данные. Более высокое значение (например, 2.0) еще больше уменьшит влияние, в то время как значение 1.0 отключает эту настройку.", @@ -1010,6 +1021,7 @@ "Theme": "Тема", "Thinking...": "Думаю...", "This action cannot be undone. 
Do you wish to continue?": "Это действие нельзя отменить. Вы хотите продолжить?", + "This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Это обеспечивает сохранение ваших ценных разговоров в безопасной базе данных на вашем сервере. Спасибо!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Это экспериментальная функция, она может работать не так, как ожидалось, и может быть изменена в любое время.", "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "Этот параметр определяет, сколько токенов сохраняется при обновлении контекста. Например, если задано значение 2, будут сохранены последние 2 токена контекста беседы. 
Сохранение контекста может помочь сохранить непрерывность беседы, но может уменьшить возможность отвечать на новые темы.", @@ -1119,6 +1131,7 @@ "Valves updated successfully": "Вентили успешно обновлены", "variable": "переменная", "variable to have them replaced with clipboard content.": "переменную, чтобы заменить их содержимым буфера обмена.", + "Verify Connection": "", "Version": "Версия", "Version {{selectedVersion}} of {{totalVersions}}": "Версия {{selectedVersion}} из {{totalVersions}}", "View Replies": "С ответами", @@ -1164,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Одновременно вы можете общаться только с максимальным количеством файлов {{maxCount}}.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Вы можете персонализировать свое взаимодействие с LLMs, добавив воспоминания с помощью кнопки \"Управлять\" ниже, что сделает их более полезными и адаптированными для вас.", "You cannot upload an empty file.": "Вы не можете загрузить пустой файл.", - "You do not have permission to access this feature.": "У вас нет разрешения на доступ к этой функции", "You do not have permission to upload files": "У вас нет разрешения на загрузку файлов", "You do not have permission to upload files.": "У вас нет разрешения на загрузку файлов.", "You have no archived conversations.": "У вас нет архивированных бесед.", diff --git a/src/lib/i18n/locales/sk-SK/translation.json b/src/lib/i18n/locales/sk-SK/translation.json index 6bd8b92e570..0ca11519ab5 100644 --- a/src/lib/i18n/locales/sk-SK/translation.json +++ b/src/lib/i18n/locales/sk-SK/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "Už máte účet?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "", "an assistant": "asistent", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "", "Description": "Popis", "Didn't fully follow instructions": "Nenasledovali ste presne všetky inštrukcie.", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "", "Do not install functions from sources you do not fully trust.": "Neinštalujte funkcie zo zdrojov, ktorým plne nedôverujete.", "Do not install tools from sources you do not fully trust.": "Neinštalujte nástroje zo zdrojov, ktorým plne nedôverujete.", + "Docling": "", + "Docling Server URL required.": "", "Document": "Dokument", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "Zadajte veľkosť časti", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "Zadajte popis", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "Exportovať prompty", "Export to CSV": "", "Export Tools": "Exportné nástroje", + "External": "", "External Models": "Externé modely", "Failed to add file.": "Nepodarilo sa pridať súbor.", "Failed to create API Key.": "Nepodarilo sa vytvoriť API kľúč.", @@ -983,6 +990,7 @@ "System": "Systém", "System Instructions": "", "System Prompt": "Systémový prompt", + "Tags": "", "Tags Generation": "", "Tags Generation Prompt": "Prompt na generovanie značiek", "Tail free sampling is used to 
reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Môžete komunikovať len s maximálne {{maxCount}} súbor(ami) naraz.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Môžete personalizovať svoje interakcie s LLM pridaním spomienok prostredníctvom tlačidla 'Spravovať' nižšie, čo ich urobí pre vás užitočnejšími a lepšie prispôsobenými.", "You cannot upload an empty file.": "Nemôžete nahrať prázdny súbor.", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "", "You have no archived conversations.": "Nemáte žiadne archivované konverzácie.", diff --git a/src/lib/i18n/locales/sr-RS/translation.json b/src/lib/i18n/locales/sr-RS/translation.json index 41bf530790e..171cac0241e 100644 --- a/src/lib/i18n/locales/sr-RS/translation.json +++ b/src/lib/i18n/locales/sr-RS/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "Већ имате налог?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "Невероватно", "an assistant": "помоћник", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "Опишите вашу базу знања и циљеве", "Description": "Опис", "Didn't fully follow instructions": "Упутства нису праћена у потпуности", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "Ускочите у знање", "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", + "Docling": "", + "Docling Server URL required.": "", "Document": "Документ", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "Унесите величину дела", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "Извези упите", "Export to CSV": "", "Export Tools": "", + "External": "", "External Models": "", "Failed to add file.": "", "Failed to create API Key.": "Неуспешно стварање API кључа.", @@ -983,6 +990,7 @@ "System": "Систем", "System Instructions": "Системске инструкције", "System Prompt": "Системски упит", + "Tags": "", "Tags Generation": "Стварање ознака", "Tags Generation Prompt": "Упит стварања ознака", "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Можете учинити разговор са ВЈМ-овима приснијим додавањем сећања користећи „Управљај“ думе испод и тиме их учинити приснијим и кориснијим.", "You cannot upload an empty file.": "", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "", "You have no archived conversations.": "Немате архивиране разговоре.", diff --git a/src/lib/i18n/locales/sv-SE/translation.json b/src/lib/i18n/locales/sv-SE/translation.json index 1373b8098c1..a2700258c6c 100644 --- a/src/lib/i18n/locales/sv-SE/translation.json +++ b/src/lib/i18n/locales/sv-SE/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "Har du redan ett konto?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "Alltid", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "Fantastiskt", "an assistant": "en assistent", "Analyzed": "Analyserad", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "", "Description": "Beskrivning", "Didn't fully follow instructions": "Följde inte instruktionerna", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "", "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", + "Docling": "", + "Docling Server URL required.": "", "Document": "Dokument", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "Ange chunkstorlek", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "Exportera instruktioner", "Export to CSV": "", "Export Tools": "Exportera verktyg", + "External": "", "External Models": "Externa modeller", "Failed to add file.": "", "Failed to create API Key.": "Misslyckades med att skapa API-nyckel.", @@ -983,6 +990,7 @@ "System": "System", "System Instructions": "", "System Prompt": "Systeminstruktion", + "Tags": "", "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Du kan endast chatta med maximalt {{maxCount}} fil(er) på samma gång", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Du kan anpassa dina interaktioner med stora språkmodeller genom att lägga till minnen via knappen 'Hantera' nedan, så att de blir mer användbara och skräddarsydda för dig.", "You cannot upload an empty file.": "", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "", "You have no archived conversations.": "Du har inga arkiverade samtal.", diff --git a/src/lib/i18n/locales/th-TH/translation.json b/src/lib/i18n/locales/th-TH/translation.json index 68e369ae127..0f1ce53a949 100644 --- a/src/lib/i18n/locales/th-TH/translation.json +++ b/src/lib/i18n/locales/th-TH/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "มีบัญชีอยู่แล้ว?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "", "an assistant": "ผู้ช่วย", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "", "Description": "คำอธิบาย", "Didn't fully follow instructions": "ไม่ได้ปฏิบัติตามคำแนะนำทั้งหมด", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "", "Do not install functions from sources you do not fully trust.": "อย่าติดตั้งฟังก์ชันจากแหล่งที่คุณไม่ไว้วางใจอย่างเต็มที่", "Do not install tools from sources you do not fully trust.": "อย่าติดตั้งเครื่องมือจากแหล่งที่คุณไม่ไว้วางใจอย่างเต็มที่", + "Docling": "", + "Docling Server URL required.": "", "Document": "เอกสาร", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "ใส่ขนาดส่วนข้อมูล", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "ส่งออกพรอมต์", "Export to CSV": "", "Export Tools": "ส่งออกเครื่องมือ", + "External": "", "External Models": "โมเดลภายนอก", "Failed to add file.": "", "Failed to create API Key.": "สร้างคีย์ API ล้มเหลว", @@ -983,6 +990,7 @@ "System": "ระบบ", "System Instructions": "", "System Prompt": "ระบบพรอมต์", + "Tags": "", "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "คุณสามารถปรับแต่งการโต้ตอบของคุณกับ LLMs โดยเพิ่มความทรงจำผ่านปุ่ม 'จัดการ' ด้านล่าง ทำให้มันมีประโยชน์และเหมาะกับคุณมากขึ้น", "You cannot upload an empty file.": "", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "", "You have no archived conversations.": "คุณไม่มีการสนทนาที่เก็บถาวร", diff --git a/src/lib/i18n/locales/tk-TW/translation.json b/src/lib/i18n/locales/tk-TW/translation.json index aff1b821c15..41d481530dd 100644 --- a/src/lib/i18n/locales/tk-TW/translation.json +++ b/src/lib/i18n/locales/tk-TW/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "", "an assistant": "", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "", "Description": "", "Didn't fully follow instructions": "", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "", "Do not install functions from sources you do not fully trust.": "", "Do not install tools from sources you do not fully trust.": "", + "Docling": "", + "Docling Server URL required.": "", "Document": "", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "", "Export to CSV": "", "Export Tools": "", + "External": "", "External Models": "", "Failed to add file.": "", "Failed to create API Key.": "", @@ -983,6 +990,7 @@ "System": "", "System Instructions": "", "System Prompt": "", + "Tags": "", "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "", "You cannot upload an empty file.": "", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "", "You have no archived conversations.": "", diff --git a/src/lib/i18n/locales/tr-TR/translation.json b/src/lib/i18n/locales/tr-TR/translation.json index 0dd34ff1cd5..bc8b4d82ee5 100644 --- a/src/lib/i18n/locales/tr-TR/translation.json +++ b/src/lib/i18n/locales/tr-TR/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "Zaten bir hesabınız mı var?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "Harika", "an assistant": "bir asistan", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "Bilgi tabanınızı ve hedeflerinizi açıklayın", "Description": "Açıklama", "Didn't fully follow instructions": "Talimatları tam olarak takip etmedi", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "Bilgiye dalmak", "Do not install functions from sources you do not fully trust.": "Tamamen güvenmediğiniz kaynaklardan fonksiyonlar yüklemeyin.", "Do not install tools from sources you do not fully trust.": "Tamamen güvenmediğiniz kaynaklardan araçlar yüklemeyin.", + "Docling": "", + "Docling Server URL required.": "", "Document": "Belge", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "Chunk Boyutunu Girin", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "Açıklama girin", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "Promptları Dışa Aktar", "Export to CSV": "CSV'ye Aktar", "Export Tools": "Araçları Dışa Aktar", + "External": "", "External Models": "Modelleri Dışa Aktar", "Failed to add file.": "Dosya eklenemedi.", "Failed to create API Key.": "API Anahtarı oluşturulamadı.", @@ -983,6 +990,7 @@ "System": "Sistem", "System Instructions": "Sistem Talimatları", "System Prompt": "Sistem Promptu", + "Tags": "", "Tags Generation": "Etiketler 
Oluşturma", "Tags Generation Prompt": "Etiketler Oluşturma Promptu", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Aynı anda en fazla {{maxCount}} dosya ile sohbet edebilirsiniz.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Aşağıdaki 'Yönet' düğmesi aracılığıyla bellekler ekleyerek LLM'lerle etkileşimlerinizi kişiselleştirebilir, onları daha yararlı ve size özel hale getirebilirsiniz.", "You cannot upload an empty file.": "Boş bir dosya yükleyemezsiniz.", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "Dosya yüklemek için izniniz yok.", "You have no archived conversations.": "Arşivlenmiş sohbetleriniz yok.", diff --git a/src/lib/i18n/locales/uk-UA/translation.json b/src/lib/i18n/locales/uk-UA/translation.json index 7fce2364422..f8a3fc14d86 100644 --- a/src/lib/i18n/locales/uk-UA/translation.json +++ b/src/lib/i18n/locales/uk-UA/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "Вже є обліковий запис?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "Альтернатива top_p, що спрямована на забезпечення балансу між якістю та різноманітністю. Параметр p представляє мінімальну ймовірність для врахування токена відносно ймовірності найбільш ймовірного токена. 
Наприклад, при p=0.05 і ймовірності найбільш ймовірного токена 0.9, логіти зі значенням менше 0.045 відфільтровуються.", "Always": "Завжди", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "Чудово", "an assistant": "асистента", "Analyzed": "Проаналізовано", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "Опишіть вашу базу знань та цілі", "Description": "Опис", "Didn't fully follow instructions": "Не повністю дотримувалися інструкцій", + "Direct": "", "Direct Connections": "Прямі з'єднання", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "Прямі з'єднання дозволяють користувачам підключатися до своїх власних API-кінцевих точок, сумісних з OpenAI.", "Direct Connections settings updated": "Налаштування прямих з'єднань оновлено", @@ -315,6 +318,8 @@ "Dive into knowledge": "Зануртесь у знання", "Do not install functions from sources you do not fully trust.": "Не встановлюйте функції з джерел, яким ви не повністю довіряєте.", "Do not install tools from sources you do not fully trust.": "Не встановлюйте інструменти з джерел, яким ви не повністю довіряєте.", + "Docling": "", + "Docling Server URL required.": "", "Document": "Документ", "Document Intelligence": "Інтелект документа", "Document Intelligence endpoint and key required.": "Потрібні кінцева точка та ключ для Інтелекту документа.", @@ -385,6 +390,7 @@ "Enter Chunk Size": "Введіть розмір фрагменту", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "Введіть пари \"токен:значення_зміщення\", розділені комами (напр.: 5432:100, 413:-100)", "Enter description": "Введіть опис", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "Введіть кінцеву точку Інтелекту документа", "Enter Document Intelligence Key": "Введіть ключ Інтелекту документа", "Enter domains separated by commas (e.g., example.com,site.org)": "Введіть домени, розділені комами (наприклад, example.com, 
site.org)", @@ -472,6 +478,7 @@ "Export Prompts": "Експорт промтів", "Export to CSV": "Експорт в CSV", "Export Tools": "Експорт інструментів", + "External": "", "External Models": "Зовнішні моделі", "Failed to add file.": "Не вдалося додати файл.", "Failed to create API Key.": "Не вдалося створити API ключ.", @@ -983,6 +990,7 @@ "System": "Система", "System Instructions": "Системні інструкції", "System Prompt": "Системний промт", + "Tags": "", "Tags Generation": "Генерація тегів", "Tags Generation Prompt": "Підказка для генерації тегів", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "Вибірка без хвоста використовується для зменшення впливу менш ймовірних токенів на результат. Вищий показник (напр., 2.0) зменшить вплив сильніше, тоді як значення 1.0 вимикає цю опцію.", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Ви можете спілкуватися лише з максимальною кількістю {{maxCount}} файлів одночасно.", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Ви можете налаштувати ваші взаємодії з мовними моделями, додавши спогади через кнопку 'Керувати' внизу, що зробить їх більш корисними та персоналізованими для вас.", "You cannot upload an empty file.": "Ви не можете завантажити порожній файл.", - "You do not have permission to access this feature.": "У вас немає дозволу на доступ до цієї функції.", "You do not have permission to upload files": "У вас немає дозволу на завантаження файлів", "You do not have permission to upload files.": "У вас немає дозволу завантажувати файли.", "You have no archived conversations.": "У вас немає архівованих розмов.", diff --git a/src/lib/i18n/locales/ur-PK/translation.json b/src/lib/i18n/locales/ur-PK/translation.json index 450c3002045..39a89f44923 
100644 --- a/src/lib/i18n/locales/ur-PK/translation.json +++ b/src/lib/i18n/locales/ur-PK/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "کیا پہلے سے اکاؤنٹ موجود ہے؟", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "", "an assistant": "معاون", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "", "Description": "تفصیل", "Didn't fully follow instructions": "ہدایات کو مکمل طور پر نہیں سمجھا", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "", "Do not install functions from sources you do not fully trust.": "ایسی جگہوں سے فنکشنز انسٹال نہ کریں جن پر آپ مکمل بھروسہ نہیں کرتے", "Do not install tools from sources you do not fully trust.": "جن ذرائع پر آپ مکمل بھروسہ نہیں کرتے، ان سے ٹولز انسٹال نہ کریں", + "Docling": "", + "Docling Server URL required.": "", "Document": "دستاویز", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "چنک سائز درج کریں", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "تفصیل درج کریں", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "پرامپٹس برآمد کریں", "Export to CSV": "", "Export Tools": "ایکسپورٹ ٹولز", + "External": 
"", "External Models": "بیرونی ماڈلز", "Failed to add file.": "فائل شامل کرنے میں ناکام", "Failed to create API Key.": "API کلید بنانے میں ناکام", @@ -983,6 +990,7 @@ "System": "سسٹم", "System Instructions": "نظام کی ہدایات", "System Prompt": "سسٹم پرومپٹ", + "Tags": "", "Tags Generation": "", "Tags Generation Prompt": "پرمپٹ کے لیے ٹیگز بنائیں", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "آپ ایک وقت میں زیادہ سے زیادہ {{maxCount}} فائل(وں) کے ساتھ صرف چیٹ کر سکتے ہیں", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "آپ نیچے موجود 'Manage' بٹن کے ذریعے LLMs کے ساتھ اپنی بات چیت کو یادداشتیں شامل کرکے ذاتی بنا سکتے ہیں، جو انہیں آپ کے لیے زیادہ مددگار اور آپ کے متعلق بنائے گی", "You cannot upload an empty file.": "آپ خالی فائل اپلوڈ نہیں کر سکتے", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "", "You have no archived conversations.": "آپ کے پاس کوئی محفوظ شدہ مکالمات نہیں ہیں", diff --git a/src/lib/i18n/locales/vi-VN/translation.json b/src/lib/i18n/locales/vi-VN/translation.json index 993204389c6..a405b4424f4 100644 --- a/src/lib/i18n/locales/vi-VN/translation.json +++ b/src/lib/i18n/locales/vi-VN/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "Bạn đã có tài khoản?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "", "an assistant": "trợ lý", "Analyzed": "", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "", "Description": "Mô tả", "Didn't fully follow instructions": "Không tuân theo chỉ dẫn một cách đầy đủ", + "Direct": "", "Direct Connections": "", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", "Direct Connections settings updated": "", @@ -315,6 +318,8 @@ "Dive into knowledge": "", "Do not install functions from sources you do not fully trust.": "Không cài đặt các functions từ các nguồn mà bạn không hoàn toàn tin tưởng.", "Do not install tools from sources you do not fully trust.": "Không cài đặt các tools từ những nguồn mà bạn không hoàn toàn tin tưởng.", + "Docling": "", + "Docling Server URL required.": "", "Document": "Tài liệu", "Document Intelligence": "", "Document Intelligence endpoint and key required.": "", @@ -385,6 +390,7 @@ "Enter Chunk Size": "Nhập Kích thước Chunk", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", "Enter description": "", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "", "Enter Document Intelligence Key": "", "Enter domains separated by commas (e.g., example.com,site.org)": "", @@ -472,6 +478,7 @@ "Export Prompts": "Tải các prompt về máy", "Export to CSV": "", "Export Tools": "Tải Tools về máy", + "External": "", "External Models": "Các model ngoài", "Failed to add file.": "", "Failed to create API Key.": "Lỗi khởi tạo API Key", @@ -983,6 +990,7 @@ "System": "Hệ thống", "System Instructions": "", "System Prompt": "Prompt Hệ thống (System Prompt)", + "Tags": "", "Tags Generation": "", "Tags Generation Prompt": "", "Tail free sampling is used to reduce the impact of less probable 
tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Bạn có thể cá nhân hóa các tương tác của mình với LLM bằng cách thêm bộ nhớ thông qua nút 'Quản lý' bên dưới, làm cho chúng hữu ích hơn và phù hợp với bạn hơn.", "You cannot upload an empty file.": "", - "You do not have permission to access this feature.": "", "You do not have permission to upload files": "", "You do not have permission to upload files.": "", "You have no archived conversations.": "Bạn chưa lưu trữ một nội dung chat nào", diff --git a/src/lib/i18n/locales/zh-CN/translation.json b/src/lib/i18n/locales/zh-CN/translation.json index d818275a371..f233be12d47 100644 --- a/src/lib/i18n/locales/zh-CN/translation.json +++ b/src/lib/i18n/locales/zh-CN/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "已经拥有账号了?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "top_p 的替代方法,旨在确保质量和多样性之间的平衡。参数 p 表示相对于最可能令牌的概率,一个令牌被考虑的最小概率。例如,当 p=0.05 且最可能的令牌概率为 0.9 时,概率值小于 0.045 的词元将被过滤掉。", "Always": "保持", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "很棒", "an assistant": "一个助手", "Analyzed": "已分析", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "描述您的知识库和目标", "Description": "描述", "Didn't fully follow instructions": "没有完全遵照指示", + "Direct": "", "Direct Connections": "直接连接", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "直接连接功能允许用户连接至其自有的、兼容 OpenAI 的 API 端点。", "Direct Connections settings updated": "直接连接设置已更新", @@ -315,6 +318,8 @@ "Dive into knowledge": "深入知识的海洋", "Do not install functions from sources you do not fully trust.": "切勿安装来源不完全可信的函数。", "Do not install tools from sources you do not fully trust.": "切勿安装来源不完全可信的工具。", + "Docling": "", + "Docling Server URL required.": "", "Document": "文档", "Document Intelligence": "Document Intelligence", "Document Intelligence endpoint and key required.": "需要 Document Intelligence 端点和密钥。", @@ -385,6 +390,7 @@ "Enter Chunk Size": "输入块大小 (Chunk Size)", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "输入以逗号分隔的“token:bias_value”对(例如:5432:100, 413:-100)", "Enter description": "输入简介描述", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "输入 Document Intelligence 端点", "Enter Document Intelligence Key": "输入 Document Intelligence 密钥", "Enter domains separated by commas (e.g., example.com,site.org)": "输入以逗号分隔的域名(例如:example.com、site.org)", @@ -472,6 +478,7 @@ "Export Prompts": "导出提示词", "Export to CSV": "导出到 CSV", "Export Tools": "导出工具", + "External": "", "External Models": "外部模型", "Failed to add file.": "添加文件失败。", "Failed to create API Key.": "无法创建 API 密钥。", @@ -983,6 +990,7 @@ "System": "系统", "System Instructions": "系统指令", 
"System Prompt": "系统提示词 (System Prompt)", + "Tags": "", "Tags Generation": "标签生成", "Tags Generation Prompt": "标签生成提示词", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "无尾采样用于减少输出中出现概率较小的 Token 的影响。较高的值(例如 2.0)将进一步减少影响,而值 1.0 则禁用此设置。", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "每次对话最多仅能附上 {{maxCount}} 个文件。", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "通过点击下方的“管理”按钮,你可以添加记忆,以个性化大语言模型的互动,使其更有用,更符合你的需求。", "You cannot upload an empty file.": "请勿上传空文件。", - "You do not have permission to access this feature.": "你没有访问此功能的权限。", "You do not have permission to upload files": "你没有上传文件的权限", "You do not have permission to upload files.": "你没有上传文件的权限。", "You have no archived conversations.": "没有已归档的对话。", diff --git a/src/lib/i18n/locales/zh-TW/translation.json b/src/lib/i18n/locales/zh-TW/translation.json index 203ae71b255..578b18cba90 100644 --- a/src/lib/i18n/locales/zh-TW/translation.json +++ b/src/lib/i18n/locales/zh-TW/translation.json @@ -68,6 +68,8 @@ "Already have an account?": "已經有帳號了嗎?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "top_p 的替代方案,旨在確保品質與多樣性之間的平衡。參數 p 代表一個 token 被考慮的最低機率,相對於最有可能 token 的機率。例如,當 p=0.05 且最有可能 token 的機率為 0.9 時,機率小於 0.045 的 logits 將被過濾掉。", "Always": "總是", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", "Amazing": "很棒", "an assistant": "一位助手", "Analyzed": "分析完畢", @@ -293,6 +295,7 @@ "Describe your knowledge base and objectives": "描述您的知識庫和目標", "Description": "描述", "Didn't fully follow instructions": "未完全遵循指示", + "Direct": "", "Direct Connections": "直接連線", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "直接連線允許使用者連接到自己的 OpenAI 相容 API 端點。", "Direct Connections settings updated": "直接連線設定已更新。", @@ -315,6 +318,8 @@ "Dive into knowledge": "深入知識", "Do not install functions from sources you do not fully trust.": "請勿從您無法完全信任的來源安裝函式。", "Do not install tools from sources you do not fully trust.": "請勿從您無法完全信任的來源安裝工具。", + "Docling": "", + "Docling Server URL required.": "", "Document": "文件", "Document Intelligence": "Document Intelligence", "Document Intelligence endpoint and key required.": "需提供 Document Intelligence 端點及金鑰", @@ -385,6 +390,7 @@ "Enter Chunk Size": "輸入區塊大小", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "輸入逗號分隔的 \"token:bias_value\" 配對 (範例:5432:100, 413:-100)", "Enter description": "輸入描述", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "輸入 Document Intelligence 端點", "Enter Document Intelligence Key": "輸入 Document Intelligence 金鑰", "Enter domains separated by commas (e.g., example.com,site.org)": "輸入網域,以逗號分隔(例如:example.com, site.org)", @@ -472,6 +478,7 @@ "Export Prompts": "匯出提示詞", "Export to CSV": "匯出為 CSV", "Export Tools": "匯出工具", + "External": "", "External Models": "外部模型", "Failed to add file.": "新增檔案失敗。", "Failed to create API Key.": "建立 API 金鑰失敗。", @@ -983,6 +990,7 @@ "System": "系統", "System 
Instructions": "系統指令", "System Prompt": "系統提示詞", + "Tags": "", "Tags Generation": "標籤生成", "Tags Generation Prompt": "標籤生成提示詞", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "尾部自由採樣用於減少輸出結果中較低機率 token 的影響。較高的值(例如 2.0)會減少更多影響,而值為 1.0 時會停用此設定。", @@ -1169,7 +1177,6 @@ "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "您一次最多只能與 {{maxCount}} 個檔案進行對話。", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "您可以透過下方的「管理」按鈕新增記憶,將您與大型語言模型的互動個人化,讓它們更有幫助並更符合您的需求。", "You cannot upload an empty file.": "您無法上傳空檔案", - "You do not have permission to access this feature.": "您沒有權限訪問此功能", "You do not have permission to upload files": "您沒有權限上傳檔案", "You do not have permission to upload files.": "您沒有權限上傳檔案。", "You have no archived conversations.": "您沒有已封存的對話。", From b609b9d2975d49a78f384b3513cd90a9b7a37f61 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Tue, 18 Mar 2025 06:39:42 -0700 Subject: [PATCH 341/623] chore: format --- backend/open_webui/utils/plugin.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/backend/open_webui/utils/plugin.py b/backend/open_webui/utils/plugin.py index 685746b19af..29a4d0cceb9 100644 --- a/backend/open_webui/utils/plugin.py +++ b/backend/open_webui/utils/plugin.py @@ -170,7 +170,12 @@ def install_frontmatter_requirements(requirements: str): try: req_list = [req.strip() for req in requirements.split(",")] log.info(f"Installing requirements: {' '.join(req_list)}") - subprocess.check_call([sys.executable, "-m", "pip", "install"] + PIP_OPTIONS + req_list + PIP_PACKAGE_INDEX_OPTIONS) + subprocess.check_call( + [sys.executable, "-m", "pip", "install"] + + PIP_OPTIONS + + req_list + + PIP_PACKAGE_INDEX_OPTIONS + ) except Exception as e: log.error(f"Error installing 
packages: {' '.join(req_list)}") raise e From 87a3a893e22eb6574748a1806bdbb2247563c316 Mon Sep 17 00:00:00 2001 From: Panda Date: Tue, 18 Mar 2025 15:54:03 +0100 Subject: [PATCH 342/623] i18n: zh-cn --- src/lib/i18n/locales/zh-CN/translation.json | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/lib/i18n/locales/zh-CN/translation.json b/src/lib/i18n/locales/zh-CN/translation.json index f233be12d47..68abea61673 100644 --- a/src/lib/i18n/locales/zh-CN/translation.json +++ b/src/lib/i18n/locales/zh-CN/translation.json @@ -68,8 +68,8 @@ "Already have an account?": "已经拥有账号了?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "top_p 的替代方法,旨在确保质量和多样性之间的平衡。参数 p 表示相对于最可能令牌的概率,一个令牌被考虑的最小概率。例如,当 p=0.05 且最可能的令牌概率为 0.9 时,概率值小于 0.045 的词元将被过滤掉。", "Always": "保持", - "Always Collapse Code Blocks": "", - "Always Expand Details": "", + "Always Collapse Code Blocks": "始终折叠代码块", + "Always Expand Details": "始终展开详细信息", "Amazing": "很棒", "an assistant": "一个助手", "Analyzed": "已分析", @@ -295,7 +295,7 @@ "Describe your knowledge base and objectives": "描述您的知识库和目标", "Description": "描述", "Didn't fully follow instructions": "没有完全遵照指示", - "Direct": "", + "Direct": "直接", "Direct Connections": "直接连接", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "直接连接功能允许用户连接至其自有的、兼容 OpenAI 的 API 端点。", "Direct Connections settings updated": "直接连接设置已更新", @@ -318,8 +318,8 @@ "Dive into knowledge": "深入知识的海洋", "Do not install functions from sources you do not fully trust.": "切勿安装来源不完全可信的函数。", "Do not install tools from sources you do not fully trust.": "切勿安装来源不完全可信的工具。", - "Docling": "", - "Docling Server URL required.": "", + "Docling": 
"Docling", + "Docling Server URL required.": "需要提供 Docling 服务器 URL", "Document": "文档", "Document Intelligence": "Document Intelligence", "Document Intelligence endpoint and key required.": "需要 Document Intelligence 端点和密钥。", @@ -390,7 +390,7 @@ "Enter Chunk Size": "输入块大小 (Chunk Size)", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "输入以逗号分隔的“token:bias_value”对(例如:5432:100, 413:-100)", "Enter description": "输入简介描述", - "Enter Docling Server URL": "", + "Enter Docling Server URL": "输入 Docling 服务器 URL", "Enter Document Intelligence Endpoint": "输入 Document Intelligence 端点", "Enter Document Intelligence Key": "输入 Document Intelligence 密钥", "Enter domains separated by commas (e.g., example.com,site.org)": "输入以逗号分隔的域名(例如:example.com、site.org)", @@ -478,7 +478,7 @@ "Export Prompts": "导出提示词", "Export to CSV": "导出到 CSV", "Export Tools": "导出工具", - "External": "", + "External": "外部", "External Models": "外部模型", "Failed to add file.": "添加文件失败。", "Failed to create API Key.": "无法创建 API 密钥。", @@ -990,7 +990,7 @@ "System": "系统", "System Instructions": "系统指令", "System Prompt": "系统提示词 (System Prompt)", - "Tags": "", + "Tags": "标签", "Tags Generation": "标签生成", "Tags Generation Prompt": "标签生成提示词", "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "无尾采样用于减少输出中出现概率较小的 Token 的影响。较高的值(例如 2.0)将进一步减少影响,而值 1.0 则禁用此设置。", From ba676b7ed6a4ce141474d7c31797ea2fd8aa513a Mon Sep 17 00:00:00 2001 From: Marko Henning Date: Tue, 18 Mar 2025 16:25:24 +0100 Subject: [PATCH 343/623] Use k_reranker also for result merge, and add special sorting use case for ChromaDB --- backend/open_webui/retrieval/utils.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/backend/open_webui/retrieval/utils.py b/backend/open_webui/retrieval/utils.py index 106c9da063c..9b8d5835269 100644 --- a/backend/open_webui/retrieval/utils.py +++ b/backend/open_webui/retrieval/utils.py @@ -146,7 +146,10 @@ def query_doc_with_hybrid_search( # retrieve only min(k, k_reranker) items, sort and cut by distance if k < k_reranker if k < k_reranker: - sorted_items = sorted(zip(distances, metadatas, documents), key=lambda x: x[0], reverse=True) + if VECTOR_DB == "chroma": + sorted_items = sorted(zip(distances, metadatas, documents), key=lambda x: x[0], reverse=False) + else: + sorted_items = sorted(zip(distances, metadatas, documents), key=lambda x: x[0], reverse=True) sorted_items = sorted_items[:k] distances, documents, metadatas = map(list, zip(*sorted_items)) result = { @@ -310,9 +313,9 @@ def query_collection_with_hybrid_search( if VECTOR_DB == "chroma": # Chroma uses unconventional cosine similarity, so we don't need to reverse the results # https://docs.trychroma.com/docs/collections/configure#configuring-chroma-collections - return merge_and_sort_query_results(results, k=k, reverse=False) + return merge_and_sort_query_results(results, k=k_reranker, reverse=False) else: - return merge_and_sort_query_results(results, k=k, reverse=True) + return merge_and_sort_query_results(results, k=k_reranker, reverse=True) def get_embedding_function( From 5ab789e83e124af5383a06a80dd3e55cef713f2b Mon Sep 17 00:00:00 2001 From: Marko Henning Date: 
Tue, 18 Mar 2025 16:44:58 +0100 Subject: [PATCH 344/623] Add documentation on chroma special case --- backend/open_webui/retrieval/utils.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/backend/open_webui/retrieval/utils.py b/backend/open_webui/retrieval/utils.py index 9b8d5835269..1afb333b11b 100644 --- a/backend/open_webui/retrieval/utils.py +++ b/backend/open_webui/retrieval/utils.py @@ -147,6 +147,8 @@ def query_doc_with_hybrid_search( # retrieve only min(k, k_reranker) items, sort and cut by distance if k < k_reranker if k < k_reranker: if VECTOR_DB == "chroma": + # Chroma uses unconventional cosine similarity, so we don't need to reverse the results + # https://docs.trychroma.com/docs/collections/configure#configuring-chroma-collections sorted_items = sorted(zip(distances, metadatas, documents), key=lambda x: x[0], reverse=False) else: sorted_items = sorted(zip(distances, metadatas, documents), key=lambda x: x[0], reverse=True) From 3b624f35ac770d2bf1d1ba25c35adc39c60454ad Mon Sep 17 00:00:00 2001 From: hurxxxx Date: Wed, 19 Mar 2025 02:00:58 +0900 Subject: [PATCH 345/623] feat: submit chat title rename with Enter, cancel with ESC --- src/lib/components/layout/Sidebar/ChatItem.svelte | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/lib/components/layout/Sidebar/ChatItem.svelte b/src/lib/components/layout/Sidebar/ChatItem.svelte index 80981d00c73..3916ab03d7c 100644 --- a/src/lib/components/layout/Sidebar/ChatItem.svelte +++ b/src/lib/components/layout/Sidebar/ChatItem.svelte @@ -198,6 +198,19 @@ }); let showDeleteConfirm = false; + + const keyDownEvent = (e) => { + if (e.key === 'Enter') { + e.preventDefault(); + editChatTitle(id, chatTitle); + confirmEdit = false; + chatTitle = ''; + } else if (e.key === 'Escape') { + e.preventDefault(); + confirmEdit = false; + chatTitle = ''; + } + }; @@ -246,6 +259,7 @@ bind:value={chatTitle} id="chat-title-input-{id}" class=" bg-transparent w-full outline-hidden mr-10" + 
on:keydown={keyDownEvent} />
{:else} From 2b687e2c06d004a795fa22c4502accc897739e36 Mon Sep 17 00:00:00 2001 From: hurxxxx Date: Wed, 19 Mar 2025 02:23:30 +0900 Subject: [PATCH 346/623] Use consistent function names. --- src/lib/components/layout/Sidebar/ChatItem.svelte | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib/components/layout/Sidebar/ChatItem.svelte b/src/lib/components/layout/Sidebar/ChatItem.svelte index 3916ab03d7c..0c15a334e8e 100644 --- a/src/lib/components/layout/Sidebar/ChatItem.svelte +++ b/src/lib/components/layout/Sidebar/ChatItem.svelte @@ -199,7 +199,7 @@ let showDeleteConfirm = false; - const keyDownEvent = (e) => { + const chatTitleInputKeydownHandler = (e) => { if (e.key === 'Enter') { e.preventDefault(); editChatTitle(id, chatTitle); @@ -259,7 +259,7 @@ bind:value={chatTitle} id="chat-title-input-{id}" class=" bg-transparent w-full outline-hidden mr-10" - on:keydown={keyDownEvent} + on:keydown={chatTitleInputKeydownHandler} />
{:else} From 05fa67ae8ffd005411702395c973f0673edf2264 Mon Sep 17 00:00:00 2001 From: Tiancong Li Date: Wed, 19 Mar 2025 02:54:51 +0800 Subject: [PATCH 347/623] i18n: update zh-TW --- src/lib/i18n/locales/zh-TW/translation.json | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/lib/i18n/locales/zh-TW/translation.json b/src/lib/i18n/locales/zh-TW/translation.json index 578b18cba90..9c526528817 100644 --- a/src/lib/i18n/locales/zh-TW/translation.json +++ b/src/lib/i18n/locales/zh-TW/translation.json @@ -68,8 +68,8 @@ "Already have an account?": "已經有帳號了嗎?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "top_p 的替代方案,旨在確保品質與多樣性之間的平衡。參數 p 代表一個 token 被考慮的最低機率,相對於最有可能 token 的機率。例如,當 p=0.05 且最有可能 token 的機率為 0.9 時,機率小於 0.045 的 logits 將被過濾掉。", "Always": "總是", - "Always Collapse Code Blocks": "", - "Always Expand Details": "", + "Always Collapse Code Blocks": "總是摺疊程式碼區塊", + "Always Expand Details": "總是展開詳細資訊", "Amazing": "很棒", "an assistant": "一位助手", "Analyzed": "分析完畢", @@ -295,7 +295,7 @@ "Describe your knowledge base and objectives": "描述您的知識庫和目標", "Description": "描述", "Didn't fully follow instructions": "未完全遵循指示", - "Direct": "", + "Direct": "直接", "Direct Connections": "直接連線", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "直接連線允許使用者連接到自己的 OpenAI 相容 API 端點。", "Direct Connections settings updated": "直接連線設定已更新。", @@ -318,8 +318,8 @@ "Dive into knowledge": "深入知識", "Do not install functions from sources you do not fully trust.": "請勿從您無法完全信任的來源安裝函式。", "Do not install tools from sources you do not fully trust.": "請勿從您無法完全信任的來源安裝工具。", - "Docling": "", - "Docling Server URL required.": "", + "Docling": 
"Docling", + "Docling Server URL required.": "Docling 伺服器 URL 為必填。", "Document": "文件", "Document Intelligence": "Document Intelligence", "Document Intelligence endpoint and key required.": "需提供 Document Intelligence 端點及金鑰", @@ -390,7 +390,7 @@ "Enter Chunk Size": "輸入區塊大小", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "輸入逗號分隔的 \"token:bias_value\" 配對 (範例:5432:100, 413:-100)", "Enter description": "輸入描述", - "Enter Docling Server URL": "", + "Enter Docling Server URL": "請輸入 Docling 伺服器 URL", "Enter Document Intelligence Endpoint": "輸入 Document Intelligence 端點", "Enter Document Intelligence Key": "輸入 Document Intelligence 金鑰", "Enter domains separated by commas (e.g., example.com,site.org)": "輸入網域,以逗號分隔(例如:example.com, site.org)", @@ -478,7 +478,7 @@ "Export Prompts": "匯出提示詞", "Export to CSV": "匯出為 CSV", "Export Tools": "匯出工具", - "External": "", + "External": "外部", "External Models": "外部模型", "Failed to add file.": "新增檔案失敗。", "Failed to create API Key.": "建立 API 金鑰失敗。", @@ -990,7 +990,7 @@ "System": "系統", "System Instructions": "系統指令", "System Prompt": "系統提示詞", - "Tags": "", + "Tags": "標籤", "Tags Generation": "標籤生成", "Tags Generation Prompt": "標籤生成提示詞", "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "尾部自由採樣用於減少輸出結果中較低機率 token 的影響。較高的值(例如 2.0)會減少更多影響,而值為 1.0 時會停用此設定。", From bda5e0af7429fbd34518f41bfabb04696aaf6039 Mon Sep 17 00:00:00 2001 From: Aleix Dorca Date: Tue, 18 Mar 2025 20:08:16 +0100 Subject: [PATCH 348/623] Update Catalan translation.json --- src/lib/i18n/locales/ca-ES/translation.json | 30 ++++++++++----------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/src/lib/i18n/locales/ca-ES/translation.json b/src/lib/i18n/locales/ca-ES/translation.json index 89e95b55b6a..6ca86ae83f6 100644 --- a/src/lib/i18n/locales/ca-ES/translation.json +++ b/src/lib/i18n/locales/ca-ES/translation.json @@ -68,8 +68,8 @@ "Already have an account?": "Ja tens un compte?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "Alternativa al top_p, i pretén garantir un equilibri de qualitat i varietat. El paràmetre p representa la probabilitat mínima que es consideri un token, en relació amb la probabilitat del token més probable. 
Per exemple, amb p=0,05 i el token més probable amb una probabilitat de 0,9, es filtren els logits amb un valor inferior a 0,045.", "Always": "Sempre", - "Always Collapse Code Blocks": "", - "Always Expand Details": "", + "Always Collapse Code Blocks": "Reduir sempre els blocs de codi", + "Always Expand Details": "Expandir sempre els detalls", "Amazing": "Al·lucinant", "an assistant": "un assistent", "Analyzed": "Analitzat", @@ -272,7 +272,7 @@ "Default Prompt Suggestions": "Suggeriments d'indicació per defecte", "Default to 389 or 636 if TLS is enabled": "Per defecte 389 o 636 si TLS està habilitat", "Default to ALL": "Per defecte TOTS", - "Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "", + "Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "Per defecte, Segmented Retrieval per a l'extracció de contingut rellevant, es recomana en la majoria dels casos.", "Default User Role": "Rol d'usuari per defecte", "Delete": "Eliminar", "Delete a model": "Eliminar un model", @@ -295,7 +295,7 @@ "Describe your knowledge base and objectives": "Descriu la teva base de coneixement i objectius", "Description": "Descripció", "Didn't fully follow instructions": "No s'han seguit les instruccions completament", - "Direct": "", + "Direct": "Directe", "Direct Connections": "Connexions directes", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "Les connexions directes permeten als usuaris connectar-se als seus propis endpoints d'API compatibles amb OpenAI.", "Direct Connections settings updated": "Configuració de les connexions directes actualitzada", @@ -318,8 +318,8 @@ "Dive into knowledge": "Aprofundir en el coneixement", "Do not install functions from sources you do not fully trust.": "No instal·lis funcions de fonts en què no confiïs plenament.", "Do not install tools from sources you do not fully trust.": "No 
instal·lis eines de fonts en què no confiïs plenament.", - "Docling": "", - "Docling Server URL required.": "", + "Docling": "Docling", + "Docling Server URL required.": "La URL del servidor Docling és necessària", "Document": "Document", "Document Intelligence": "Document Intelligence", "Document Intelligence endpoint and key required.": "Fa falta un punt de connexió i una clau per a Document Intelligence.", @@ -365,7 +365,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Model d'incrustació configurat a \"{{embedding_model}}\"", "Enable API Key": "Activar la Clau API", "Enable autocomplete generation for chat messages": "Activar la generació automàtica per als missatges del xat", - "Enable Code Execution": "", + "Enable Code Execution": "Permetre l'execució de codi", "Enable Code Interpreter": "Activar l'intèrpret de codi", "Enable Community Sharing": "Activar l'ús compartit amb la comunitat", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Activar el bloqueig de memòria (mlock) per evitar que les dades del model s'intercanviïn fora de la memòria RAM. Aquesta opció bloqueja el conjunt de pàgines de treball del model a la memòria RAM, assegurant-se que no s'intercanviaran al disc. 
Això pot ajudar a mantenir el rendiment evitant errors de pàgina i garantint un accés ràpid a les dades.", @@ -390,7 +390,7 @@ "Enter Chunk Size": "Introdueix la mida del bloc", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "Introdueix parelles de \"token:valor de biaix\" separats per comes (exemple: 5432:100, 413:-100)", "Enter description": "Introdueix la descripció", - "Enter Docling Server URL": "", + "Enter Docling Server URL": "Introdueix la URL del servidor Docling", "Enter Document Intelligence Endpoint": "Introdueix el punt de connexió de Document Intelligence", "Enter Document Intelligence Key": "Introdueix la clau de Document Intelligence", "Enter domains separated by commas (e.g., example.com,site.org)": "Introdueix els dominis separats per comes (p. ex. example.com,site.org)", @@ -478,7 +478,7 @@ "Export Prompts": "Exportar les indicacions", "Export to CSV": "Exportar a CSV", "Export Tools": "Exportar les eines", - "External": "", + "External": "Extern", "External Models": "Models externs", "Failed to add file.": "No s'ha pogut afegir l'arxiu.", "Failed to create API Key.": "No s'ha pogut crear la clau API.", @@ -591,7 +591,7 @@ "Include `--api` flag when running stable-diffusion-webui": "Inclou `--api` quan executis stable-diffusion-webui", "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "Influeix amb la rapidesa amb què l'algoritme respon als comentaris del text generat. 
Una taxa d'aprenentatge més baixa donarà lloc a ajustos més lents, mentre que una taxa d'aprenentatge més alta farà que l'algorisme sigui més sensible.", "Info": "Informació", - "Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "", + "Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "Injectar tot el contingut com a context per a un processament complet, això es recomana per a consultes complexes.", "Input commands": "Entra comandes", "Install from Github URL": "Instal·lar des de l'URL de Github", "Instant Auto-Send After Voice Transcription": "Enviament automàtic després de la transcripció de veu", @@ -815,7 +815,7 @@ "Presence Penalty": "Penalització de presència", "Previous 30 days": "30 dies anteriors", "Previous 7 days": "7 dies anteriors", - "Private": "", + "Private": "Privat", "Profile Image": "Imatge de perfil", "Prompt": "Indicació", "Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Indicació (p.ex. Digues-me quelcom divertit sobre l'Imperi Romà)", @@ -825,7 +825,7 @@ "Prompt updated successfully": "Indicació actualitzada correctament", "Prompts": "Indicacions", "Prompts Access": "Accés a les indicacions", - "Public": "", + "Public": "Públic", "Pull \"{{searchValue}}\" from Ollama.com": "Obtenir \"{{searchValue}}\" de Ollama.com", "Pull a model from Ollama.com": "Obtenir un model d'Ollama.com", "Query Generation Prompt": "Indicació per a generació de consulta", @@ -990,7 +990,7 @@ "System": "Sistema", "System Instructions": "Instruccions de sistema", "System Prompt": "Indicació del Sistema", - "Tags": "", + "Tags": "Etiquetes", "Tags Generation": "Generació d'etiquetes", "Tags Generation Prompt": "Indicació per a la generació d'etiquetes", "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "El mostreig sense cua s'utilitza per reduir l'impacte de tokens menys probables de la sortida. Un valor més alt (p. ex., 2,0) reduirà més l'impacte, mentre que un valor d'1,0 desactiva aquesta configuració.", @@ -1021,7 +1021,7 @@ "Theme": "Tema", "Thinking...": "Pensant...", "This action cannot be undone. Do you wish to continue?": "Aquesta acció no es pot desfer. Vols continuar?", - "This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "", + "This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "Aquest canal es va crear el dia {{createdAt}}. Aquest és el començament del canal {{channelName}}.", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Això assegura que les teves converses valuoses queden desades de manera segura a la teva base de dades. Gràcies!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Aquesta és una funció experimental, és possible que no funcioni com s'espera i està subjecta a canvis en qualsevol moment.", "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "Aquesta opció controla quants tokens es conserven en actualitzar el context. Per exemple, si s'estableix en 2, es conservaran els darrers 2 tokens del context de conversa. 
Preservar el context pot ajudar a mantenir la continuïtat d'una conversa, però pot reduir la capacitat de respondre a nous temes.", @@ -1131,7 +1131,7 @@ "Valves updated successfully": "Valves actualitat correctament", "variable": "variable", "variable to have them replaced with clipboard content.": "variable per tenir-les reemplaçades amb el contingut del porta-retalls.", - "Verify Connection": "", + "Verify Connection": "Verificar la connexió", "Version": "Versió", "Version {{selectedVersion}} of {{totalVersions}}": "Versió {{selectedVersion}} de {{totalVersions}}", "View Replies": "Veure les respostes", From d68a6227adf4964285fbf1b8f442da3f8a443faa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tuna=20=C3=87a=C4=9Flar=20G=C3=BCm=C3=BC=C5=9F?= Date: Wed, 19 Mar 2025 00:15:39 +0300 Subject: [PATCH 349/623] Turkish language pack updates --- src/lib/i18n/locales/tr-TR/translation.json | 46 ++++++++++----------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/src/lib/i18n/locales/tr-TR/translation.json b/src/lib/i18n/locales/tr-TR/translation.json index bc8b4d82ee5..d9c9fb16f30 100644 --- a/src/lib/i18n/locales/tr-TR/translation.json +++ b/src/lib/i18n/locales/tr-TR/translation.json @@ -22,7 +22,7 @@ "Account Activation Pending": "Hesap Aktivasyonu Bekleniyor", "Accurate information": "Doğru bilgi", "Actions": "Aksiyonlar", - "Activate": "", + "Activate": "Aktif Et", "Activate this command by typing \"/{{COMMAND}}\" to chat input.": "Sohbet girişine \"/{{COMMAND}}\" yazarak bu komutu etkinleştirin.", "Active Users": "Aktif Kullanıcılar", "Add": "Ekle", @@ -52,7 +52,7 @@ "Admins have access to all tools at all times; users need tools assigned per model in the workspace.": "Yöneticiler her zaman tüm araçlara erişebilir; kullanıcıların çalışma alanındaki model başına atanmış araçlara ihtiyacı vardır.", "Advanced Parameters": "Gelişmiş Parametreler", "Advanced Params": "Gelişmiş Parametreler", - "All": "", + "All": "Tüm", "All Documents": "Tüm Belgeler", "All 
models deleted successfully": "Tüm modeller başarıyla silindi", "Allow Chat Controls": "", @@ -67,13 +67,13 @@ "Allowed Endpoints": "İzin Verilen Uç Noktalar", "Already have an account?": "Zaten bir hesabınız mı var?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", - "Always": "", + "Always": "Daima", "Always Collapse Code Blocks": "", "Always Expand Details": "", "Amazing": "Harika", "an assistant": "bir asistan", - "Analyzed": "", - "Analyzing...": "", + "Analyzed": "Analiz edildi", + "Analyzing...": "Analiz ediliyor...", "and": "ve", "and {{COUNT}} more": "ve {{COUNT}} daha", "and create a new shared link.": "ve yeni bir paylaşılan bağlantı oluşturun.", @@ -97,7 +97,7 @@ "Are you sure?": "Emin misiniz?", "Arena Models": "Arena Modelleri", "Artifacts": "Eserler", - "Ask": "", + "Ask": "Sor", "Ask a question": "Bir soru sorun", "Assistant": "Asistan", "Attach file from knowledge": "", @@ -107,7 +107,7 @@ "Audio": "Ses", "August": "Ağustos", "Authenticate": "Kimlik Doğrulama", - "Authentication": "", + "Authentication": "Kimlik Doğrulama", "Auto-Copy Response to Clipboard": "Yanıtı Panoya Otomatik Kopyala", "Auto-playback response": "Yanıtı otomatik oynatma", "Autocomplete Generation": "Otomatik Tamamlama Üretimi", @@ -137,7 +137,7 @@ "By {{name}}": "{{name}} Tarafından", "Bypass Embedding and Retrieval": "", "Bypass SSL verification for Websites": "Web Siteleri için SSL doğrulamasını atlayın", - "Calendar": "", + "Calendar": "Takvim", "Call": "Arama", "Call feature is not supported when using Web STT engine": "Web STT motoru kullanılırken arama özelliği desteklenmiyor", "Camera": "Kamera", @@ -455,15 +455,15 @@ "Exa API Key": "", "Example: 
(&(objectClass=inetOrgPerson)(uid=%s))": "Örnek: (&(objectClass=inetOrgPerson)(uid=%s))", "Example: ALL": "Örnek: ALL", - "Example: mail": "", + "Example: mail": "Örnek: mail", "Example: ou=users,dc=foo,dc=example": "Örnek: ou=users,dc=foo,dc=example", "Example: sAMAccountName or uid or userPrincipalName": "Örnek: sAMAccountName or uid or userPrincipalName", "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "Hariç tut", "Execute code for analysis": "", - "Expand": "", + "Expand": "Genişlet", "Experimental": "Deneysel", - "Explain": "", + "Explain": "Açıkla", "Explain this section to me in more detail": "", "Explore the cosmos": "Evreni keşfet", "Export": "Dışa Aktar", @@ -487,8 +487,8 @@ "Failed to save models configuration": "Modeller yapılandırması kaydedilemedi", "Failed to update settings": "Ayarlar güncellenemedi", "Failed to upload file.": "Dosya yüklenemedi.", - "Features": "", - "Features Permissions": "", + "Features": "Özellikler", + "Features Permissions": "Özellik Yetkileri", "February": "Şubat", "Feedback History": "Geri Bildirim Geçmişi", "Feedbacks": "Geri Bildirimler", @@ -536,7 +536,7 @@ "Gemini API Config": "", "Gemini API Key is required.": "", "General": "Genel", - "Generate an image": "", + "Generate an image": "Bir Görsel Oluştur", "Generate Image": "Görsel Üret", "Generate prompt pair": "", "Generating search query": "Arama sorgusu oluşturma", @@ -569,7 +569,7 @@ "I acknowledge that I have read and I understand the implications of my action. I am aware of the risks associated with executing arbitrary code and I have verified the trustworthiness of the source.": "Eylemimin sonuçlarını okuduğumu ve anladığımı kabul ediyorum. 
Rastgele kod çalıştırmayla ilgili risklerin farkındayım ve kaynağın güvenilirliğini doğruladım.", "ID": "", "Ignite curiosity": "Merak uyandırın", - "Image": "", + "Image": "Görsel", "Image Compression": "Görüntü Sıkıştırma", "Image Generation": "", "Image Generation (Experimental)": "Görüntü Oluşturma (Deneysel)", @@ -638,7 +638,7 @@ "Leave empty to include all models or select specific models": "Tüm modelleri dahil etmek için boş bırakın veya belirli modelleri seçin", "Leave empty to use the default prompt, or enter a custom prompt": "Varsayılan promptu kullanmak için boş bırakın veya özel bir prompt girin", "Leave model field empty to use the default model.": "", - "License": "", + "License": "Lisans", "Light": "Açık", "Listening...": "Dinleniyor...", "Llama.cpp": "", @@ -723,7 +723,7 @@ "No inference engine with management support found": "", "No knowledge found": "Bilgi bulunamadı", "No memories to clear": "", - "No model IDs": "", + "No model IDs": "Model ID yok", "No models found": "Model bulunamadı", "No models selected": "Model seçilmedi", "No results found": "Sonuç bulunamadı", @@ -815,7 +815,7 @@ "Presence Penalty": "", "Previous 30 days": "Önceki 30 gün", "Previous 7 days": "Önceki 7 gün", - "Private": "", + "Private": "Gizli", "Profile Image": "Profil Fotoğrafı", "Prompt": "", "Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (örn. Roma İmparatorluğu hakkında ilginç bir bilgi verin)", @@ -832,7 +832,7 @@ "RAG Template": "RAG Şablonu", "Rating": "Derecelendirme", "Re-rank models by topic similarity": "Konu benzerliğine göre modelleri yeniden sırala", - "Read": "", + "Read": "Oku", "Read Aloud": "Sesli Oku", "Reasoning Effort": "", "Record voice": "Ses kaydı yap", @@ -994,9 +994,9 @@ "Tags Generation": "Etiketler Oluşturma", "Tags Generation Prompt": "Etiketler Oluşturma Promptu", "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", - "Talk to model": "", + "Talk to model": "Model ile konuş", "Tap to interrupt": "Durdurmak için dokunun", - "Tasks": "", + "Tasks": "Görevler", "Tavily API Key": "Tavily API Anahtarı", "Tell us more:": "Bize daha fazlasını anlat:", "Temperature": "Temperature", @@ -1166,7 +1166,7 @@ "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", "Workspace": "Çalışma Alanı", "Workspace Permissions": "Çalışma Alanı İzinleri", - "Write": "", + "Write": "Yaz", "Write a prompt suggestion (e.g. Who are you?)": "Bir prompt önerisi yazın (örn. Sen kimsin?)", "Write a summary in 50 words that summarizes [topic or keyword].": "[Konuyu veya anahtar kelimeyi] özetleyen 50 kelimelik bir özet yazın.", "Write something...": "Bir şeyler yazın...", @@ -1186,6 +1186,6 @@ "Your account status is currently pending activation.": "Hesap durumunuz şu anda etkinleştirilmeyi bekliyor.", "Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "Tüm katkınız doğrudan eklenti geliştiricisine gidecektir; Open WebUI herhangi bir yüzde almaz. 
Ancak seçilen finansman platformunun kendi ücretleri olabilir.", "Youtube": "Youtube", - "Youtube Language": "", + "Youtube Language": "Youtube Dili", "Youtube Proxy URL": "" } From 6c4352de0722d2c440eebaf0205508722cd0c7ae Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Tue, 18 Mar 2025 17:53:11 -0700 Subject: [PATCH 350/623] fix: table cells format --- .../components/chat/Messages/Markdown/MarkdownTokens.svelte | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte b/src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte index 678caf7eca3..2d5e7a30eaf 100644 --- a/src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte +++ b/src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte @@ -123,7 +123,7 @@ class="px-3! py-1.5! cursor-pointer border border-gray-100 dark:border-gray-850" style={token.align[headerIdx] ? '' : `text-align: ${token.align[headerIdx]}`} > -
+
-
+
Date: Wed, 19 Mar 2025 11:25:16 +0900 Subject: [PATCH 351/623] feat: add clear ("X") button to sidebar chat search input --- src/lib/components/chat/SettingsModal.svelte | 1 - src/lib/components/layout/Sidebar.svelte | 1 + .../layout/Sidebar/SearchInput.svelte | 20 ++++++++++++++++++- 3 files changed, 20 insertions(+), 2 deletions(-) diff --git a/src/lib/components/chat/SettingsModal.svelte b/src/lib/components/chat/SettingsModal.svelte index 20d70505fe4..7d32a9718cd 100644 --- a/src/lib/components/chat/SettingsModal.svelte +++ b/src/lib/components/chat/SettingsModal.svelte @@ -15,7 +15,6 @@ import Chats from './Settings/Chats.svelte'; import User from '../icons/User.svelte'; import Personalization from './Settings/Personalization.svelte'; - import SearchInput from '../layout/Sidebar/SearchInput.svelte'; import Search from '../icons/Search.svelte'; import Connections from './Settings/Connections.svelte'; diff --git a/src/lib/components/layout/Sidebar.svelte b/src/lib/components/layout/Sidebar.svelte index 0ab13e6ad7c..4bd48ef18ca 100644 --- a/src/lib/components/layout/Sidebar.svelte +++ b/src/lib/components/layout/Sidebar.svelte @@ -611,6 +611,7 @@ bind:value={search} on:input={searchDebounceHandler} placeholder={$i18n.t('Search')} + showClearButton={true} />
diff --git a/src/lib/components/layout/Sidebar/SearchInput.svelte b/src/lib/components/layout/Sidebar/SearchInput.svelte index eddc5b06943..c1438cede6e 100644 --- a/src/lib/components/layout/Sidebar/SearchInput.svelte +++ b/src/lib/components/layout/Sidebar/SearchInput.svelte @@ -3,12 +3,14 @@ import { tags } from '$lib/stores'; import { getContext, createEventDispatcher, onMount, onDestroy, tick } from 'svelte'; import { fade } from 'svelte/transition'; + import XMark from '$lib/components/icons/XMark.svelte'; const dispatch = createEventDispatcher(); const i18n = getContext('i18n'); export let placeholder = ''; export let value = ''; + export let showClearButton = false; let selectedIdx = 0; @@ -59,6 +61,11 @@ loading = false; }; + const clearSearchInput = () => { + value = ''; + dispatch('input'); + }; + const documentClickHandler = (e) => { const searchContainer = document.getElementById('search-container'); const chatSearch = document.getElementById('chat-search'); @@ -98,7 +105,7 @@
{ @@ -140,6 +147,17 @@ } }} /> + + {#if showClearButton && value} +
+ +
+ {/if}
{#if focused && (filteredOptions.length > 0 || filteredTags.length > 0)} From 3e8546135d2380f08c2551923dd1e2f84e711934 Mon Sep 17 00:00:00 2001 From: leilibj Date: Wed, 19 Mar 2025 13:04:34 +0800 Subject: [PATCH 352/623] fix: correct incorrect usage of log.exception method --- backend/open_webui/retrieval/web/utils.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/backend/open_webui/retrieval/web/utils.py b/backend/open_webui/retrieval/web/utils.py index 538321372b6..2b1346d7bb1 100644 --- a/backend/open_webui/retrieval/web/utils.py +++ b/backend/open_webui/retrieval/web/utils.py @@ -227,7 +227,7 @@ def lazy_load(self) -> Iterator[Document]: yield from loader.lazy_load() except Exception as e: if self.continue_on_failure: - log.exception(e, "Error loading %s", url) + log.exception(f"Error loading {url}: {e}") continue raise e @@ -247,7 +247,7 @@ async def alazy_load(self): yield document except Exception as e: if self.continue_on_failure: - log.exception(e, "Error loading %s", url) + log.exception(f"Error loading {url}: {e}") continue raise e @@ -326,7 +326,7 @@ def lazy_load(self) -> Iterator[Document]: yield from loader.lazy_load() except Exception as e: if self.continue_on_failure: - log.exception(e, "Error extracting content from URLs") + log.exception(f"Error extracting content from URLs: {e}") else: raise e @@ -359,7 +359,7 @@ async def alazy_load(self) -> AsyncIterator[Document]: yield document except Exception as e: if self.continue_on_failure: - log.exception(e, "Error loading URLs") + log.exception(f"Error loading URLs: {e}") else: raise e @@ -440,7 +440,7 @@ def lazy_load(self) -> Iterator[Document]: yield Document(page_content=text, metadata=metadata) except Exception as e: if self.continue_on_failure: - log.exception(e, "Error loading %s", url) + log.exception(f"Error loading {url}: {e}") continue raise e browser.close() @@ -471,7 +471,7 @@ async def alazy_load(self) -> AsyncIterator[Document]: yield 
Document(page_content=text, metadata=metadata) except Exception as e: if self.continue_on_failure: - log.exception(e, "Error loading %s", url) + log.exception(f"Error loading {url}: {e}") continue raise e await browser.close() @@ -557,7 +557,7 @@ def lazy_load(self) -> Iterator[Document]: yield Document(page_content=text, metadata=metadata) except Exception as e: # Log the error and continue with the next URL - log.exception(e, "Error loading %s", path) + log.exception(f"Error loading {path}: {e}") async def alazy_load(self) -> AsyncIterator[Document]: """Async lazy load text from the url(s) in web_path.""" From 11f2aaf7b14b43f481eb9530488c75df919637d8 Mon Sep 17 00:00:00 2001 From: hurxxxx Date: Wed, 19 Mar 2025 21:55:15 +0900 Subject: [PATCH 353/623] feat: Automatically enter edit mode when creating a new folder --- src/lib/components/layout/Sidebar.svelte | 7 +++++++ src/lib/components/layout/Sidebar/RecursiveFolder.svelte | 8 ++++++++ 2 files changed, 15 insertions(+) diff --git a/src/lib/components/layout/Sidebar.svelte b/src/lib/components/layout/Sidebar.svelte index 0ab13e6ad7c..144b03b0ba1 100644 --- a/src/lib/components/layout/Sidebar.svelte +++ b/src/lib/components/layout/Sidebar.svelte @@ -77,6 +77,7 @@ let allChatsLoaded = false; let folders = {}; + let newFolderId = null; const initFolders = async () => { const folderList = await getFolders(localStorage.token).catch((error) => { @@ -90,6 +91,11 @@ for (const folder of folderList) { // Ensure folder is added to folders with its data folders[folder.id] = { ...(folders[folder.id] || {}), ...folder }; + + if (newFolderId && folder.id === newFolderId) { + folders[folder.id].isNew = true; + newFolderId = null; + } } // Second pass: Tie child folders to their parents @@ -150,6 +156,7 @@ }); if (res) { + newFolderId = res.id; await initFolders(); } }; diff --git a/src/lib/components/layout/Sidebar/RecursiveFolder.svelte b/src/lib/components/layout/Sidebar/RecursiveFolder.svelte index 085eb683b8b..461b82202df 
100644 --- a/src/lib/components/layout/Sidebar/RecursiveFolder.svelte +++ b/src/lib/components/layout/Sidebar/RecursiveFolder.svelte @@ -215,6 +215,14 @@ // Event listener for when dragging ends folderElement.addEventListener('dragend', onDragEnd); } + + if (folders[folderId].isNew) { + folders[folderId].isNew = false; + + setTimeout(() => { + editHandler(); + }, 100); + } }); onDestroy(() => { From ec8fc727b825df04d40614a04942c50d003ecaef Mon Sep 17 00:00:00 2001 From: Marko Henning Date: Wed, 19 Mar 2025 16:06:10 +0100 Subject: [PATCH 354/623] Fix wrong order for chromadb --- backend/open_webui/retrieval/utils.py | 31 +++++++++++++++++---------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/backend/open_webui/retrieval/utils.py b/backend/open_webui/retrieval/utils.py index 029a33a56cd..b05057b28fe 100644 --- a/backend/open_webui/retrieval/utils.py +++ b/backend/open_webui/retrieval/utils.py @@ -178,8 +178,7 @@ def merge_and_sort_query_results( query_results: list[dict], k: int, reverse: bool = False ) -> dict: # Initialize lists to store combined data - combined = [] - seen_hashes = set() # To store unique document hashes + combined = dict() # To store documents with unique document hashes for data in query_results: distances = data["distances"][0] @@ -192,10 +191,19 @@ def merge_and_sort_query_results( document.encode() ).hexdigest() # Compute a hash for uniqueness - if doc_hash not in seen_hashes: - seen_hashes.add(doc_hash) - combined.append((distance, document, metadata)) + if doc_hash not in combined.keys(): + combined[doc_hash] = (distance, document, metadata) + continue # if doc is new, no further comparison is needed + # if doc is alredy in, but new distance is better, update + if not reverse and distance < combined[doc_hash][0]: + # Chroma uses unconventional cosine similarity, so we don't need to reverse the results + # https://docs.trychroma.com/docs/collections/configure#configuring-chroma-collections + combined[doc_hash] = (distance, 
document, metadata) + if reverse and distance > combined[doc_hash][0]: + combined[doc_hash] = (distance, document, metadata) + + combined = list(combined.values()) # Sort the list based on distances combined.sort(key=lambda x: x[0], reverse=reverse) @@ -204,6 +212,12 @@ def merge_and_sort_query_results( zip(*combined[:k]) if combined else ([], [], []) ) + # if chromaDB, the distance is 0 (best) to 2 (worse) + # re-order to -1 (worst) to 1 (best) for relevance score + if not reverse: + sorted_distances = tuple(-dist for dist in sorted_distances) + sorted_distances = tuple(dist + 1 for dist in sorted_distances) + # Create and return the output dictionary return { "distances": [list(sorted_distances)], @@ -294,12 +308,7 @@ def query_collection_with_hybrid_search( "Hybrid search failed for all collections. Using Non hybrid search as fallback." ) - if VECTOR_DB == "chroma": - # Chroma uses unconventional cosine similarity, so we don't need to reverse the results - # https://docs.trychroma.com/docs/collections/configure#configuring-chroma-collections - return merge_and_sort_query_results(results, k=k, reverse=False) - else: - return merge_and_sort_query_results(results, k=k, reverse=True) + return merge_and_sort_query_results(results, k=k, reverse=True) def get_embedding_function( From 18a8a375aba438953cbab382eddcc2a17d0de9e8 Mon Sep 17 00:00:00 2001 From: hurxxxx Date: Thu, 20 Mar 2025 00:27:53 +0900 Subject: [PATCH 355/623] fix: Enter edit mode with text pre-selected --- src/lib/components/layout/Sidebar/RecursiveFolder.svelte | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/lib/components/layout/Sidebar/RecursiveFolder.svelte b/src/lib/components/layout/Sidebar/RecursiveFolder.svelte index 461b82202df..334eb80bfa3 100644 --- a/src/lib/components/layout/Sidebar/RecursiveFolder.svelte +++ b/src/lib/components/layout/Sidebar/RecursiveFolder.svelte @@ -309,10 +309,13 @@ await tick(); - // focus on the input + // focus on the input and select 
all text setTimeout(() => { const input = document.getElementById(`folder-${folderId}-input`); - input.focus(); + if (input) { + input.focus(); + input.select(); + } }, 100); }; From f806ab0bd295bbf6feb50d4512f5e1f3d9dcf9cf Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Wed, 19 Mar 2025 08:32:31 -0700 Subject: [PATCH 356/623] refac --- backend/open_webui/env.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/backend/open_webui/env.py b/backend/open_webui/env.py index 6c4d151b0e7..27cc3a9a4d4 100644 --- a/backend/open_webui/env.py +++ b/backend/open_webui/env.py @@ -391,7 +391,7 @@ def parse_section(section): AIOHTTP_CLIENT_TIMEOUT_MODEL_LIST = os.environ.get( "AIOHTTP_CLIENT_TIMEOUT_MODEL_LIST", - os.environ.get("AIOHTTP_CLIENT_TIMEOUT_OPENAI_MODEL_LIST", ""), + os.environ.get("AIOHTTP_CLIENT_TIMEOUT_OPENAI_MODEL_LIST", "10"), ) if AIOHTTP_CLIENT_TIMEOUT_MODEL_LIST == "": @@ -400,7 +400,7 @@ def parse_section(section): try: AIOHTTP_CLIENT_TIMEOUT_MODEL_LIST = int(AIOHTTP_CLIENT_TIMEOUT_MODEL_LIST) except Exception: - AIOHTTP_CLIENT_TIMEOUT_MODEL_LIST = 5 + AIOHTTP_CLIENT_TIMEOUT_MODEL_LIST = 10 #################################### # OFFLINE_MODE From c69d1c86fe8d0a827515f5018af8dbc2c6457edd Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Wed, 19 Mar 2025 08:36:41 -0700 Subject: [PATCH 357/623] enh: apply file size limit to knowledge --- .../workspace/Knowledge/KnowledgeBase.svelte | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/src/lib/components/workspace/Knowledge/KnowledgeBase.svelte b/src/lib/components/workspace/Knowledge/KnowledgeBase.svelte index 415df52a1a8..07ca0f1ed9e 100644 --- a/src/lib/components/workspace/Knowledge/KnowledgeBase.svelte +++ b/src/lib/components/workspace/Knowledge/KnowledgeBase.svelte @@ -9,7 +9,7 @@ import { goto } from '$app/navigation'; import { page } from '$app/stores'; - import { mobile, showSidebar, knowledge as _knowledge } from '$lib/stores'; + 
import { mobile, showSidebar, knowledge as _knowledge, config } from '$lib/stores'; import { updateFileDataContentById, uploadFile, deleteFileById } from '$lib/apis/files'; import { @@ -131,6 +131,22 @@ return null; } + if ( + ($config?.file?.max_size ?? null) !== null && + file.size > ($config?.file?.max_size ?? 0) * 1024 * 1024 + ) { + console.log('File exceeds max size limit:', { + fileSize: file.size, + maxSize: ($config?.file?.max_size ?? 0) * 1024 * 1024 + }); + toast.error( + $i18n.t(`File size should not exceed {{maxSize}} MB.`, { + maxSize: $config?.file?.max_size + }) + ); + return; + } + knowledge.files = [...(knowledge.files ?? []), fileItem]; try { From 70550e41fcbd24f821e5de365acdc8c7e03a9c64 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Wed, 19 Mar 2025 08:47:31 -0700 Subject: [PATCH 358/623] enh: user groups/permissions endpoint --- backend/open_webui/routers/users.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/backend/open_webui/routers/users.py b/backend/open_webui/routers/users.py index 872212d3ce1..f5349faa36a 100644 --- a/backend/open_webui/routers/users.py +++ b/backend/open_webui/routers/users.py @@ -2,6 +2,7 @@ from typing import Optional from open_webui.models.auths import Auths +from open_webui.models.groups import Groups from open_webui.models.chats import Chats from open_webui.models.users import ( UserModel, @@ -17,7 +18,10 @@ from open_webui.env import SRC_LOG_LEVELS from fastapi import APIRouter, Depends, HTTPException, Request, status from pydantic import BaseModel + from open_webui.utils.auth import get_admin_user, get_password_hash, get_verified_user +from open_webui.utils.access_control import get_permissions + log = logging.getLogger(__name__) log.setLevel(SRC_LOG_LEVELS["MODELS"]) @@ -45,7 +49,7 @@ async def get_users( @router.get("/groups") async def get_user_groups(user=Depends(get_verified_user)): - return Users.get_user_groups(user.id) + return 
Groups.get_groups_by_member_id(user.id) ############################ @@ -54,8 +58,12 @@ async def get_user_groups(user=Depends(get_verified_user)): @router.get("/permissions") -async def get_user_permissisions(user=Depends(get_verified_user)): - return Users.get_user_groups(user.id) +async def get_user_permissisions(request: Request, user=Depends(get_verified_user)): + user_permissions = get_permissions( + user.id, request.app.state.config.USER_PERMISSIONS + ) + + return user_permissions ############################ @@ -89,7 +97,7 @@ class UserPermissions(BaseModel): @router.get("/default/permissions", response_model=UserPermissions) -async def get_user_permissions(request: Request, user=Depends(get_admin_user)): +async def get_default_user_permissions(request: Request, user=Depends(get_admin_user)): return { "workspace": WorkspacePermissions( **request.app.state.config.USER_PERMISSIONS.get("workspace", {}) @@ -104,7 +112,7 @@ async def get_user_permissions(request: Request, user=Depends(get_admin_user)): @router.post("/default/permissions") -async def update_user_permissions( +async def update_default_user_permissions( request: Request, form_data: UserPermissions, user=Depends(get_admin_user) ): request.app.state.config.USER_PERMISSIONS = form_data.model_dump() From 5f48af5b9114d480d7c9bea71c298f1fde001563 Mon Sep 17 00:00:00 2001 From: Marko Henning Date: Wed, 19 Mar 2025 17:04:45 +0100 Subject: [PATCH 359/623] Revert the ordering change with chromadb, not necessary with reranker results --- backend/open_webui/retrieval/utils.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/backend/open_webui/retrieval/utils.py b/backend/open_webui/retrieval/utils.py index 1afb333b11b..d50d4d44c91 100644 --- a/backend/open_webui/retrieval/utils.py +++ b/backend/open_webui/retrieval/utils.py @@ -146,12 +146,7 @@ def query_doc_with_hybrid_search( # retrieve only min(k, k_reranker) items, sort and cut by distance if k < k_reranker if k < k_reranker: - if 
VECTOR_DB == "chroma": - # Chroma uses unconventional cosine similarity, so we don't need to reverse the results - # https://docs.trychroma.com/docs/collections/configure#configuring-chroma-collections - sorted_items = sorted(zip(distances, metadatas, documents), key=lambda x: x[0], reverse=False) - else: - sorted_items = sorted(zip(distances, metadatas, documents), key=lambda x: x[0], reverse=True) + sorted_items = sorted(zip(distances, metadatas, documents), key=lambda x: x[0], reverse=True) sorted_items = sorted_items[:k] distances, documents, metadatas = map(list, zip(*sorted_items)) result = { From 07098c6352367de5f0bbbce4b7cf9e28af1e3a39 Mon Sep 17 00:00:00 2001 From: genjuro Date: Thu, 20 Mar 2025 14:58:38 +0800 Subject: [PATCH 360/623] perf: set shorter timeout for playwright and make it configurable --- backend/open_webui/config.py | 6 ++++++ backend/open_webui/main.py | 2 ++ backend/open_webui/retrieval/web/utils.py | 18 ++++++++++++++---- 3 files changed, 22 insertions(+), 4 deletions(-) diff --git a/backend/open_webui/config.py b/backend/open_webui/config.py index d153c7ddad7..c25e0e046a2 100644 --- a/backend/open_webui/config.py +++ b/backend/open_webui/config.py @@ -2081,6 +2081,12 @@ class BannerModel(BaseModel): os.environ.get("PLAYWRIGHT_WS_URI", None), ) +PLAYWRIGHT_GOTO_TIMEOUT = PersistentConfig( + "PLAYWRIGHT_GOTO_TIMEOUT", + "rag.web.loader.engine.playwright.goto.timeout", + int(os.environ.get("PLAYWRIGHT_GOTO_TIMEOUT", "10")), +) + FIRECRAWL_API_KEY = PersistentConfig( "FIRECRAWL_API_KEY", "firecrawl.api_key", diff --git a/backend/open_webui/main.py b/backend/open_webui/main.py index 1ea79aa2634..228c92e6447 100644 --- a/backend/open_webui/main.py +++ b/backend/open_webui/main.py @@ -155,6 +155,7 @@ AUDIO_TTS_AZURE_SPEECH_REGION, AUDIO_TTS_AZURE_SPEECH_OUTPUT_FORMAT, PLAYWRIGHT_WS_URI, + PLAYWRIGHT_GOTO_TIMEOUT, FIRECRAWL_API_BASE_URL, FIRECRAWL_API_KEY, RAG_WEB_LOADER_ENGINE, @@ -629,6 +630,7 @@ async def lifespan(app: FastAPI): 
app.state.config.RAG_WEB_LOADER_ENGINE = RAG_WEB_LOADER_ENGINE app.state.config.RAG_WEB_SEARCH_TRUST_ENV = RAG_WEB_SEARCH_TRUST_ENV app.state.config.PLAYWRIGHT_WS_URI = PLAYWRIGHT_WS_URI +app.state.config.PLAYWRIGHT_GOTO_TIMEOUT = PLAYWRIGHT_GOTO_TIMEOUT app.state.config.FIRECRAWL_API_BASE_URL = FIRECRAWL_API_BASE_URL app.state.config.FIRECRAWL_API_KEY = FIRECRAWL_API_KEY app.state.config.TAVILY_EXTRACT_DEPTH = TAVILY_EXTRACT_DEPTH diff --git a/backend/open_webui/retrieval/web/utils.py b/backend/open_webui/retrieval/web/utils.py index 2b1346d7bb1..0eee00879e1 100644 --- a/backend/open_webui/retrieval/web/utils.py +++ b/backend/open_webui/retrieval/web/utils.py @@ -29,6 +29,7 @@ from open_webui.config import ( ENABLE_RAG_LOCAL_WEB_FETCH, PLAYWRIGHT_WS_URI, + PLAYWRIGHT_GOTO_TIMEOUT, RAG_WEB_LOADER_ENGINE, FIRECRAWL_API_BASE_URL, FIRECRAWL_API_KEY, @@ -376,6 +377,7 @@ class SafePlaywrightURLLoader(PlaywrightURLLoader, RateLimitMixin, URLProcessing headless (bool): If True, the browser will run in headless mode. proxy (dict): Proxy override settings for the Playwright session. playwright_ws_url (Optional[str]): WebSocket endpoint URI for remote browser connection. + playwright_goto_timeout (Optional[int]): Maximum operation time in milliseconds. 
""" def __init__( @@ -389,6 +391,7 @@ def __init__( remove_selectors: Optional[List[str]] = None, proxy: Optional[Dict[str, str]] = None, playwright_ws_url: Optional[str] = None, + playwright_goto_timeout: Optional[int] = 10000, ): """Initialize with additional safety parameters and remote browser support.""" @@ -415,6 +418,7 @@ def __init__( self.last_request_time = None self.playwright_ws_url = playwright_ws_url self.trust_env = trust_env + self.playwright_goto_timeout = playwright_goto_timeout def lazy_load(self) -> Iterator[Document]: """Safely load URLs synchronously with support for remote browser.""" @@ -431,7 +435,7 @@ def lazy_load(self) -> Iterator[Document]: try: self._safe_process_url_sync(url) page = browser.new_page() - response = page.goto(url) + response = page.goto(url, timeout=self.playwright_goto_timeout) if response is None: raise ValueError(f"page.goto() returned None for url {url}") @@ -462,7 +466,9 @@ async def alazy_load(self) -> AsyncIterator[Document]: try: await self._safe_process_url(url) page = await browser.new_page() - response = await page.goto(url) + response = await page.goto( + url, timeout=self.playwright_goto_timeout + ) if response is None: raise ValueError(f"page.goto() returned None for url {url}") @@ -604,8 +610,12 @@ def get_web_loader( "trust_env": trust_env, } - if PLAYWRIGHT_WS_URI.value: - web_loader_args["playwright_ws_url"] = PLAYWRIGHT_WS_URI.value + if RAG_WEB_LOADER_ENGINE.value == "playwright": + web_loader_args["playwright_goto_timeout"] = ( + PLAYWRIGHT_GOTO_TIMEOUT.value * 1000 + ) + if PLAYWRIGHT_WS_URI.value: + web_loader_args["playwright_ws_url"] = PLAYWRIGHT_WS_URI.value if RAG_WEB_LOADER_ENGINE.value == "firecrawl": web_loader_args["api_key"] = FIRECRAWL_API_KEY.value From 2bdf77a726e2ce69de5511fcf702227bd6371a32 Mon Sep 17 00:00:00 2001 From: Diwakar Date: Thu, 20 Mar 2025 22:20:27 +0700 Subject: [PATCH 361/623] Fix error message propagate from pipelines Error message returned from pipelines was not being 
shown on UI. It showed "Connection closed". With this fix it will show the error message on the UI from the pipeline properly. --- backend/open_webui/routers/pipelines.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/backend/open_webui/routers/pipelines.py b/backend/open_webui/routers/pipelines.py index 599208e43d0..10c8e9b2ec9 100644 --- a/backend/open_webui/routers/pipelines.py +++ b/backend/open_webui/routers/pipelines.py @@ -90,8 +90,8 @@ async def process_pipeline_inlet_filter(request, payload, user, models): headers=headers, json=request_data, ) as response: - response.raise_for_status() payload = await response.json() + response.raise_for_status() except aiohttp.ClientResponseError as e: res = ( await response.json() @@ -139,8 +139,8 @@ async def process_pipeline_outlet_filter(request, payload, user, models): headers=headers, json=request_data, ) as response: - response.raise_for_status() payload = await response.json() + response.raise_for_status() except aiohttp.ClientResponseError as e: try: res = ( From 5b276471b3e4010c84b405efa4dfc92a5b244b0c Mon Sep 17 00:00:00 2001 From: Alluuu <22728104+alluuu@users.noreply.github.com> Date: Thu, 20 Mar 2025 18:42:45 +0200 Subject: [PATCH 362/623] Added Estonian language translations. 
Tried to organize language list, following existing pattern as best, as I could tell: English first, Alphabetical languages, finished by Chinese and Doge --- src/lib/i18n/locales/et-EE/translation.json | 1178 +++++++++++++++++++ src/lib/i18n/locales/languages.json | 66 +- 2 files changed, 1213 insertions(+), 31 deletions(-) create mode 100644 src/lib/i18n/locales/et-EE/translation.json diff --git a/src/lib/i18n/locales/et-EE/translation.json b/src/lib/i18n/locales/et-EE/translation.json new file mode 100644 index 00000000000..0065f8871ac --- /dev/null +++ b/src/lib/i18n/locales/et-EE/translation.json @@ -0,0 +1,1178 @@ +{ + "-1 for no limit, or a positive integer for a specific limit": "-1 piirangu puudumisel või positiivne täisarv konkreetse piirangu jaoks", + "'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.": "'s', 'm', 'h', 'd', 'w' või '-1' aegumiseta.", + "(e.g. `sh webui.sh --api --api-auth username_password`)": "(nt `sh webui.sh --api --api-auth kasutajanimi_parool`)", + "(e.g. 
`sh webui.sh --api`)": "(nt `sh webui.sh --api`)", + "(latest)": "(uusim)", + "{{ models }}": "{{ mudelid }}", + "{{COUNT}} hidden lines": "{{COUNT}} peidetud rida", + "{{COUNT}} Replies": "{{COUNT}} vastust", + "{{user}}'s Chats": "{{user}} vestlused", + "{{webUIName}} Backend Required": "{{webUIName}} taustaserver on vajalik", + "*Prompt node ID(s) are required for image generation": "*Vihje sõlme ID(d) on piltide genereerimiseks vajalikud", + "A new version (v{{LATEST_VERSION}}) is now available.": "Uus versioon (v{{LATEST_VERSION}}) on saadaval.", + "A task model is used when performing tasks such as generating titles for chats and web search queries": "Ülesande mudelit kasutatakse selliste toimingute jaoks nagu vestluste pealkirjade ja veebiotsingu päringute genereerimine", + "a user": "kasutaja", + "About": "Teave", + "Accept autocomplete generation / Jump to prompt variable": "Nõustu automaattäitmisega / Liigu vihjete muutujale", + "Access": "Juurdepääs", + "Access Control": "Juurdepääsu kontroll", + "Accessible to all users": "Kättesaadav kõigile kasutajatele", + "Account": "Konto", + "Account Activation Pending": "Konto aktiveerimine ootel", + "Accurate information": "Täpne informatsioon", + "Actions": "Toimingud", + "Activate": "Aktiveeri", + "Activate this command by typing \"/{{COMMAND}}\" to chat input.": "Aktiveeri see käsk, trükkides \"/{{COMMAND}}\" vestluse sisendritta.", + "Active Users": "Aktiivsed kasutajad", + "Add": "Lisa", + "Add a model ID": "Lisa mudeli ID", + "Add a short description about what this model does": "Lisa lühike kirjeldus, mida see mudel teeb", + "Add a tag": "Lisa silt", + "Add Arena Model": "Lisa Areena mudel", + "Add Connection": "Lisa ühendus", + "Add Content": "Lisa sisu", + "Add content here": "Lisa siia sisu", + "Add custom prompt": "Lisa kohandatud vihjeid", + "Add Files": "Lisa faile", + "Add Group": "Lisa grupp", + "Add Memory": "Lisa mälu", + "Add Model": "Lisa mudel", + "Add Reaction": "Lisa reaktsioon", + "Add 
Tag": "Lisa silt", + "Add Tags": "Lisa silte", + "Add text content": "Lisa tekstisisu", + "Add User": "Lisa kasutaja", + "Add User Group": "Lisa kasutajagrupp", + "Adjusting these settings will apply changes universally to all users.": "Nende seadete kohandamine rakendab muudatused universaalselt kõigile kasutajatele.", + "admin": "admin", + "Admin": "Administraator", + "Admin Panel": "Administraatori paneel", + "Admin Settings": "Administraatori seaded", + "Admins have access to all tools at all times; users need tools assigned per model in the workspace.": "Administraatoritel on alati juurdepääs kõigile tööriistadele; kasutajatele tuleb tööriistad määrata mudeli põhiselt tööruumis.", + "Advanced Parameters": "Täpsemad parameetrid", + "Advanced Params": "Täpsemad parameetrid", + "All": "Kõik", + "All Documents": "Kõik dokumendid", + "All models deleted successfully": "Kõik mudelid edukalt kustutatud", + "Allow Chat Controls": "Luba vestluse kontrollnupud", + "Allow Chat Delete": "Luba vestluse kustutamine", + "Allow Chat Deletion": "Luba vestluse kustutamine", + "Allow Chat Edit": "Luba vestluse muutmine", + "Allow File Upload": "Luba failide üleslaadimine", + "Allow non-local voices": "Luba mitte-lokaalsed hääled", + "Allow Temporary Chat": "Luba ajutine vestlus", + "Allow User Location": "Luba kasutaja asukoht", + "Allow Voice Interruption in Call": "Luba hääle katkestamine kõnes", + "Allowed Endpoints": "Lubatud lõpp-punktid", + "Already have an account?": "Kas teil on juba konto?", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "Alternatiiv top_p-le ja eesmärk on tagada kvaliteedi ja mitmekesisuse tasakaal. 
Parameeter p esindab minimaalset tõenäosust tokeni arvesse võtmiseks, võrreldes kõige tõenäolisema tokeni tõenäosusega. Näiteks p=0.05 korral, kui kõige tõenäolisema tokeni tõenäosus on 0.9, filtreeritakse välja logitid väärtusega alla 0.045.", + "Always": "Alati", + "Amazing": "Suurepärane", + "an assistant": "assistent", + "Analyzed": "Analüüsitud", + "Analyzing...": "Analüüsimine...", + "and": "ja", + "and {{COUNT}} more": "ja veel {{COUNT}}", + "and create a new shared link.": "ja looge uus jagatud link.", + "API Base URL": "API baas-URL", + "API Key": "API võti", + "API Key created.": "API võti loodud.", + "API Key Endpoint Restrictions": "API võtme lõpp-punkti piirangud", + "API keys": "API võtmed", + "Application DN": "Rakenduse DN", + "Application DN Password": "Rakenduse DN parool", + "applies to all users with the \"user\" role": "kehtib kõigile kasutajatele \"kasutaja\" rolliga", + "April": "Aprill", + "Archive": "Arhiveeri", + "Archive All Chats": "Arhiveeri kõik vestlused", + "Archived Chats": "Arhiveeritud vestlused", + "archived-chat-export": "arhiveeritud-vestluste-eksport", + "Are you sure you want to clear all memories? This action cannot be undone.": "Kas olete kindel, et soovite kustutada kõik mälestused? 
Seda toimingut ei saa tagasi võtta.", + "Are you sure you want to delete this channel?": "Kas olete kindel, et soovite selle kanali kustutada?", + "Are you sure you want to delete this message?": "Kas olete kindel, et soovite selle sõnumi kustutada?", + "Are you sure you want to unarchive all archived chats?": "Kas olete kindel, et soovite kõik arhiveeritud vestlused arhiivist eemaldada?", + "Are you sure?": "Kas olete kindel?", + "Arena Models": "Areena mudelid", + "Artifacts": "Tekkinud objektid", + "Ask": "Küsi", + "Ask a question": "Esita küsimus", + "Assistant": "Assistent", + "Attach file from knowledge": "Lisa fail teadmiste baasist", + "Attention to detail": "Tähelepanu detailidele", + "Attribute for Mail": "E-posti atribuut", + "Attribute for Username": "Kasutajanime atribuut", + "Audio": "Heli", + "August": "August", + "Authenticate": "Autendi", + "Authentication": "Autentimine", + "Auto-Copy Response to Clipboard": "Kopeeri vastus automaatselt lõikelauale", + "Auto-playback response": "Mängi vastus automaatselt", + "Autocomplete Generation": "Automaattäitmise genereerimine", + "Autocomplete Generation Input Max Length": "Automaattäitmise genereerimise sisendi maksimaalne pikkus", + "Automatic1111": "Automatic1111", + "AUTOMATIC1111 Api Auth String": "AUTOMATIC1111 API autentimise string", + "AUTOMATIC1111 Base URL": "AUTOMATIC1111 baas-URL", + "AUTOMATIC1111 Base URL is required.": "AUTOMATIC1111 baas-URL on nõutav.", + "Available list": "Saadaolevate nimekiri", + "available!": "saadaval!", + "Awful": "Kohutav", + "Azure AI Speech": "Azure AI Kõne", + "Azure Region": "Azure regioon", + "Back": "Tagasi", + "Bad Response": "Halb vastus", + "Banners": "Bännerid", + "Base Model (From)": "Baas mudel (Allikas)", + "Batch Size (num_batch)": "Partii suurus (num_batch)", + "before": "enne", + "Being lazy": "Laisklemine", + "Beta": "Beeta", + "Bing Search V7 Endpoint": "Bing Search V7 lõpp-punkt", + "Bing Search V7 Subscription Key": "Bing Search V7 tellimuse 
võti", + "Bocha Search API Key": "Bocha otsingu API võti", + "Boosting or penalizing specific tokens for constrained responses. Bias values will be clamped between -100 and 100 (inclusive). (Default: none)": "Konkreetsete tokenite võimendamine või karistamine piiratud vastuste jaoks. Kallutatuse väärtused piiratakse vahemikku -100 kuni 100 (kaasa arvatud). (Vaikimisi: puudub)", + "Brave Search API Key": "Brave Search API võti", + "By {{name}}": "Autor: {{name}}", + "Bypass Embedding and Retrieval": "Möödaminek sisestamisest ja taastamisest", + "Bypass SSL verification for Websites": "Möödaminek veebisaitide SSL-kontrollimisest", + "Calendar": "Kalender", + "Call": "Kõne", + "Call feature is not supported when using Web STT engine": "Kõnefunktsioon ei ole Web STT mootorit kasutades toetatud", + "Camera": "Kaamera", + "Cancel": "Tühista", + "Capabilities": "Võimekused", + "Capture": "Jäädvusta", + "Certificate Path": "Sertifikaadi tee", + "Change Password": "Muuda parooli", + "Channel Name": "Kanali nimi", + "Channels": "Kanalid", + "Character": "Tegelane", + "Character limit for autocomplete generation input": "Märkide piirang automaattäitmise genereerimise sisendile", + "Chart new frontiers": "Kaardista uusi piire", + "Chat": "Vestlus", + "Chat Background Image": "Vestluse taustapilt", + "Chat Bubble UI": "Vestlusmullide kasutajaliides", + "Chat Controls": "Vestluse juhtnupud", + "Chat direction": "Vestluse suund", + "Chat Overview": "Vestluse ülevaade", + "Chat Permissions": "Vestluse õigused", + "Chat Tags Auto-Generation": "Vestluse siltide automaatne genereerimine", + "Chats": "Vestlused", + "Check Again": "Kontrolli uuesti", + "Check for updates": "Kontrolli uuendusi", + "Checking for updates...": "Uuenduste kontrollimine...", + "Choose a model before saving...": "Valige mudel enne salvestamist...", + "Chunk Overlap": "Tükkide ülekate", + "Chunk Size": "Tüki suurus", + "Ciphers": "Šifrid", + "Citation": "Viide", + "Clear memory": "Tühjenda mälu", + "Clear 
Memory": "Tühjenda mälu", + "click here": "klõpsake siia", + "Click here for filter guides.": "Filtri juhiste jaoks klõpsake siia.", + "Click here for help.": "Abi saamiseks klõpsake siia.", + "Click here to": "Klõpsake siia, et", + "Click here to download user import template file.": "Klõpsake siia kasutajate importimise mallifaili allalaadimiseks.", + "Click here to learn more about faster-whisper and see the available models.": "Klõpsake siia, et teada saada rohkem faster-whisper kohta ja näha saadaolevaid mudeleid.", + "Click here to see available models.": "Klõpsake siia, et näha saadaolevaid mudeleid.", + "Click here to select": "Klõpsake siia valimiseks", + "Click here to select a csv file.": "Klõpsake siia csv-faili valimiseks.", + "Click here to select a py file.": "Klõpsake siia py-faili valimiseks.", + "Click here to upload a workflow.json file.": "Klõpsake siia workflow.json faili üleslaadimiseks.", + "click here.": "klõpsake siia.", + "Click on the user role button to change a user's role.": "Kasutaja rolli muutmiseks klõpsake kasutaja rolli nuppu.", + "Clipboard write permission denied. Please check your browser settings to grant the necessary access.": "Lõikelaua kirjutamisõigust ei antud. 
Kontrollige oma brauseri seadeid, et anda vajalik juurdepääs.", + "Clone": "Klooni", + "Clone Chat": "Klooni vestlus", + "Clone of {{TITLE}}": "{{TITLE}} koopia", + "Close": "Sulge", + "Code execution": "Koodi täitmine", + "Code Execution": "Koodi täitmine", + "Code Execution Engine": "Koodi täitmise mootor", + "Code Execution Timeout": "Koodi täitmise aegumine", + "Code formatted successfully": "Kood vormindatud edukalt", + "Code Interpreter": "Koodi interpretaator", + "Code Interpreter Engine": "Koodi interpretaatori mootor", + "Code Interpreter Prompt Template": "Koodi interpretaatori vihje mall", + "Collapse": "Ahenda", + "Collection": "Kogu", + "Color": "Värv", + "ComfyUI": "ComfyUI", + "ComfyUI API Key": "ComfyUI API võti", + "ComfyUI Base URL": "ComfyUI baas-URL", + "ComfyUI Base URL is required.": "ComfyUI baas-URL on nõutav.", + "ComfyUI Workflow": "ComfyUI töövoog", + "ComfyUI Workflow Nodes": "ComfyUI töövoo sõlmed", + "Command": "Käsk", + "Completions": "Lõpetamised", + "Concurrent Requests": "Samaaegsed päringud", + "Configure": "Konfigureeri", + "Confirm": "Kinnita", + "Confirm Password": "Kinnita parool", + "Confirm your action": "Kinnita oma toiming", + "Confirm your new password": "Kinnita oma uus parool", + "Connect to your own OpenAI compatible API endpoints.": "Ühendu oma OpenAI-ga ühilduvate API lõpp-punktidega.", + "Connections": "Ühendused", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "Piirab arutluse pingutust arutlusvõimelistele mudelitele. 
Kohaldatav ainult konkreetsete pakkujate arutlusmudelitele, mis toetavad arutluspingutust.", + "Contact Admin for WebUI Access": "Võtke WebUI juurdepääsu saamiseks ühendust administraatoriga", + "Content": "Sisu", + "Content Extraction Engine": "Sisu ekstraheerimise mootor", + "Context Length": "Konteksti pikkus", + "Continue Response": "Jätka vastust", + "Continue with {{provider}}": "Jätka {{provider}}-ga", + "Continue with Email": "Jätka e-postiga", + "Continue with LDAP": "Jätka LDAP-ga", + "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Kontrolli, kuidas sõnumitekst on jagatud TTS-päringute jaoks. 'Kirjavahemärgid' jagab lauseteks, 'lõigud' jagab lõikudeks ja 'puudub' hoiab sõnumi ühe stringina.", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "Kontrollige tokeni järjestuste kordumist genereeritud tekstis. Kõrgem väärtus (nt 1,5) karistab kordusi tugevamalt, samas kui madalam väärtus (nt 1,1) on leebem. Väärtuse 1 korral on see keelatud.", + "Controls": "Juhtnupud", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "Kontrollib väljundi sidususe ja mitmekesisuse vahelist tasakaalu. 
Madalam väärtus annab tulemuseks fokuseerituma ja sidusama teksti.", + "Copied": "Kopeeritud", + "Copied shared chat URL to clipboard!": "Jagatud vestluse URL kopeeritud lõikelauale!", + "Copied to clipboard": "Kopeeritud lõikelauale", + "Copy": "Kopeeri", + "Copy last code block": "Kopeeri viimane koodiplokk", + "Copy last response": "Kopeeri viimane vastus", + "Copy Link": "Kopeeri link", + "Copy to clipboard": "Kopeeri lõikelauale", + "Copying to clipboard was successful!": "Lõikelauale kopeerimine õnnestus!", + "CORS must be properly configured by the provider to allow requests from Open WebUI.": "Teenusepakkuja peab nõuetekohaselt konfigureerima CORS-i, et lubada päringuid Open WebUI-lt.", + "Create": "Loo", + "Create a knowledge base": "Loo teadmiste baas", + "Create a model": "Loo mudel", + "Create Account": "Loo konto", + "Create Admin Account": "Loo administraatori konto", + "Create Channel": "Loo kanal", + "Create Group": "Loo grupp", + "Create Knowledge": "Loo teadmised", + "Create new key": "Loo uus võti", + "Create new secret key": "Loo uus salavõti", + "Created at": "Loomise aeg", + "Created At": "Loomise aeg", + "Created by": "Autor", + "CSV Import": "CSV import", + "Ctrl+Enter to Send": "Ctrl+Enter saatmiseks", + "Current Model": "Praegune mudel", + "Current Password": "Praegune parool", + "Custom": "Kohandatud", + "Danger Zone": "Ohutsoon", + "Dark": "Tume", + "Database": "Andmebaas", + "December": "Detsember", + "Default": "Vaikimisi", + "Default (Open AI)": "Vaikimisi (Open AI)", + "Default (SentenceTransformers)": "Vaikimisi (SentenceTransformers)", + "Default mode works with a wider range of models by calling tools once before execution. Native mode leverages the model's built-in tool-calling capabilities, but requires the model to inherently support this feature.": "Vaikerežiim töötab laiema mudelite valikuga, kutsudes tööriistad välja enne täitmist. 
Kohalik režiim kasutab mudeli sisseehitatud tööriistade väljakutsumise võimalusi, kuid eeldab, et mudel toetab sisemiselt seda funktsiooni.", + "Default Model": "Vaikimisi mudel", + "Default model updated": "Vaikimisi mudel uuendatud", + "Default Models": "Vaikimisi mudelid", + "Default permissions": "Vaikimisi õigused", + "Default permissions updated successfully": "Vaikimisi õigused edukalt uuendatud", + "Default Prompt Suggestions": "Vaikimisi vihjete soovitused", + "Default to 389 or 636 if TLS is enabled": "Vaikimisi 389 või 636, kui TLS on lubatud", + "Default to ALL": "Vaikimisi KÕIK", + "Default User Role": "Vaikimisi kasutaja roll", + "Delete": "Kustuta", + "Delete a model": "Kustuta mudel", + "Delete All Chats": "Kustuta kõik vestlused", + "Delete All Models": "Kustuta kõik mudelid", + "Delete chat": "Kustuta vestlus", + "Delete Chat": "Kustuta vestlus", + "Delete chat?": "Kustutada vestlus?", + "Delete folder?": "Kustutada kaust?", + "Delete function?": "Kustutada funktsioon?", + "Delete Message": "Kustuta sõnum", + "Delete message?": "Kustutada sõnum?", + "Delete prompt?": "Kustutada vihjed?", + "delete this link": "kustuta see link", + "Delete tool?": "Kustutada tööriist?", + "Delete User": "Kustuta kasutaja", + "Deleted {{deleteModelTag}}": "Kustutatud {{deleteModelTag}}", + "Deleted {{name}}": "Kustutatud {{name}}", + "Deleted User": "Kustutatud kasutaja", + "Describe your knowledge base and objectives": "Kirjeldage oma teadmiste baasi ja eesmärke", + "Description": "Kirjeldus", + "Didn't fully follow instructions": "Ei järginud täielikult juhiseid", + "Direct Connections": "Otsesed ühendused", + "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "Otsesed ühendused võimaldavad kasutajatel ühenduda oma OpenAI-ga ühilduvate API lõpp-punktidega.", + "Direct Connections settings updated": "Otseste ühenduste seaded uuendatud", + "Disabled": "Keelatud", + "Discover a function": "Avasta funktsioon", + "Discover a 
model": "Avasta mudel", + "Discover a prompt": "Avasta vihje", + "Discover a tool": "Avasta tööriist", + "Discover how to use Open WebUI and seek support from the community.": "Avastage, kuidas kasutada Open WebUI-d ja otsige tuge kogukonnalt.", + "Discover wonders": "Avasta imesid", + "Discover, download, and explore custom functions": "Avasta, laadi alla ja uuri kohandatud funktsioone", + "Discover, download, and explore custom prompts": "Avasta, laadi alla ja uuri kohandatud vihjeid", + "Discover, download, and explore custom tools": "Avasta, laadi alla ja uuri kohandatud tööriistu", + "Discover, download, and explore model presets": "Avasta, laadi alla ja uuri mudeli eelseadistusi", + "Dismissible": "Sulgetav", + "Display": "Kuva", + "Display Emoji in Call": "Kuva kõnes emoji", + "Display the username instead of You in the Chat": "Kuva vestluses 'Sina' asemel kasutajanimi", + "Displays citations in the response": "Kuvab vastuses viited", + "Dive into knowledge": "Sukeldu teadmistesse", + "Do not install functions from sources you do not fully trust.": "Ärge installige funktsioone allikatest, mida te täielikult ei usalda.", + "Do not install tools from sources you do not fully trust.": "Ärge installige tööriistu allikatest, mida te täielikult ei usalda.", + "Document": "Dokument", + "Document Intelligence": "Dokumendi intelligentsus", + "Document Intelligence endpoint and key required.": "Dokumendi intelligentsuse lõpp-punkt ja võti on nõutavad.", + "Documentation": "Dokumentatsioon", + "Documents": "Dokumendid", + "does not make any external connections, and your data stays securely on your locally hosted server.": "ei loo väliseid ühendusi ja teie andmed jäävad turvaliselt teie kohalikult majutatud serverisse.", + "Domain Filter List": "Domeeni filtri nimekiri", + "Don't have an account?": "Pole kontot?", + "don't install random functions from sources you don't trust.": "ärge installige juhuslikke funktsioone allikatest, mida te ei usalda.", + "don't install 
random tools from sources you don't trust.": "ärge installige juhuslikke tööriistu allikatest, mida te ei usalda.", + "Don't like the style": "Stiil ei meeldi", + "Done": "Valmis", + "Download": "Laadi alla", + "Download as SVG": "Laadi alla SVG-na", + "Download canceled": "Allalaadimine tühistatud", + "Download Database": "Laadi alla andmebaas", + "Drag and drop a file to upload or select a file to view": "Lohistage ja kukutage fail üleslaadimiseks või valige fail vaatamiseks", + "Draw": "Joonista", + "Drop any files here to add to the conversation": "Lohistage siia mistahes failid, et lisada need vestlusele", + "e.g. '30s','10m'. Valid time units are 's', 'm', 'h'.": "nt '30s', '10m'. Kehtivad ajaühikud on 's', 'm', 'h'.", + "e.g. 60": "nt 60", + "e.g. A filter to remove profanity from text": "nt filter, mis eemaldab tekstist roppused", + "e.g. My Filter": "nt Minu Filter", + "e.g. My Tools": "nt Minu Tööriistad", + "e.g. my_filter": "nt minu_filter", + "e.g. my_tools": "nt minu_tööriistad", + "e.g. 
Tools for performing various operations": "nt tööriistad mitmesuguste operatsioonide teostamiseks", + "Edit": "Muuda", + "Edit Arena Model": "Muuda Areena mudelit", + "Edit Channel": "Muuda kanalit", + "Edit Connection": "Muuda ühendust", + "Edit Default Permissions": "Muuda vaikimisi õigusi", + "Edit Memory": "Muuda mälu", + "Edit User": "Muuda kasutajat", + "Edit User Group": "Muuda kasutajagruppi", + "ElevenLabs": "ElevenLabs", + "Email": "E-post", + "Embark on adventures": "Alusta seiklusi", + "Embedding": "Manustamine", + "Embedding Batch Size": "Manustamise partii suurus", + "Embedding Model": "Manustamise mudel", + "Embedding Model Engine": "Manustamise mudeli mootor", + "Embedding model set to \"{{embedding_model}}\"": "Manustamise mudel määratud kui \"{{embedding_model}}\"", + "Enable API Key": "Luba API võti", + "Enable autocomplete generation for chat messages": "Luba automaattäitmise genereerimine vestlussõnumitele", + "Enable Code Execution": "Luba koodi täitmine", + "Enable Code Interpreter": "Luba koodi interpretaator", + "Enable Community Sharing": "Luba kogukonnaga jagamine", + "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Luba mälu lukustamine (mlock), et vältida mudeli andmete vahetamist RAM-ist välja. See valik lukustab mudeli töökomplekti lehed RAM-i, tagades, et neid ei vahetata kettale. See aitab säilitada jõudlust, vältides lehevigu ja tagades kiire andmete juurdepääsu.", + "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. 
However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Luba mälu kaardistamine (mmap) mudeli andmete laadimiseks. See valik võimaldab süsteemil kasutada kettamahtu RAM-i laiendusena, koheldes kettafaile nii, nagu need oleksid RAM-is. See võib parandada mudeli jõudlust, võimaldades kiiremat andmete juurdepääsu. See ei pruugi siiski kõigi süsteemidega õigesti töötada ja võib tarbida märkimisväärse koguse kettaruumi.", + "Enable Message Rating": "Luba sõnumite hindamine", + "Enable Mirostat sampling for controlling perplexity.": "Luba Mirostat'i valim perplekssuse juhtimiseks.", + "Enable New Sign Ups": "Luba uued registreerimised", + "Enabled": "Lubatud", + "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Veenduge, et teie CSV-fail sisaldab 4 veergu selles järjekorras: Nimi, E-post, Parool, Roll.", + "Enter {{role}} message here": "Sisestage {{role}} sõnum siia", + "Enter a detail about yourself for your LLMs to recall": "Sisestage detail enda kohta, mida teie LLM-id saavad meenutada", + "Enter api auth string (e.g. username:password)": "Sisestage api autentimisstring (nt kasutajanimi:parool)", + "Enter Application DN": "Sisestage rakenduse DN", + "Enter Application DN Password": "Sisestage rakenduse DN parool", + "Enter Bing Search V7 Endpoint": "Sisestage Bing Search V7 lõpp-punkt", + "Enter Bing Search V7 Subscription Key": "Sisestage Bing Search V7 tellimuse võti", + "Enter Bocha Search API Key": "Sisestage Bocha Search API võti", + "Enter Brave Search API Key": "Sisestage Brave Search API võti", + "Enter certificate path": "Sisestage sertifikaadi tee", + "Enter CFG Scale (e.g. 
7.0)": "Sisestage CFG skaala (nt 7.0)", + "Enter Chunk Overlap": "Sisestage tükkide ülekate", + "Enter Chunk Size": "Sisestage tüki suurus", + "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "Sisestage komadega eraldatud \"token:kallutuse_väärtus\" paarid (näide: 5432:100, 413:-100)", + "Enter description": "Sisestage kirjeldus", + "Enter Document Intelligence Endpoint": "Sisestage dokumendi intelligentsuse lõpp-punkt", + "Enter Document Intelligence Key": "Sisestage dokumendi intelligentsuse võti", + "Enter domains separated by commas (e.g., example.com,site.org)": "Sisestage domeenid komadega eraldatult (nt example.com,site.org)", + "Enter Exa API Key": "Sisestage Exa API võti", + "Enter Github Raw URL": "Sisestage Github toorURL", + "Enter Google PSE API Key": "Sisestage Google PSE API võti", + "Enter Google PSE Engine Id": "Sisestage Google PSE mootori ID", + "Enter Image Size (e.g. 512x512)": "Sisestage pildi suurus (nt 512x512)", + "Enter Jina API Key": "Sisestage Jina API võti", + "Enter Jupyter Password": "Sisestage Jupyter parool", + "Enter Jupyter Token": "Sisestage Jupyter token", + "Enter Jupyter URL": "Sisestage Jupyter URL", + "Enter Kagi Search API Key": "Sisestage Kagi Search API võti", + "Enter Key Behavior": "Sisestage võtme käitumine", + "Enter language codes": "Sisestage keelekoodid", + "Enter Model ID": "Sisestage mudeli ID", + "Enter model tag (e.g. {{modelTag}})": "Sisestage mudeli silt (nt {{modelTag}})", + "Enter Mojeek Search API Key": "Sisestage Mojeek Search API võti", + "Enter Number of Steps (e.g. 50)": "Sisestage sammude arv (nt 50)", + "Enter Perplexity API Key": "Sisestage Perplexity API võti", + "Enter proxy URL (e.g. https://user:password@host:port)": "Sisestage puhverserveri URL (nt https://kasutaja:parool@host:port)", + "Enter reasoning effort": "Sisestage arutluspingutus", + "Enter Sampler (e.g. Euler a)": "Sisestage valimismeetod (nt Euler a)", + "Enter Scheduler (e.g. 
Karras)": "Sisestage planeerija (nt Karras)", + "Enter Score": "Sisestage skoor", + "Enter SearchApi API Key": "Sisestage SearchApi API võti", + "Enter SearchApi Engine": "Sisestage SearchApi mootor", + "Enter Searxng Query URL": "Sisestage Searxng päringu URL", + "Enter Seed": "Sisestage seeme", + "Enter SerpApi API Key": "Sisestage SerpApi API võti", + "Enter SerpApi Engine": "Sisestage SerpApi mootor", + "Enter Serper API Key": "Sisestage Serper API võti", + "Enter Serply API Key": "Sisestage Serply API võti", + "Enter Serpstack API Key": "Sisestage Serpstack API võti", + "Enter server host": "Sisestage serveri host", + "Enter server label": "Sisestage serveri silt", + "Enter server port": "Sisestage serveri port", + "Enter stop sequence": "Sisestage lõpetamise järjestus", + "Enter system prompt": "Sisestage süsteemi vihjed", + "Enter Tavily API Key": "Sisestage Tavily API võti", + "Enter the public URL of your WebUI. This URL will be used to generate links in the notifications.": "Sisestage oma WebUI avalik URL. Seda URL-i kasutatakse teadaannetes linkide genereerimiseks.", + "Enter Tika Server URL": "Sisestage Tika serveri URL", + "Enter timeout in seconds": "Sisestage aegumine sekundites", + "Enter to Send": "Enter saatmiseks", + "Enter Top K": "Sisestage Top K", + "Enter URL (e.g. http://127.0.0.1:7860/)": "Sisestage URL (nt http://127.0.0.1:7860/)", + "Enter URL (e.g. 
http://localhost:11434)": "Sisestage URL (nt http://localhost:11434)", + "Enter your current password": "Sisestage oma praegune parool", + "Enter Your Email": "Sisestage oma e-post", + "Enter Your Full Name": "Sisestage oma täisnimi", + "Enter your message": "Sisestage oma sõnum", + "Enter your new password": "Sisestage oma uus parool", + "Enter Your Password": "Sisestage oma parool", + "Enter Your Role": "Sisestage oma roll", + "Enter Your Username": "Sisestage oma kasutajanimi", + "Enter your webhook URL": "Sisestage oma webhook URL", + "Error": "Viga", + "ERROR": "VIGA", + "Error accessing Google Drive: {{error}}": "Viga Google Drive'i juurdepääsul: {{error}}", + "Error uploading file: {{error}}": "Viga faili üleslaadimisel: {{error}}", + "Evaluations": "Hindamised", + "Exa API Key": "Exa API võti", + "Example: (&(objectClass=inetOrgPerson)(uid=%s))": "Näide: (&(objectClass=inetOrgPerson)(uid=%s))", + "Example: ALL": "Näide: ALL", + "Example: mail": "Näide: mail", + "Example: ou=users,dc=foo,dc=example": "Näide: ou=users,dc=foo,dc=example", + "Example: sAMAccountName or uid or userPrincipalName": "Näide: sAMAccountName või uid või userPrincipalName", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "Ületasite litsentsis määratud istekohtade arvu. 
Palun võtke ühendust toega, et suurendada istekohtade arvu.", + "Exclude": "Välista", + "Execute code for analysis": "Käivita kood analüüsimiseks", + "Expand": "Laienda", + "Experimental": "Katsetuslik", + "Explain": "Selgita", + "Explain this section to me in more detail": "Selgitage seda lõiku mulle üksikasjalikumalt", + "Explore the cosmos": "Uuri kosmost", + "Export": "Ekspordi", + "Export All Archived Chats": "Ekspordi kõik arhiveeritud vestlused", + "Export All Chats (All Users)": "Ekspordi kõik vestlused (kõik kasutajad)", + "Export chat (.json)": "Ekspordi vestlus (.json)", + "Export Chats": "Ekspordi vestlused", + "Export Config to JSON File": "Ekspordi seadistus JSON-failina", + "Export Functions": "Ekspordi funktsioonid", + "Export Models": "Ekspordi mudelid", + "Export Presets": "Ekspordi eelseadistused", + "Export Prompts": "Ekspordi vihjed", + "Export to CSV": "Ekspordi CSV-na", + "Export Tools": "Ekspordi tööriistad", + "External Models": "Välised mudelid", + "Failed to add file.": "Faili lisamine ebaõnnestus.", + "Failed to create API Key.": "API võtme loomine ebaõnnestus.", + "Failed to fetch models": "Mudelite toomine ebaõnnestus", + "Failed to read clipboard contents": "Lõikelaua sisu lugemine ebaõnnestus", + "Failed to save models configuration": "Mudelite konfiguratsiooni salvestamine ebaõnnestus", + "Failed to update settings": "Seadete uuendamine ebaõnnestus", + "Failed to upload file.": "Faili üleslaadimine ebaõnnestus.", + "Features": "Funktsioonid", + "Features Permissions": "Funktsioonide õigused", + "February": "Veebruar", + "Feedback History": "Tagasiside ajalugu", + "Feedbacks": "Tagasisided", + "Feel free to add specific details": "Võite lisada konkreetseid üksikasju", + "File": "Fail", + "File added successfully.": "Fail edukalt lisatud.", + "File content updated successfully.": "Faili sisu edukalt uuendatud.", + "File Mode": "Faili režiim", + "File not found.": "Faili ei leitud.", + "File removed successfully.": "Fail edukalt 
eemaldatud.", + "File size should not exceed {{maxSize}} MB.": "Faili suurus ei tohiks ületada {{maxSize}} MB.", + "File uploaded successfully": "Fail edukalt üles laaditud", + "Files": "Failid", + "Filter is now globally disabled": "Filter on nüüd globaalselt keelatud", + "Filter is now globally enabled": "Filter on nüüd globaalselt lubatud", + "Filters": "Filtrid", + "Fingerprint spoofing detected: Unable to use initials as avatar. Defaulting to default profile image.": "Tuvastati sõrmejälje võltsimine: initsiaalide kasutamine avatarina pole võimalik. Kasutatakse vaikimisi profiilikujutist.", + "Fluidly stream large external response chunks": "Suurte väliste vastuste tükkide sujuv voogedastus", + "Focus chat input": "Fokuseeri vestluse sisendile", + "Folder deleted successfully": "Kaust edukalt kustutatud", + "Folder name cannot be empty": "Kausta nimi ei saa olla tühi", + "Folder name cannot be empty.": "Kausta nimi ei saa olla tühi.", + "Folder name updated successfully": "Kausta nimi edukalt uuendatud", + "Followed instructions perfectly": "Järgis juhiseid täiuslikult", + "Forge new paths": "Loo uusi radu", + "Form": "Vorm", + "Format your variables using brackets like this:": "Vormindage oma muutujad sulgudega nagu siin:", + "Frequency Penalty": "Sageduse karistus", + "Full Context Mode": "Täiskonteksti režiim", + "Function": "Funktsioon", + "Function Calling": "Funktsiooni kutsumine", + "Function created successfully": "Funktsioon edukalt loodud", + "Function deleted successfully": "Funktsioon edukalt kustutatud", + "Function Description": "Funktsiooni kirjeldus", + "Function ID": "Funktsiooni ID", + "Function is now globally disabled": "Funktsioon on nüüd globaalselt keelatud", + "Function is now globally enabled": "Funktsioon on nüüd globaalselt lubatud", + "Function Name": "Funktsiooni nimi", + "Function updated successfully": "Funktsioon edukalt uuendatud", + "Functions": "Funktsioonid", + "Functions allow arbitrary code execution": "Funktsioonid 
võimaldavad suvalise koodi käivitamist", + "Functions allow arbitrary code execution.": "Funktsioonid võimaldavad suvalise koodi käivitamist.", + "Functions imported successfully": "Funktsioonid edukalt imporditud", + "Gemini": "Gemini", + "Gemini API Config": "Gemini API seadistus", + "Gemini API Key is required.": "Gemini API võti on nõutav.", + "General": "Üldine", + "Generate an image": "Genereeri pilt", + "Generate Image": "Genereeri pilt", + "Generate prompt pair": "Genereeri vihjete paar", + "Generating search query": "Otsinguküsimuse genereerimine", + "Get started": "Alusta", + "Get started with {{WEBUI_NAME}}": "Alusta {{WEBUI_NAME}} kasutamist", + "Global": "Globaalne", + "Good Response": "Hea vastus", + "Google Drive": "Google Drive", + "Google PSE API Key": "Google PSE API võti", + "Google PSE Engine Id": "Google PSE mootori ID", + "Group created successfully": "Grupp edukalt loodud", + "Group deleted successfully": "Grupp edukalt kustutatud", + "Group Description": "Grupi kirjeldus", + "Group Name": "Grupi nimi", + "Group updated successfully": "Grupp edukalt uuendatud", + "Groups": "Grupid", + "Haptic Feedback": "Haptiline tagasiside", + "has no conversations.": "vestlused puuduvad.", + "Hello, {{name}}": "Tere, {{name}}", + "Help": "Abi", + "Help us create the best community leaderboard by sharing your feedback history!": "Aidake meil luua parim kogukonna edetabel, jagades oma tagasiside ajalugu!", + "Hex Color": "Hex värv", + "Hex Color - Leave empty for default color": "Hex värv - jätke tühjaks vaikevärvi jaoks", + "Hide": "Peida", + "Home": "Avaleht", + "Host": "Host", + "How can I help you today?": "Kuidas saan teid täna aidata?", + "How would you rate this response?": "Kuidas hindaksite seda vastust?", + "Hybrid Search": "Hübriidotsing", + "I acknowledge that I have read and I understand the implications of my action. 
I am aware of the risks associated with executing arbitrary code and I have verified the trustworthiness of the source.": "Kinnitan, et olen lugenud ja mõistan oma tegevuse tagajärgi. Olen teadlik suvalise koodi käivitamisega seotud riskidest ja olen kontrollinud allika usaldusväärsust.", + "ID": "ID", + "Ignite curiosity": "Süüta uudishimu", + "Image": "Pilt", + "Image Compression": "Pildi tihendamine", + "Image Generation": "Pildi genereerimine", + "Image Generation (Experimental)": "Pildi genereerimine (katsetuslik)", + "Image Generation Engine": "Pildi genereerimise mootor", + "Image Max Compression Size": "Pildi maksimaalne tihendamise suurus", + "Image Prompt Generation": "Pildi vihje genereerimine", + "Image Prompt Generation Prompt": "Pildi vihje genereerimise vihje", + "Image Settings": "Pildi seaded", + "Images": "Pildid", + "Import Chats": "Impordi vestlused", + "Import Config from JSON File": "Impordi seadistus JSON-failist", + "Import Functions": "Impordi funktsioonid", + "Import Models": "Impordi mudelid", + "Import Presets": "Impordi eelseadistused", + "Import Prompts": "Impordi vihjed", + "Import Tools": "Impordi tööriistad", + "Include": "Kaasa", + "Include `--api-auth` flag when running stable-diffusion-webui": "Lisage `--api-auth` lipp stable-diffusion-webui käivitamisel", + "Include `--api` flag when running stable-diffusion-webui": "Lisage `--api` lipp stable-diffusion-webui käivitamisel", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "Mõjutab, kui kiiresti algoritm reageerib genereeritud teksti tagasisidele. 
Madalam õppimiskiirus annab tulemuseks aeglasemad kohandused, samas kui kõrgem õppimiskiirus muudab algoritmi tundlikumaks.", + "Info": "Info", + "Input commands": "Sisendkäsud", + "Install from Github URL": "Installige Github URL-ilt", + "Instant Auto-Send After Voice Transcription": "Kohene automaatne saatmine pärast hääle transkriptsiooni", + "Integration": "Integratsioon", + "Interface": "Kasutajaliides", + "Invalid file format.": "Vigane failiformaat.", + "Invalid Tag": "Vigane silt", + "is typing...": "kirjutab...", + "January": "Jaanuar", + "Jina API Key": "Jina API võti", + "join our Discord for help.": "liituge abi saamiseks meie Discordiga.", + "JSON": "JSON", + "JSON Preview": "JSON eelvaade", + "July": "Juuli", + "June": "Juuni", + "Jupyter Auth": "Jupyter autentimine", + "Jupyter URL": "Jupyter URL", + "JWT Expiration": "JWT aegumine", + "JWT Token": "JWT token", + "Kagi Search API Key": "Kagi Search API võti", + "Keep Alive": "Hoia elus", + "Key": "Võti", + "Keyboard shortcuts": "Klaviatuuri otseteed", + "Knowledge": "Teadmised", + "Knowledge Access": "Teadmiste juurdepääs", + "Knowledge created successfully.": "Teadmised edukalt loodud.", + "Knowledge deleted successfully.": "Teadmised edukalt kustutatud.", + "Knowledge reset successfully.": "Teadmised edukalt lähtestatud.", + "Knowledge updated successfully": "Teadmised edukalt uuendatud", + "Kokoro.js (Browser)": "Kokoro.js (brauser)", + "Kokoro.js Dtype": "Kokoro.js andmetüüp", + "Label": "Silt", + "Landing Page Mode": "Maandumislehe režiim", + "Language": "Keel", + "Last Active": "Viimati aktiivne", + "Last Modified": "Viimati muudetud", + "Last reply": "Viimane vastus", + "LDAP": "LDAP", + "LDAP server updated": "LDAP server uuendatud", + "Leaderboard": "Edetabel", + "Leave empty for unlimited": "Jäta tühjaks piiranguta kasutamiseks", + "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "Jäta tühjaks, et kaasata kõik mudelid \"{{URL}}/api/tags\" lõpp-punktist", + "Leave 
empty to include all models from \"{{URL}}/models\" endpoint": "Jäta tühjaks, et kaasata kõik mudelid \"{{URL}}/models\" lõpp-punktist", + "Leave empty to include all models or select specific models": "Jäta tühjaks, et kaasata kõik mudelid või vali konkreetsed mudelid", + "Leave empty to use the default prompt, or enter a custom prompt": "Jäta tühjaks, et kasutada vaikimisi vihjet, või sisesta kohandatud vihje", + "Leave model field empty to use the default model.": "Jäta mudeli väli tühjaks, et kasutada vaikimisi mudelit.", + "License": "Litsents", + "Light": "Hele", + "Listening...": "Kuulamine...", + "Llama.cpp": "Llama.cpp", + "LLMs can make mistakes. Verify important information.": "LLM-id võivad teha vigu. Kontrollige olulist teavet.", + "Loader": "Laadija", + "Loading Kokoro.js...": "Kokoro.js laadimine...", + "Local": "Kohalik", + "Local Models": "Kohalikud mudelid", + "Location access not allowed": "Asukoha juurdepääs pole lubatud", + "Logit Bias": "Logiti kallutatus", + "Lost": "Kaotanud", + "LTR": "LTR", + "Made by Open WebUI Community": "Loodud Open WebUI kogukonna poolt", + "Make sure to enclose them with": "Veenduge, et need on ümbritsetud järgmisega:", + "Make sure to export a workflow.json file as API format from ComfyUI.": "Veenduge, et ekspordite workflow.json faili API formaadis ComfyUI-st.", + "Manage": "Halda", + "Manage Direct Connections": "Halda otseseid ühendusi", + "Manage Models": "Halda mudeleid", + "Manage Ollama": "Halda Ollama't", + "Manage Ollama API Connections": "Halda Ollama API ühendusi", + "Manage OpenAI API Connections": "Halda OpenAI API ühendusi", + "Manage Pipelines": "Halda torustikke", + "March": "Märts", + "Max Tokens (num_predict)": "Max tokeneid (num_predict)", + "Max Upload Count": "Maksimaalne üleslaadimiste arv", + "Max Upload Size": "Maksimaalne üleslaadimise suurus", + "Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Korraga saab alla laadida maksimaalselt 3 mudelit. 
Palun proovige hiljem uuesti.", + "May": "Mai", + "Memories accessible by LLMs will be shown here.": "LLM-idele ligipääsetavad mälestused kuvatakse siin.", + "Memory": "Mälu", + "Memory added successfully": "Mälu edukalt lisatud", + "Memory cleared successfully": "Mälu edukalt tühjendatud", + "Memory deleted successfully": "Mälu edukalt kustutatud", + "Memory updated successfully": "Mälu edukalt uuendatud", + "Merge Responses": "Ühenda vastused", + "Message rating should be enabled to use this feature": "Selle funktsiooni kasutamiseks peaks sõnumite hindamine olema lubatud", + "Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "Teie saadetud sõnumeid pärast lingi loomist ei jagata. Kasutajad, kellel on URL, saavad vaadata jagatud vestlust.", + "Min P": "Min P", + "Minimum Score": "Minimaalne skoor", + "Mirostat": "Mirostat", + "Mirostat Eta": "Mirostat Eta", + "Mirostat Tau": "Mirostat Tau", + "Model": "Mudel", + "Model '{{modelName}}' has been successfully downloaded.": "Mudel '{{modelName}}' on edukalt alla laaditud.", + "Model '{{modelTag}}' is already in queue for downloading.": "Mudel '{{modelTag}}' on juba allalaadimise järjekorras.", + "Model {{modelId}} not found": "Mudelit {{modelId}} ei leitud", + "Model {{modelName}} is not vision capable": "Mudel {{modelName}} ei ole võimeline visuaalseid sisendeid töötlema", + "Model {{name}} is now {{status}}": "Mudel {{name}} on nüüd {{status}}", + "Model accepts image inputs": "Mudel võtab vastu pilte sisendina", + "Model created successfully!": "Mudel edukalt loodud!", + "Model filesystem path detected. Model shortname is required for update, cannot continue.": "Tuvastati mudeli failisüsteemi tee. 
Uuendamiseks on vajalik mudeli lühinimi, ei saa jätkata.", + "Model Filtering": "Mudeli filtreerimine", + "Model ID": "Mudeli ID", + "Model IDs": "Mudeli ID-d", + "Model Name": "Mudeli nimi", + "Model not selected": "Mudel pole valitud", + "Model Params": "Mudeli parameetrid", + "Model Permissions": "Mudeli õigused", + "Model updated successfully": "Mudel edukalt uuendatud", + "Modelfile Content": "Modelfile sisu", + "Models": "Mudelid", + "Models Access": "Mudelite juurdepääs", + "Models configuration saved successfully": "Mudelite seadistus edukalt salvestatud", + "Mojeek Search API Key": "Mojeek Search API võti", + "more": "rohkem", + "More": "Rohkem", + "Name": "Nimi", + "Name your knowledge base": "Nimetage oma teadmiste baas", + "Native": "Omane", + "New Chat": "Uus vestlus", + "New Folder": "Uus kaust", + "New Password": "Uus parool", + "new-channel": "uus-kanal", + "No content found": "Sisu ei leitud", + "No content to speak": "Pole mida rääkida", + "No distance available": "Kaugus pole saadaval", + "No feedbacks found": "Tagasisidet ei leitud", + "No file selected": "Faili pole valitud", + "No files found.": "Faile ei leitud.", + "No groups with access, add a group to grant access": "Puuduvad juurdepääsuõigustega grupid, lisage grupp juurdepääsu andmiseks", + "No HTML, CSS, or JavaScript content found.": "HTML, CSS ega JavaScript sisu ei leitud.", + "No inference engine with management support found": "Järeldusmootorit haldamise toega ei leitud", + "No knowledge found": "Teadmisi ei leitud", + "No memories to clear": "Pole mälestusi, mida kustutada", + "No model IDs": "Mudeli ID-d puuduvad", + "No models found": "Mudeleid ei leitud", + "No models selected": "Mudeleid pole valitud", + "No results found": "Tulemusi ei leitud", + "No search query generated": "Otsingupäringut ei genereeritud", + "No source available": "Allikas pole saadaval", + "No users were found.": "Kasutajaid ei leitud.", + "No valves to update": "Pole klappe, mida uuendada", + "None": 
"Mitte ühtegi", + "Not factually correct": "Faktiliselt ebakorrektne", + "Not helpful": "Pole abistav", + "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "Märkus: kui määrate minimaalse skoori, tagastab otsing ainult dokumendid, mille skoor on suurem või võrdne minimaalse skooriga.", + "Notes": "Märkmed", + "Notification Sound": "Teavituse heli", + "Notification Webhook": "Teavituse webhook", + "Notifications": "Teavitused", + "November": "November", + "num_gpu (Ollama)": "num_gpu (Ollama)", + "num_thread (Ollama)": "num_thread (Ollama)", + "OAuth ID": "OAuth ID", + "October": "Oktoober", + "Off": "Väljas", + "Okay, Let's Go!": "Hea küll, lähme!", + "OLED Dark": "OLED tume", + "Ollama": "Ollama", + "Ollama API": "Ollama API", + "Ollama API settings updated": "Ollama API seaded uuendatud", + "Ollama Version": "Ollama versioon", + "On": "Sees", + "OneDrive": "OneDrive", + "Only alphanumeric characters and hyphens are allowed": "Lubatud on ainult tähtede-numbrite kombinatsioonid ja sidekriipsud", + "Only alphanumeric characters and hyphens are allowed in the command string.": "Käsustringis on lubatud ainult tähtede-numbrite kombinatsioonid ja sidekriipsud.", + "Only collections can be edited, create a new knowledge base to edit/add documents.": "Muuta saab ainult kogusid, dokumentide muutmiseks/lisamiseks looge uus teadmiste baas.", + "Only select users and groups with permission can access": "Juurdepääs on ainult valitud õigustega kasutajatel ja gruppidel", + "Oops! Looks like the URL is invalid. Please double-check and try again.": "Oih! URL tundub olevat vigane. Palun kontrollige ja proovige uuesti.", + "Oops! There are files still uploading. Please wait for the upload to complete.": "Oih! Failide üleslaadimine on veel pooleli. Palun oodake, kuni üleslaadimine lõpeb.", + "Oops! There was an error in the previous response.": "Oih! Eelmises vastuses oli viga.", + "Oops! 
You're using an unsupported method (frontend only). Please serve the WebUI from the backend.": "Oih! Kasutate toetamatut meetodit (ainult kasutajaliides). Palun serveerige WebUI tagarakendusest.", + "Open file": "Ava fail", + "Open in full screen": "Ava täisekraanil", + "Open new chat": "Ava uus vestlus", + "Open WebUI uses faster-whisper internally.": "Open WebUI kasutab sisemiselt faster-whisper'it.", + "Open WebUI uses SpeechT5 and CMU Arctic speaker embeddings.": "Open WebUI kasutab SpeechT5 ja CMU Arctic kõneleja manustamisi.", + "Open WebUI version (v{{OPEN_WEBUI_VERSION}}) is lower than required version (v{{REQUIRED_VERSION}})": "Open WebUI versioon (v{{OPEN_WEBUI_VERSION}}) on madalam kui nõutav versioon (v{{REQUIRED_VERSION}})", + "OpenAI": "OpenAI", + "OpenAI API": "OpenAI API", + "OpenAI API Config": "OpenAI API seadistus", + "OpenAI API Key is required.": "OpenAI API võti on nõutav.", + "OpenAI API settings updated": "OpenAI API seaded uuendatud", + "OpenAI URL/Key required.": "OpenAI URL/võti on nõutav.", + "or": "või", + "Organize your users": "Korraldage oma kasutajad", + "Other": "Muu", + "OUTPUT": "VÄLJUND", + "Output format": "Väljundformaat", + "Overview": "Ülevaade", + "page": "leht", + "Password": "Parool", + "Paste Large Text as File": "Kleebi suur tekst failina", + "PDF document (.pdf)": "PDF dokument (.pdf)", + "PDF Extract Images (OCR)": "PDF-ist piltide väljavõtmine (OCR)", + "pending": "ootel", + "Permission denied when accessing media devices": "Juurdepääs meediumiseadmetele keelatud", + "Permission denied when accessing microphone": "Juurdepääs mikrofonile keelatud", + "Permission denied when accessing microphone: {{error}}": "Juurdepääs mikrofonile keelatud: {{error}}", + "Permissions": "Õigused", + "Perplexity API Key": "Perplexity API võti", + "Personalization": "Isikupärastamine", + "Pin": "Kinnita", + "Pinned": "Kinnitatud", + "Pioneer insights": "Pioneeri arusaamad", + "Pipeline deleted successfully": "Torustik edukalt 
kustutatud", + "Pipeline downloaded successfully": "Torustik edukalt alla laaditud", + "Pipelines": "Torustikud", + "Pipelines Not Detected": "Torustikke ei tuvastatud", + "Pipelines Valves": "Torustike klapid", + "Plain text (.txt)": "Lihttekst (.txt)", + "Playground": "Mänguväljak", + "Please carefully review the following warnings:": "Palun vaadake hoolikalt läbi järgmised hoiatused:", + "Please do not close the settings page while loading the model.": "Palun ärge sulgege seadete lehte mudeli laadimise ajal.", + "Please enter a prompt": "Palun sisestage vihje", + "Please fill in all fields.": "Palun täitke kõik väljad.", + "Please select a model first.": "Palun valige esmalt mudel.", + "Please select a model.": "Palun valige mudel.", + "Please select a reason": "Palun valige põhjus", + "Port": "Port", + "Positive attitude": "Positiivne suhtumine", + "Prefix ID": "Prefiksi ID", + "Prefix ID is used to avoid conflicts with other connections by adding a prefix to the model IDs - leave empty to disable": "Prefiksi ID-d kasutatakse teiste ühendustega konfliktide vältimiseks, lisades mudeli ID-dele prefiksi - jätke tühjaks keelamiseks", + "Presence Penalty": "Kohaloleku karistus", + "Previous 30 days": "Eelmised 30 päeva", + "Previous 7 days": "Eelmised 7 päeva", + "Profile Image": "Profiilipilt", + "Prompt": "Vihje", + "Prompt (e.g. 
Tell me a fun fact about the Roman Empire)": "Vihje (nt Räägi mulle üks huvitav fakt Rooma impeeriumi kohta)", + "Prompt Content": "Vihje sisu", + "Prompt created successfully": "Vihje edukalt loodud", + "Prompt suggestions": "Vihje soovitused", + "Prompt updated successfully": "Vihje edukalt uuendatud", + "Prompts": "Vihjed", + "Prompts Access": "Vihjete juurdepääs", + "Pull \"{{searchValue}}\" from Ollama.com": "Tõmba \"{{searchValue}}\" Ollama.com-ist", + "Pull a model from Ollama.com": "Tõmba mudel Ollama.com-ist", + "Query Generation Prompt": "Päringu genereerimise vihje", + "RAG Template": "RAG mall", + "Rating": "Hinnang", + "Re-rank models by topic similarity": "Järjesta mudelid teema sarnasuse alusel ümber", + "Read": "Loe", + "Read Aloud": "Loe valjult", + "Reasoning Effort": "Arutluspingutus", + "Record voice": "Salvesta hääl", + "Redirecting you to Open WebUI Community": "Suunamine Open WebUI kogukonda", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "Vähendab mõttetuste genereerimise tõenäosust. 
Kõrgem väärtus (nt 100) annab mitmekesisemaid vastuseid, samas kui madalam väärtus (nt 10) on konservatiivsem.", + "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Viita endale kui \"Kasutaja\" (nt \"Kasutaja õpib hispaania keelt\")", + "References from": "Viited allikast", + "Refused when it shouldn't have": "Keeldus, kui ei oleks pidanud", + "Regenerate": "Regenereeri", + "Release Notes": "Väljalaskemärkmed", + "Relevance": "Asjakohasus", + "Remove": "Eemalda", + "Remove Model": "Eemalda mudel", + "Rename": "Nimeta ümber", + "Reorder Models": "Muuda mudelite järjekorda", + "Repeat Last N": "Korda viimast N", + "Repeat Penalty (Ollama)": "Korduse karistus (Ollama)", + "Reply in Thread": "Vasta lõimes", + "Request Mode": "Päringu režiim", + "Reranking Model": "Ümberjärjestamise mudel", + "Reranking model disabled": "Ümberjärjestamise mudel keelatud", + "Reranking model set to \"{{reranking_model}}\"": "Ümberjärjestamise mudel määratud kui \"{{reranking_model}}\"", + "Reset": "Lähtesta", + "Reset All Models": "Lähtesta kõik mudelid", + "Reset Upload Directory": "Lähtesta üleslaadimiste kataloog", + "Reset Vector Storage/Knowledge": "Lähtesta vektormälu/teadmised", + "Reset view": "Lähtesta vaade", + "Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Vastuste teavitusi ei saa aktiveerida, kuna veebisaidi õigused on keelatud. 
Vajalike juurdepääsude andmiseks külastage oma brauseri seadeid.", + "Response splitting": "Vastuse tükeldamine", + "Result": "Tulemus", + "Retrieval": "Taastamine", + "Retrieval Query Generation": "Taastamise päringu genereerimine", + "Rich Text Input for Chat": "Rikasteksti sisend vestluse jaoks", + "RK": "RK", + "Role": "Roll", + "Rosé Pine": "Rosé Pine", + "Rosé Pine Dawn": "Rosé Pine Dawn", + "RTL": "RTL", + "Run": "Käivita", + "Running": "Töötab", + "Save": "Salvesta", + "Save & Create": "Salvesta ja loo", + "Save & Update": "Salvesta ja uuenda", + "Save As Copy": "Salvesta koopiana", + "Save Tag": "Salvesta silt", + "Saved": "Salvestatud", + "Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "Vestluslogi salvestamine otse teie brauseri mällu pole enam toetatud. Palun võtke hetk, et alla laadida ja kustutada oma vestluslogi, klõpsates allpool olevat nuppu. 
Ärge muretsege, saate hõlpsasti oma vestluslogi tagarakendusse uuesti importida, kasutades", + "Scroll to bottom when switching between branches": "Keri alla harus liikumisel", + "Search": "Otsing", + "Search a model": "Otsi mudelit", + "Search Base": "Otsingu baas", + "Search Chats": "Otsi vestlusi", + "Search Collection": "Otsi kogust", + "Search Filters": "Otsingu filtrid", + "search for tags": "otsi silte", + "Search Functions": "Otsi funktsioone", + "Search Knowledge": "Otsi teadmisi", + "Search Models": "Otsi mudeleid", + "Search options": "Otsingu valikud", + "Search Prompts": "Otsi vihjeid", + "Search Result Count": "Otsingutulemuste arv", + "Search the internet": "Otsi internetist", + "Search Tools": "Otsi tööriistu", + "SearchApi API Key": "SearchApi API võti", + "SearchApi Engine": "SearchApi mootor", + "Searched {{count}} sites": "Otsiti {{count}} saidilt", + "Searching \"{{searchQuery}}\"": "Otsimine: \"{{searchQuery}}\"", + "Searching Knowledge for \"{{searchQuery}}\"": "Teadmistest otsimine: \"{{searchQuery}}\"", + "Searxng Query URL": "Searxng päringu URL", + "See readme.md for instructions": "Juhiste saamiseks vaadake readme.md", + "See what's new": "Vaata, mis on uut", + "Seed": "Seeme", + "Select a base model": "Valige baas mudel", + "Select a engine": "Valige mootor", + "Select a function": "Valige funktsioon", + "Select a group": "Valige grupp", + "Select a model": "Valige mudel", + "Select a pipeline": "Valige torustik", + "Select a pipeline url": "Valige torustiku URL", + "Select a tool": "Valige tööriist", + "Select an auth method": "Valige autentimismeetod", + "Select an Ollama instance": "Valige Ollama instants", + "Select Engine": "Valige mootor", + "Select Knowledge": "Valige teadmised", + "Select only one model to call": "Valige ainult üks mudel kutsumiseks", + "Selected model(s) do not support image inputs": "Valitud mudel(id) ei toeta pilte sisendina", + "Semantic distance to query": "Semantiline kaugus päringust", + "Send": "Saada", 
+ "Send a Message": "Saada sõnum", + "Send message": "Saada sõnum", + "Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "Saadab `stream_options: { include_usage: true }` päringus.\nToetatud teenusepakkujad tagastavad määramisel vastuses tokeni kasutuse teabe.", + "September": "September", + "SerpApi API Key": "SerpApi API võti", + "SerpApi Engine": "SerpApi mootor", + "Serper API Key": "Serper API võti", + "Serply API Key": "Serply API võti", + "Serpstack API Key": "Serpstack API võti", + "Server connection verified": "Serveri ühendus kontrollitud", + "Set as default": "Määra vaikimisi", + "Set CFG Scale": "Määra CFG skaala", + "Set Default Model": "Määra vaikimisi mudel", + "Set embedding model": "Määra manustamise mudel", + "Set embedding model (e.g. {{model}})": "Määra manustamise mudel (nt {{model}})", + "Set Image Size": "Määra pildi suurus", + "Set reranking model (e.g. {{model}})": "Määra ümberjärjestamise mudel (nt {{model}})", + "Set Sampler": "Määra valimismeetod", + "Set Scheduler": "Määra planeerija", + "Set Steps": "Määra sammud", + "Set Task Model": "Määra ülesande mudel", + "Set the number of layers, which will be off-loaded to GPU. Increasing this value can significantly improve performance for models that are optimized for GPU acceleration but may also consume more power and GPU resources.": "Määrake kihtide arv, mis laaditakse GPU-le. Selle väärtuse suurendamine võib oluliselt parandada jõudlust mudelite puhul, mis on optimeeritud GPU kiirenduse jaoks, kuid võib tarbida rohkem energiat ja GPU ressursse.", + "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Määrake arvutusteks kasutatavate töölõimede arv. 
See valik kontrollib, mitu lõime kasutatakse saabuvate päringute samaaegseks töötlemiseks. Selle väärtuse suurendamine võib parandada jõudlust suure samaaegsusega töökoormuste korral, kuid võib tarbida rohkem CPU ressursse.", + "Set Voice": "Määra hääl", + "Set whisper model": "Määra whisper mudel", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "Seab tasase kallutatuse tokenite vastu, mis on esinenud vähemalt üks kord. Kõrgem väärtus (nt 1,5) karistab kordusi tugevamalt, samas kui madalam väärtus (nt 0,9) on leebem. Väärtuse 0 korral on see keelatud.", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "Seab skaleeritava kallutatuse tokenite vastu korduste karistamiseks, põhinedes sellel, mitu korda need on esinenud. Kõrgem väärtus (nt 1,5) karistab kordusi tugevamalt, samas kui madalam väärtus (nt 0,9) on leebem. Väärtuse 0 korral on see keelatud.", + "Sets how far back for the model to look back to prevent repetition.": "Määrab, kui kaugele mudel tagasi vaatab, et vältida kordusi.", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "Määrab genereerimiseks kasutatava juhusliku arvu seemne. Selle määramine kindlale numbrile paneb mudeli genereerima sama teksti sama vihje korral.", + "Sets the size of the context window used to generate the next token.": "Määrab järgmise tokeni genereerimiseks kasutatava konteksti akna suuruse.", + "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. 
Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Määrab kasutatavad lõpetamise järjestused. Kui see muster kohatakse, lõpetab LLM teksti genereerimise ja tagastab. Mitme lõpetamise mustri saab määrata, täpsustades modelfile'is mitu eraldi lõpetamise parameetrit.", + "Settings": "Seaded", + "Settings saved successfully!": "Seaded edukalt salvestatud!", + "Share": "Jaga", + "Share Chat": "Jaga vestlust", + "Share to Open WebUI Community": "Jaga Open WebUI kogukonnaga", + "Show": "Näita", + "Show \"What's New\" modal on login": "Näita \"Mis on uut\" modaalakent sisselogimisel", + "Show Admin Details in Account Pending Overlay": "Näita administraatori üksikasju konto ootel kattekihil", + "Show shortcuts": "Näita otseteid", + "Show your support!": "Näita oma toetust!", + "Showcased creativity": "Näitas loovust", + "Sign in": "Logi sisse", + "Sign in to {{WEBUI_NAME}}": "Logi sisse {{WEBUI_NAME}}", + "Sign in to {{WEBUI_NAME}} with LDAP": "Logi sisse {{WEBUI_NAME}} LDAP-ga", + "Sign Out": "Logi välja", + "Sign up": "Registreeru", + "Sign up to {{WEBUI_NAME}}": "Registreeru {{WEBUI_NAME}}", + "Signing in to {{WEBUI_NAME}}": "Sisselogimine {{WEBUI_NAME}}", + "sk-1234": "sk-1234", + "Source": "Allikas", + "Speech Playback Speed": "Kõne taasesituse kiirus", + "Speech recognition error: {{error}}": "Kõnetuvastuse viga: {{error}}", + "Speech-to-Text Engine": "Kõne-tekstiks mootor", + "Stop": "Peata", + "Stop Sequence": "Lõpetamise järjestus", + "Stream Chat Response": "Voogedasta vestluse vastust", + "STT Model": "STT mudel", + "STT Settings": "STT seaded", + "Subtitle (e.g. 
about the Roman Empire)": "Alampealkiri (nt Rooma impeeriumi kohta)", + "Success": "Õnnestus", + "Successfully updated.": "Edukalt uuendatud.", + "Suggested": "Soovitatud", + "Support": "Tugi", + "Support this plugin:": "Toeta seda pistikprogrammi:", + "Sync directory": "Sünkroniseeri kataloog", + "System": "Süsteem", + "System Instructions": "Süsteemi juhised", + "System Prompt": "Süsteemi vihje", + "Tags Generation": "Siltide genereerimine", + "Tags Generation Prompt": "Siltide genereerimise vihje", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "Saba vaba valimit kasutatakse väljundis vähem tõenäoliste tokenite mõju vähendamiseks. Kõrgem väärtus (nt 2,0) vähendab mõju rohkem, samas kui väärtus 1,0 keelab selle seade.", + "Talk to model": "Räägi mudeliga", + "Tap to interrupt": "Puuduta katkestamiseks", + "Tasks": "Ülesanded", + "Tavily API Key": "Tavily API võti", + "Tell us more:": "Räägi meile lähemalt:", + "Temperature": "Temperatuur", + "Template": "Mall", + "Temporary Chat": "Ajutine vestlus", + "Text Splitter": "Teksti tükeldaja", + "Text-to-Speech Engine": "Tekst-kõneks mootor", + "Tfs Z": "Tfs Z", + "Thanks for your feedback!": "Täname tagasiside eest!", + "The Application Account DN you bind with for search": "Rakenduse konto DN, millega seote otsingu jaoks", + "The base to search for users": "Baas kasutajate otsimiseks", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "Partii suurus määrab, mitu tekstipäringut töödeldakse korraga. Suurem partii suurus võib suurendada mudeli jõudlust ja kiirust, kuid see nõuab ka rohkem mälu.", + "The developers behind this plugin are passionate volunteers from the community. 
If you find this plugin helpful, please consider contributing to its development.": "Selle pistikprogrammi taga olevad arendajad on kogukonna pühendunud vabatahtlikud. Kui leiate, et see pistikprogramm on kasulik, palun kaaluge selle arendamise toetamist.", + "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Hindamise edetabel põhineb Elo hindamissüsteemil ja seda uuendatakse reaalajas.", + "The LDAP attribute that maps to the mail that users use to sign in.": "LDAP atribuut, mis kaardistab e-posti, mida kasutajad kasutavad sisselogimiseks.", + "The LDAP attribute that maps to the username that users use to sign in.": "LDAP atribuut, mis kaardistab kasutajanime, mida kasutajad kasutavad sisselogimiseks.", + "The leaderboard is currently in beta, and we may adjust the rating calculations as we refine the algorithm.": "Edetabel on praegu beetaversioonina ja me võime kohandada hindamisarvutusi algoritmi täiustamisel.", + "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Maksimaalne failisuurus MB-des. Kui failisuurus ületab seda piiri, faili ei laadita üles.", + "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Maksimaalne failide arv, mida saab korraga vestluses kasutada. Kui failide arv ületab selle piiri, faile ei laadita üles.", + "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Skoor peaks olema väärtus vahemikus 0,0 (0%) kuni 1,0 (100%).", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "Mudeli temperatuur. Temperatuuri suurendamine paneb mudeli vastama loovamalt.", + "Theme": "Teema", + "Thinking...": "Mõtleb...", + "This action cannot be undone. Do you wish to continue?": "Seda toimingut ei saa tagasi võtta. 
Kas soovite jätkata?", + "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "See tagab, et teie väärtuslikud vestlused salvestatakse turvaliselt teie tagarakenduse andmebaasi. Täname!", + "This is an experimental feature, it may not function as expected and is subject to change at any time.": "See on katsetuslik funktsioon, see ei pruugi toimida ootuspäraselt ja võib igal ajal muutuda.", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "See valik kontrollib, mitu tokenit säilitatakse konteksti värskendamisel. Näiteks kui see on määratud 2-le, säilitatakse vestluse konteksti viimased 2 tokenit. Konteksti säilitamine võib aidata säilitada vestluse järjepidevust, kuid võib vähendada võimet reageerida uutele teemadele.", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "See valik määrab maksimaalse tokenite arvu, mida mudel saab oma vastuses genereerida. 
Selle piirmäära suurendamine võimaldab mudelil anda pikemaid vastuseid, kuid võib suurendada ka ebavajaliku või ebaolulise sisu genereerimise tõenäosust.", + "This option will delete all existing files in the collection and replace them with newly uploaded files.": "See valik kustutab kõik olemasolevad failid kogust ja asendab need äsja üleslaaditud failidega.", + "This response was generated by \"{{model}}\"": "Selle vastuse genereeris \"{{model}}\"", + "This will delete": "See kustutab", + "This will delete {{NAME}} and all its contents.": "See kustutab {{NAME}} ja kogu selle sisu.", + "This will delete all models including custom models": "See kustutab kõik mudelid, sealhulgas kohandatud mudelid", + "This will delete all models including custom models and cannot be undone.": "See kustutab kõik mudelid, sealhulgas kohandatud mudelid, ja seda ei saa tagasi võtta.", + "This will reset the knowledge base and sync all files. Do you wish to continue?": "See lähtestab teadmiste baasi ja sünkroniseerib kõik failid. Kas soovite jätkata?", + "Thorough explanation": "Põhjalik selgitus", + "Thought for {{DURATION}}": "Mõtles {{DURATION}}", + "Thought for {{DURATION}} seconds": "Mõtles {{DURATION}} sekundit", + "Tika": "Tika", + "Tika Server URL required.": "Tika serveri URL on nõutav.", + "Tiktoken": "Tiktoken", + "Tip: Update multiple variable slots consecutively by pressing the tab key in the chat input after each replacement.": "Nõuanne: Värskendage mitut muutuja kohta järjestikku, vajutades pärast iga asendust vestluse sisendis tabeldusklahvi.", + "Title": "Pealkiri", + "Title (e.g. 
Tell me a fun fact)": "Pealkiri (nt Räägi mulle üks huvitav fakt)", + "Title Auto-Generation": "Pealkirja automaatne genereerimine", + "Title cannot be an empty string.": "Pealkiri ei saa olla tühi string.", + "Title Generation": "Pealkirja genereerimine", + "Title Generation Prompt": "Pealkirja genereerimise vihje", + "TLS": "TLS", + "To access the available model names for downloading,": "Juurdepääsuks saadaolevatele mudelinimedele allalaadimiseks,", + "To access the GGUF models available for downloading,": "Juurdepääsuks allalaadimiseks saadaolevatele GGUF mudelitele,", + "To access the WebUI, please reach out to the administrator. Admins can manage user statuses from the Admin Panel.": "WebUI-le juurdepääsuks võtke ühendust administraatoriga. Administraatorid saavad hallata kasutajate staatuseid administraatori paneelist.", + "To attach knowledge base here, add them to the \"Knowledge\" workspace first.": "Teadmiste baasi siia lisamiseks lisage need esmalt \"Teadmiste\" tööalale.", + "To learn more about available endpoints, visit our documentation.": "Saadaolevate lõpp-punktide kohta rohkem teada saamiseks külastage meie dokumentatsiooni.", + "To protect your privacy, only ratings, model IDs, tags, and metadata are shared from your feedback—your chat logs remain private and are not included.": "Teie privaatsuse kaitsmiseks jagatakse teie tagasisidest ainult hinnanguid, mudeli ID-sid, silte ja metaandmeid - teie vestluslogi jääb privaatseks ja neid ei kaasata.", + "To select actions here, add them to the \"Functions\" workspace first.": "Toimingute siit valimiseks lisage need esmalt \"Funktsioonide\" tööalale.", + "To select filters here, add them to the \"Functions\" workspace first.": "Filtrite siit valimiseks lisage need esmalt \"Funktsioonide\" tööalale.", + "To select toolkits here, add them to the \"Tools\" workspace first.": "Tööriistakomplektide siit valimiseks lisage need esmalt \"Tööriistade\" tööalale.", + "Toast notifications for new updates": 
"Hüpikmärguanded uuenduste kohta", + "Today": "Täna", + "Toggle settings": "Lülita seaded", + "Toggle sidebar": "Lülita külgriba", + "Token": "Token", + "Tokens To Keep On Context Refresh (num_keep)": "Konteksti värskendamisel säilitatavad tokenid (num_keep)", + "Too verbose": "Liiga paljusõnaline", + "Tool created successfully": "Tööriist edukalt loodud", + "Tool deleted successfully": "Tööriist edukalt kustutatud", + "Tool Description": "Tööriista kirjeldus", + "Tool ID": "Tööriista ID", + "Tool imported successfully": "Tööriist edukalt imporditud", + "Tool Name": "Tööriista nimi", + "Tool updated successfully": "Tööriist edukalt uuendatud", + "Tools": "Tööriistad", + "Tools Access": "Tööriistade juurdepääs", + "Tools are a function calling system with arbitrary code execution": "Tööriistad on funktsioonide kutsumise süsteem suvalise koodi täitmisega", + "Tools Function Calling Prompt": "Tööriistade funktsioonide kutsumise vihje", + "Tools have a function calling system that allows arbitrary code execution": "Tööriistadel on funktsioonide kutsumise süsteem, mis võimaldab suvalise koodi täitmist", + "Tools have a function calling system that allows arbitrary code execution.": "Tööriistadel on funktsioonide kutsumise süsteem, mis võimaldab suvalise koodi täitmist.", + "Top K": "Top K", + "Top P": "Top P", + "Transformers": "Transformers", + "Trouble accessing Ollama?": "Probleeme Ollama juurdepääsuga?", + "Trust Proxy Environment": "Usalda puhverserveri keskkonda", + "TTS Model": "TTS mudel", + "TTS Settings": "TTS seaded", + "TTS Voice": "TTS hääl", + "Type": "Tüüp", + "Type Hugging Face Resolve (Download) URL": "Sisestage Hugging Face Resolve (Allalaadimise) URL", + "Uh-oh! There was an issue with the response.": "Oi-oi! 
Vastusega oli probleem.", + "UI": "Kasutajaliides", + "Unarchive All": "Eemalda kõik arhiivist", + "Unarchive All Archived Chats": "Eemalda kõik arhiveeritud vestlused arhiivist", + "Unarchive Chat": "Eemalda vestlus arhiivist", + "Unlock mysteries": "Ava mõistatused", + "Unpin": "Võta lahti", + "Unravel secrets": "Ava saladused", + "Untagged": "Sildistamata", + "Update": "Uuenda", + "Update and Copy Link": "Uuenda ja kopeeri link", + "Update for the latest features and improvements.": "Uuendage, et saada uusimad funktsioonid ja täiustused.", + "Update password": "Uuenda parooli", + "Updated": "Uuendatud", + "Updated at": "Uuendamise aeg", + "Updated At": "Uuendamise aeg", + "Upgrade to a licensed plan for enhanced capabilities, including custom theming and branding, and dedicated support.": "Uuendage litsentseeritud plaanile täiustatud võimaluste jaoks, sealhulgas kohandatud teemad ja bränding ning pühendatud tugi.", + "Upload": "Laadi üles", + "Upload a GGUF model": "Laadige üles GGUF mudel", + "Upload directory": "Üleslaadimise kataloog", + "Upload files": "Laadi failid üles", + "Upload Files": "Laadi failid üles", + "Upload Pipeline": "Laadi torustik üles", + "Upload Progress": "Üleslaadimise progress", + "URL": "URL", + "URL Mode": "URL režiim", + "Use '#' in the prompt input to load and include your knowledge.": "Kasutage '#' vihjete sisendis, et laadida ja kaasata oma teadmised.", + "Use Gravatar": "Kasuta Gravatari", + "Use groups to group your users and assign permissions.": "Kasutage gruppe oma kasutajate grupeerimiseks ja õiguste määramiseks.", + "Use Initials": "Kasuta initsiaale", + "use_mlock (Ollama)": "use_mlock (Ollama)", + "use_mmap (Ollama)": "use_mmap (Ollama)", + "user": "kasutaja", + "User": "Kasutaja", + "User location successfully retrieved.": "Kasutaja asukoht edukalt hangitud.", + "Username": "Kasutajanimi", + "Users": "Kasutajad", + "Using the default arena model with all models. 
Click the plus button to add custom models.": "Kasutatakse vaikimisi areena mudelit kõigi mudelitega. Kohandatud mudelite lisamiseks klõpsake plussmärgiga nuppu.", + "Utilize": "Kasuta", + "Valid time units:": "Kehtivad ajaühikud:", + "Valves": "Klapid", + "Valves updated": "Klapid uuendatud", + "Valves updated successfully": "Klapid edukalt uuendatud", + "variable": "muutuja", + "variable to have them replaced with clipboard content.": "muutuja, et need asendataks lõikelaua sisuga.", + "Version": "Versioon", + "Version {{selectedVersion}} of {{totalVersions}}": "Versioon {{selectedVersion}} / {{totalVersions}}", + "View Replies": "Vaata vastuseid", + "Visibility": "Nähtavus", + "Voice": "Hääl", + "Voice Input": "Hääle sisend", + "Warning": "Hoiatus", + "Warning:": "Hoiatus:", + "Warning: Enabling this will allow users to upload arbitrary code on the server.": "Hoiatus: Selle lubamine võimaldab kasutajatel üles laadida suvalist koodi serverisse.", + "Warning: If you update or change your embedding model, you will need to re-import all documents.": "Hoiatus: Kui uuendate või muudate oma manustamise mudelit, peate kõik dokumendid uuesti importima.", + "Warning: Jupyter execution enables arbitrary code execution, posing severe security risks—proceed with extreme caution.": "Hoiatus: Jupyter täitmine võimaldab suvalise koodi käivitamist, mis kujutab endast tõsist turvariski - jätkake äärmise ettevaatusega.", + "Web": "Veeb", + "Web API": "Veebi API", + "Web Search": "Veebiotsing", + "Web Search Engine": "Veebi otsingumootor", + "Web Search in Chat": "Veebiotsing vestluses", + "Web Search Query Generation": "Veebi otsingupäringu genereerimine", + "Webhook URL": "Webhooki URL", + "WebUI Settings": "WebUI seaded", + "WebUI URL": "WebUI URL", + "WebUI will make requests to \"{{url}}/api/chat\"": "WebUI teeb päringuid aadressile \"{{url}}/api/chat\"", + "WebUI will make requests to \"{{url}}/chat/completions\"": "WebUI teeb päringuid aadressile 
\"{{url}}/chat/completions\"", + "What are you trying to achieve?": "Mida te püüate saavutada?", + "What are you working on?": "Millega te tegelete?", + "What’s New in": "Mis on uut", + "When enabled, the model will respond to each chat message in real-time, generating a response as soon as the user sends a message. This mode is useful for live chat applications, but may impact performance on slower hardware.": "Kui see on lubatud, vastab mudel igale vestlussõnumile reaalajas, genereerides vastuse niipea, kui kasutaja sõnumi saadab. See režiim on kasulik reaalajas vestlusrakendustes, kuid võib mõjutada jõudlust aeglasema riistvara puhul.", + "wherever you are": "kus iganes te olete", + "Whisper (Local)": "Whisper (lokaalne)", + "Why?": "Miks?", + "Widescreen Mode": "Laiekraani režiim", + "Won": "Võitis", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "Töötab koos top-k-ga. Kõrgem väärtus (nt 0,95) annab tulemuseks mitmekesisema teksti, samas kui madalam väärtus (nt 0,5) genereerib keskendunuma ja konservatiivsema teksti.", + "Workspace": "Tööala", + "Workspace Permissions": "Tööala õigused", + "Write": "Kirjuta", + "Write a prompt suggestion (e.g. Who are you?)": "Kirjutage vihje soovitus (nt Kes sa oled?)", + "Write a summary in 50 words that summarizes [topic or keyword].": "Kirjutage 50-sõnaline kokkuvõte, mis võtab kokku [teema või märksõna].", + "Write something...": "Kirjutage midagi...", + "Write your model template content here": "Kirjutage oma mudeli malli sisu siia", + "Yesterday": "Eile", + "You": "Sina", + "You are currently using a trial license. Please contact support to upgrade your license.": "Kasutate praegu proovilitsentsi. 
Palun võtke ühendust toega, et oma litsentsi uuendada.", + "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Saate korraga vestelda maksimaalselt {{maxCount}} faili(ga).", + "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Saate isikupärastada oma suhtlust LLM-idega, lisades mälestusi alumise 'Halda' nupu kaudu, muutes need kasulikumaks ja teile kohandatumaks.", + "You cannot upload an empty file.": "Te ei saa üles laadida tühja faili.", + "You do not have permission to access this feature.": "Teil pole õigust sellele funktsioonile ligi pääseda.", + "You do not have permission to upload files": "Teil pole õigust faile üles laadida", + "You do not have permission to upload files.": "Teil pole õigust faile üles laadida.", + "You have no archived conversations.": "Teil pole arhiveeritud vestlusi.", + "You have shared this chat": "Olete seda vestlust jaganud", + "You're a helpful assistant.": "Oled abivalmis assistent.", + "You're now logged in.": "Olete nüüd sisse logitud.", + "Your account status is currently pending activation.": "Teie konto staatus on praegu ootel aktiveerimist.", + "Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "Kogu teie toetus läheb otse pistikprogrammi arendajale; Open WebUI ei võta mingit protsenti. 
Kuid valitud rahastamisplatvormil võivad olla oma tasud.", + "Youtube": "Youtube", + "Youtube Language": "Youtube keel", + "Youtube Proxy URL": "Youtube puhverserveri URL" +} \ No newline at end of file diff --git a/src/lib/i18n/locales/languages.json b/src/lib/i18n/locales/languages.json index 5672e45929f..6b509f5046a 100644 --- a/src/lib/i18n/locales/languages.json +++ b/src/lib/i18n/locales/languages.json @@ -11,6 +11,10 @@ "code": "ar-BH", "title": "Arabic (عربي)" }, + { + "code": "eu-ES", + "title": "Basque (Euskara)" + }, { "code": "bn-BD", "title": "Bengali (বাংলা)" @@ -27,6 +31,10 @@ "code": "ceb-PH", "title": "Cebuano (Filipino)" }, + { + "code": "hr-HR", + "title": "Croatian (Hrvatski)" + }, { "code": "cs-CZ", "title": "Czech (čeština)" @@ -36,20 +44,12 @@ "title": "Danish (Denmark)" }, { - "code": "de-DE", - "title": "German (Deutsch)" - }, - { - "code": "es-ES", - "title": "Spanish (Español)" - }, - { - "code": "eu-ES", - "title": "Basque (Euskara)" + "code": "nl-NL", + "title": "Dutch (Netherlands)" }, { - "code": "fa-IR", - "title": "Persian (فارسی)" + "code": "et-EE", + "title": "Estonian (Eesti)" }, { "code": "fi-FI", @@ -63,6 +63,14 @@ "code": "fr-FR", "title": "French (France)" }, + { + "code": "ka-GE", + "title": "Georgian (ქართული)" + }, + { + "code": "de-DE", + "title": "German (Deutsch)" + }, { "code": "el-GR", "title": "Greek (Ἑλλάδα)" @@ -75,10 +83,6 @@ "code": "hi-IN", "title": "Hindi (हिंदी)" }, - { - "code": "hr-HR", - "title": "Croatian (Hrvatski)" - }, { "code": "hu-HU", "title": "Hungarian (Magyar)" @@ -99,10 +103,6 @@ "code": "ja-JP", "title": "Japanese (日本語)" }, - { - "code": "ka-GE", - "title": "Georgian (ქართული)" - }, { "code": "ko-KR", "title": "Korean (한국어)" @@ -120,12 +120,8 @@ "title": "Norwegian Bokmål (Norway)" }, { - "code": "nl-NL", - "title": "Dutch (Netherlands)" - }, - { - "code": "pa-IN", - "title": "Punjabi (India)" + "code": "fa-IR", + "title": "Persian (فارسی)" }, { "code": "pl-PL", @@ -139,6 +135,10 @@ "code": 
"pt-PT", "title": "Portuguese (Portugal)" }, + { + "code": "pa-IN", + "title": "Punjabi (India)" + }, { "code": "ro-RO", "title": "Romanian (Romania)" @@ -147,17 +147,21 @@ "code": "ru-RU", "title": "Russian (Russia)" }, + { + "code": "sr-RS", + "title": "Serbian (Српски)" + }, { "code": "sk-SK", "title": "Slovak (Slovenčina)" }, { - "code": "sv-SE", - "title": "Swedish (Svenska)" + "code": "es-ES", + "title": "Spanish (Español)" }, { - "code": "sr-RS", - "title": "Serbian (Српски)" + "code": "sv-SE", + "title": "Swedish (Svenska)" }, { "code": "th-TH", @@ -195,4 +199,4 @@ "code": "dg-DG", "title": "Doge (🐶)" } -] +] \ No newline at end of file From c4c6e02b4c2b9819b33dc85af1bdeae6ecd80078 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=B4mulo=20Mendes=20Figueiredo?= Date: Thu, 20 Mar 2025 16:05:26 -0300 Subject: [PATCH 363/623] fix: redirection for users already logged in --- src/routes/auth/+page.svelte | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/routes/auth/+page.svelte b/src/routes/auth/+page.svelte index 0accf85ccc7..be8989002e4 100644 --- a/src/routes/auth/+page.svelte +++ b/src/routes/auth/+page.svelte @@ -140,7 +140,8 @@ onMount(async () => { if ($user !== undefined) { - await goto('/'); + const redirectPath = querystringValue('redirect') || '/'; + goto(redirectPath); } await checkOauthCallback(); From b96557c46e16066bc60d4a20df4d0c7d23e2f802 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Thu, 20 Mar 2025 13:55:13 -0700 Subject: [PATCH 364/623] refac: styling --- src/lib/components/layout/Sidebar/SearchInput.svelte | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/lib/components/layout/Sidebar/SearchInput.svelte b/src/lib/components/layout/Sidebar/SearchInput.svelte index c1438cede6e..6dca9a4eb98 100644 --- a/src/lib/components/layout/Sidebar/SearchInput.svelte +++ b/src/lib/components/layout/Sidebar/SearchInput.svelte @@ -105,7 +105,7 @@
{ @@ -147,14 +147,14 @@ } }} /> - + {#if showClearButton && value} -
-
{/if} From 9b20ef492205f6c773a7557d82d1fe5ebeafd5b0 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Thu, 20 Mar 2025 14:01:47 -0700 Subject: [PATCH 365/623] refac --- backend/open_webui/config.py | 8 ++++---- backend/open_webui/main.py | 4 ++-- backend/open_webui/retrieval/web/utils.py | 18 +++++++----------- 3 files changed, 13 insertions(+), 17 deletions(-) diff --git a/backend/open_webui/config.py b/backend/open_webui/config.py index c25e0e046a2..1162fde221c 100644 --- a/backend/open_webui/config.py +++ b/backend/open_webui/config.py @@ -2081,10 +2081,10 @@ class BannerModel(BaseModel): os.environ.get("PLAYWRIGHT_WS_URI", None), ) -PLAYWRIGHT_GOTO_TIMEOUT = PersistentConfig( - "PLAYWRIGHT_GOTO_TIMEOUT", - "rag.web.loader.engine.playwright.goto.timeout", - int(os.environ.get("PLAYWRIGHT_GOTO_TIMEOUT", "10")), +PLAYWRIGHT_TIMEOUT = PersistentConfig( + "PLAYWRIGHT_TIMEOUT", + "rag.web.loader.engine.playwright.timeout", + int(os.environ.get("PLAYWRIGHT_TIMEOUT", "10")), ) FIRECRAWL_API_KEY = PersistentConfig( diff --git a/backend/open_webui/main.py b/backend/open_webui/main.py index 228c92e6447..6749260554b 100644 --- a/backend/open_webui/main.py +++ b/backend/open_webui/main.py @@ -155,7 +155,7 @@ AUDIO_TTS_AZURE_SPEECH_REGION, AUDIO_TTS_AZURE_SPEECH_OUTPUT_FORMAT, PLAYWRIGHT_WS_URI, - PLAYWRIGHT_GOTO_TIMEOUT, + PLAYWRIGHT_TIMEOUT, FIRECRAWL_API_BASE_URL, FIRECRAWL_API_KEY, RAG_WEB_LOADER_ENGINE, @@ -630,7 +630,7 @@ async def lifespan(app: FastAPI): app.state.config.RAG_WEB_LOADER_ENGINE = RAG_WEB_LOADER_ENGINE app.state.config.RAG_WEB_SEARCH_TRUST_ENV = RAG_WEB_SEARCH_TRUST_ENV app.state.config.PLAYWRIGHT_WS_URI = PLAYWRIGHT_WS_URI -app.state.config.PLAYWRIGHT_GOTO_TIMEOUT = PLAYWRIGHT_GOTO_TIMEOUT +app.state.config.PLAYWRIGHT_TIMEOUT = PLAYWRIGHT_TIMEOUT app.state.config.FIRECRAWL_API_BASE_URL = FIRECRAWL_API_BASE_URL app.state.config.FIRECRAWL_API_KEY = FIRECRAWL_API_KEY app.state.config.TAVILY_EXTRACT_DEPTH = TAVILY_EXTRACT_DEPTH diff --git 
a/backend/open_webui/retrieval/web/utils.py b/backend/open_webui/retrieval/web/utils.py index 0eee00879e1..942cb8483fa 100644 --- a/backend/open_webui/retrieval/web/utils.py +++ b/backend/open_webui/retrieval/web/utils.py @@ -29,7 +29,7 @@ from open_webui.config import ( ENABLE_RAG_LOCAL_WEB_FETCH, PLAYWRIGHT_WS_URI, - PLAYWRIGHT_GOTO_TIMEOUT, + PLAYWRIGHT_TIMEOUT, RAG_WEB_LOADER_ENGINE, FIRECRAWL_API_BASE_URL, FIRECRAWL_API_KEY, @@ -377,7 +377,7 @@ class SafePlaywrightURLLoader(PlaywrightURLLoader, RateLimitMixin, URLProcessing headless (bool): If True, the browser will run in headless mode. proxy (dict): Proxy override settings for the Playwright session. playwright_ws_url (Optional[str]): WebSocket endpoint URI for remote browser connection. - playwright_goto_timeout (Optional[int]): Maximum operation time in milliseconds. + playwright_timeout (Optional[int]): Maximum operation time in milliseconds. """ def __init__( @@ -391,7 +391,7 @@ def __init__( remove_selectors: Optional[List[str]] = None, proxy: Optional[Dict[str, str]] = None, playwright_ws_url: Optional[str] = None, - playwright_goto_timeout: Optional[int] = 10000, + playwright_timeout: Optional[int] = 10000, ): """Initialize with additional safety parameters and remote browser support.""" @@ -418,7 +418,7 @@ def __init__( self.last_request_time = None self.playwright_ws_url = playwright_ws_url self.trust_env = trust_env - self.playwright_goto_timeout = playwright_goto_timeout + self.playwright_timeout = playwright_timeout def lazy_load(self) -> Iterator[Document]: """Safely load URLs synchronously with support for remote browser.""" @@ -435,7 +435,7 @@ def lazy_load(self) -> Iterator[Document]: try: self._safe_process_url_sync(url) page = browser.new_page() - response = page.goto(url, timeout=self.playwright_goto_timeout) + response = page.goto(url, timeout=self.playwright_timeout) if response is None: raise ValueError(f"page.goto() returned None for url {url}") @@ -466,9 +466,7 @@ async def 
alazy_load(self) -> AsyncIterator[Document]: try: await self._safe_process_url(url) page = await browser.new_page() - response = await page.goto( - url, timeout=self.playwright_goto_timeout - ) + response = await page.goto(url, timeout=self.playwright_timeout) if response is None: raise ValueError(f"page.goto() returned None for url {url}") @@ -611,9 +609,7 @@ def get_web_loader( } if RAG_WEB_LOADER_ENGINE.value == "playwright": - web_loader_args["playwright_goto_timeout"] = ( - PLAYWRIGHT_GOTO_TIMEOUT.value * 1000 - ) + web_loader_args["playwright_timeout"] = PLAYWRIGHT_TIMEOUT.value * 1000 if PLAYWRIGHT_WS_URI.value: web_loader_args["playwright_ws_url"] = PLAYWRIGHT_WS_URI.value From 1d305e7b2f401f1241815a2d9b4b34ba7ec99789 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Thu, 20 Mar 2025 14:02:33 -0700 Subject: [PATCH 366/623] chore: format --- src/lib/i18n/locales/es-ES/translation.json | 2 +- src/lib/i18n/locales/et-EE/translation.json | 2367 ++++++++++--------- src/lib/i18n/locales/languages.json | 2 +- 3 files changed, 1192 insertions(+), 1179 deletions(-) diff --git a/src/lib/i18n/locales/es-ES/translation.json b/src/lib/i18n/locales/es-ES/translation.json index b65e0fdc679..a27cebbef18 100644 --- a/src/lib/i18n/locales/es-ES/translation.json +++ b/src/lib/i18n/locales/es-ES/translation.json @@ -390,7 +390,7 @@ "Enter Chunk Size": "Ingrese el tamaño del fragmento", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "Entre pares \"token:bias_value\" separados por comas (ejemplo: 5432:100, 413:-100)", "Enter description": "Ingrese la descripción", - "Enter Docling Server URL": "", + "Enter Docling Server URL": "", "Enter Document Intelligence Endpoint": "Entre el Endpoint de Document Intelligence", "Enter Document Intelligence Key": "Entre la Clave de Document Intelligence", "Enter domains separated by commas (e.g., example.com,site.org)": "Entre dominios separados por comas (p.ej., ejemplo.com,sitio.org)", diff --git 
a/src/lib/i18n/locales/et-EE/translation.json b/src/lib/i18n/locales/et-EE/translation.json index 0065f8871ac..6cfe097bdd3 100644 --- a/src/lib/i18n/locales/et-EE/translation.json +++ b/src/lib/i18n/locales/et-EE/translation.json @@ -1,1178 +1,1191 @@ { - "-1 for no limit, or a positive integer for a specific limit": "-1 piirangu puudumisel või positiivne täisarv konkreetse piirangu jaoks", - "'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.": "'s', 'm', 'h', 'd', 'w' või '-1' aegumiseta.", - "(e.g. `sh webui.sh --api --api-auth username_password`)": "(nt `sh webui.sh --api --api-auth kasutajanimi_parool`)", - "(e.g. `sh webui.sh --api`)": "(nt `sh webui.sh --api`)", - "(latest)": "(uusim)", - "{{ models }}": "{{ mudelid }}", - "{{COUNT}} hidden lines": "{{COUNT}} peidetud rida", - "{{COUNT}} Replies": "{{COUNT}} vastust", - "{{user}}'s Chats": "{{user}} vestlused", - "{{webUIName}} Backend Required": "{{webUIName}} taustaserver on vajalik", - "*Prompt node ID(s) are required for image generation": "*Vihje sõlme ID(d) on piltide genereerimiseks vajalikud", - "A new version (v{{LATEST_VERSION}}) is now available.": "Uus versioon (v{{LATEST_VERSION}}) on saadaval.", - "A task model is used when performing tasks such as generating titles for chats and web search queries": "Ülesande mudelit kasutatakse selliste toimingute jaoks nagu vestluste pealkirjade ja veebiotsingu päringute genereerimine", - "a user": "kasutaja", - "About": "Teave", - "Accept autocomplete generation / Jump to prompt variable": "Nõustu automaattäitmisega / Liigu vihjete muutujale", - "Access": "Juurdepääs", - "Access Control": "Juurdepääsu kontroll", - "Accessible to all users": "Kättesaadav kõigile kasutajatele", - "Account": "Konto", - "Account Activation Pending": "Konto aktiveerimine ootel", - "Accurate information": "Täpne informatsioon", - "Actions": "Toimingud", - "Activate": "Aktiveeri", - "Activate this command by typing \"/{{COMMAND}}\" to chat input.": "Aktiveeri see käsk, trükkides 
\"/{{COMMAND}}\" vestluse sisendritta.", - "Active Users": "Aktiivsed kasutajad", - "Add": "Lisa", - "Add a model ID": "Lisa mudeli ID", - "Add a short description about what this model does": "Lisa lühike kirjeldus, mida see mudel teeb", - "Add a tag": "Lisa silt", - "Add Arena Model": "Lisa Areena mudel", - "Add Connection": "Lisa ühendus", - "Add Content": "Lisa sisu", - "Add content here": "Lisa siia sisu", - "Add custom prompt": "Lisa kohandatud vihjeid", - "Add Files": "Lisa faile", - "Add Group": "Lisa grupp", - "Add Memory": "Lisa mälu", - "Add Model": "Lisa mudel", - "Add Reaction": "Lisa reaktsioon", - "Add Tag": "Lisa silt", - "Add Tags": "Lisa silte", - "Add text content": "Lisa tekstisisu", - "Add User": "Lisa kasutaja", - "Add User Group": "Lisa kasutajagrupp", - "Adjusting these settings will apply changes universally to all users.": "Nende seadete kohandamine rakendab muudatused universaalselt kõigile kasutajatele.", - "admin": "admin", - "Admin": "Administraator", - "Admin Panel": "Administraatori paneel", - "Admin Settings": "Administraatori seaded", - "Admins have access to all tools at all times; users need tools assigned per model in the workspace.": "Administraatoritel on alati juurdepääs kõigile tööriistadele; kasutajatele tuleb tööriistad määrata mudeli põhiselt tööruumis.", - "Advanced Parameters": "Täpsemad parameetrid", - "Advanced Params": "Täpsemad parameetrid", - "All": "Kõik", - "All Documents": "Kõik dokumendid", - "All models deleted successfully": "Kõik mudelid edukalt kustutatud", - "Allow Chat Controls": "Luba vestluse kontrollnupud", - "Allow Chat Delete": "Luba vestluse kustutamine", - "Allow Chat Deletion": "Luba vestluse kustutamine", - "Allow Chat Edit": "Luba vestluse muutmine", - "Allow File Upload": "Luba failide üleslaadimine", - "Allow non-local voices": "Luba mitte-lokaalsed hääled", - "Allow Temporary Chat": "Luba ajutine vestlus", - "Allow User Location": "Luba kasutaja asukoht", - "Allow Voice Interruption in Call": 
"Luba hääle katkestamine kõnes", - "Allowed Endpoints": "Lubatud lõpp-punktid", - "Already have an account?": "Kas teil on juba konto?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "Alternatiiv top_p-le ja eesmärk on tagada kvaliteedi ja mitmekesisuse tasakaal. Parameeter p esindab minimaalset tõenäosust tokeni arvesse võtmiseks, võrreldes kõige tõenäolisema tokeni tõenäosusega. Näiteks p=0.05 korral, kui kõige tõenäolisema tokeni tõenäosus on 0.9, filtreeritakse välja logitid väärtusega alla 0.045.", - "Always": "Alati", - "Amazing": "Suurepärane", - "an assistant": "assistent", - "Analyzed": "Analüüsitud", - "Analyzing...": "Analüüsimine...", - "and": "ja", - "and {{COUNT}} more": "ja veel {{COUNT}}", - "and create a new shared link.": "ja looge uus jagatud link.", - "API Base URL": "API baas-URL", - "API Key": "API võti", - "API Key created.": "API võti loodud.", - "API Key Endpoint Restrictions": "API võtme lõpp-punkti piirangud", - "API keys": "API võtmed", - "Application DN": "Rakenduse DN", - "Application DN Password": "Rakenduse DN parool", - "applies to all users with the \"user\" role": "kehtib kõigile kasutajatele \"kasutaja\" rolliga", - "April": "Aprill", - "Archive": "Arhiveeri", - "Archive All Chats": "Arhiveeri kõik vestlused", - "Archived Chats": "Arhiveeritud vestlused", - "archived-chat-export": "arhiveeritud-vestluste-eksport", - "Are you sure you want to clear all memories? This action cannot be undone.": "Kas olete kindel, et soovite kustutada kõik mälestused? 
Seda toimingut ei saa tagasi võtta.", - "Are you sure you want to delete this channel?": "Kas olete kindel, et soovite selle kanali kustutada?", - "Are you sure you want to delete this message?": "Kas olete kindel, et soovite selle sõnumi kustutada?", - "Are you sure you want to unarchive all archived chats?": "Kas olete kindel, et soovite kõik arhiveeritud vestlused arhiivist eemaldada?", - "Are you sure?": "Kas olete kindel?", - "Arena Models": "Areena mudelid", - "Artifacts": "Tekkinud objektid", - "Ask": "Küsi", - "Ask a question": "Esita küsimus", - "Assistant": "Assistent", - "Attach file from knowledge": "Lisa fail teadmiste baasist", - "Attention to detail": "Tähelepanu detailidele", - "Attribute for Mail": "E-posti atribuut", - "Attribute for Username": "Kasutajanime atribuut", - "Audio": "Heli", - "August": "August", - "Authenticate": "Autendi", - "Authentication": "Autentimine", - "Auto-Copy Response to Clipboard": "Kopeeri vastus automaatselt lõikelauale", - "Auto-playback response": "Mängi vastus automaatselt", - "Autocomplete Generation": "Automaattäitmise genereerimine", - "Autocomplete Generation Input Max Length": "Automaattäitmise genereerimise sisendi maksimaalne pikkus", - "Automatic1111": "Automatic1111", - "AUTOMATIC1111 Api Auth String": "AUTOMATIC1111 API autentimise string", - "AUTOMATIC1111 Base URL": "AUTOMATIC1111 baas-URL", - "AUTOMATIC1111 Base URL is required.": "AUTOMATIC1111 baas-URL on nõutav.", - "Available list": "Saadaolevate nimekiri", - "available!": "saadaval!", - "Awful": "Kohutav", - "Azure AI Speech": "Azure AI Kõne", - "Azure Region": "Azure regioon", - "Back": "Tagasi", - "Bad Response": "Halb vastus", - "Banners": "Bännerid", - "Base Model (From)": "Baas mudel (Allikas)", - "Batch Size (num_batch)": "Partii suurus (num_batch)", - "before": "enne", - "Being lazy": "Laisklemine", - "Beta": "Beeta", - "Bing Search V7 Endpoint": "Bing Search V7 lõpp-punkt", - "Bing Search V7 Subscription Key": "Bing Search V7 tellimuse 
võti", - "Bocha Search API Key": "Bocha otsingu API võti", - "Boosting or penalizing specific tokens for constrained responses. Bias values will be clamped between -100 and 100 (inclusive). (Default: none)": "Konkreetsete tokenite võimendamine või karistamine piiratud vastuste jaoks. Kallutatuse väärtused piiratakse vahemikku -100 kuni 100 (kaasa arvatud). (Vaikimisi: puudub)", - "Brave Search API Key": "Brave Search API võti", - "By {{name}}": "Autor: {{name}}", - "Bypass Embedding and Retrieval": "Möödaminek sisestamisest ja taastamisest", - "Bypass SSL verification for Websites": "Möödaminek veebisaitide SSL-kontrollimisest", - "Calendar": "Kalender", - "Call": "Kõne", - "Call feature is not supported when using Web STT engine": "Kõnefunktsioon ei ole Web STT mootorit kasutades toetatud", - "Camera": "Kaamera", - "Cancel": "Tühista", - "Capabilities": "Võimekused", - "Capture": "Jäädvusta", - "Certificate Path": "Sertifikaadi tee", - "Change Password": "Muuda parooli", - "Channel Name": "Kanali nimi", - "Channels": "Kanalid", - "Character": "Tegelane", - "Character limit for autocomplete generation input": "Märkide piirang automaattäitmise genereerimise sisendile", - "Chart new frontiers": "Kaardista uusi piire", - "Chat": "Vestlus", - "Chat Background Image": "Vestluse taustapilt", - "Chat Bubble UI": "Vestlusmullide kasutajaliides", - "Chat Controls": "Vestluse juhtnupud", - "Chat direction": "Vestluse suund", - "Chat Overview": "Vestluse ülevaade", - "Chat Permissions": "Vestluse õigused", - "Chat Tags Auto-Generation": "Vestluse siltide automaatnegeneerimine", - "Chats": "Vestlused", - "Check Again": "Kontrolli uuesti", - "Check for updates": "Kontrolli uuendusi", - "Checking for updates...": "Uuenduste kontrollimine...", - "Choose a model before saving...": "Valige mudel enne salvestamist...", - "Chunk Overlap": "Tükkide ülekate", - "Chunk Size": "Tüki suurus", - "Ciphers": "Šifrid", - "Citation": "Viide", - "Clear memory": "Tühjenda mälu", - "Clear 
Memory": "Tühjenda mälu", - "click here": "klõpsake siia", - "Click here for filter guides.": "Filtri juhiste jaoks klõpsake siia.", - "Click here for help.": "Abi saamiseks klõpsake siia.", - "Click here to": "Klõpsake siia, et", - "Click here to download user import template file.": "Klõpsake siia kasutajate importimise mallifaili allalaadimiseks.", - "Click here to learn more about faster-whisper and see the available models.": "Klõpsake siia, et teada saada rohkem faster-whisper kohta ja näha saadaolevaid mudeleid.", - "Click here to see available models.": "Klõpsake siia, et näha saadaolevaid mudeleid.", - "Click here to select": "Klõpsake siia valimiseks", - "Click here to select a csv file.": "Klõpsake siia csv-faili valimiseks.", - "Click here to select a py file.": "Klõpsake siia py-faili valimiseks.", - "Click here to upload a workflow.json file.": "Klõpsake siia workflow.json faili üleslaadimiseks.", - "click here.": "klõpsake siia.", - "Click on the user role button to change a user's role.": "Kasutaja rolli muutmiseks klõpsake kasutaja rolli nuppu.", - "Clipboard write permission denied. Please check your browser settings to grant the necessary access.": "Lõikelaua kirjutamisõigust ei antud. 
Kontrollige oma brauseri seadeid, et anda vajalik juurdepääs.", - "Clone": "Klooni", - "Clone Chat": "Klooni vestlus", - "Clone of {{TITLE}}": "{{TITLE}} koopia", - "Close": "Sulge", - "Code execution": "Koodi täitmine", - "Code Execution": "Koodi täitmine", - "Code Execution Engine": "Koodi täitmise mootor", - "Code Execution Timeout": "Koodi täitmise aegumine", - "Code formatted successfully": "Kood vormindatud edukalt", - "Code Interpreter": "Koodi interpretaator", - "Code Interpreter Engine": "Koodi interpretaatori mootor", - "Code Interpreter Prompt Template": "Koodi interpretaatori vihje mall", - "Collapse": "Ahenda", - "Collection": "Kogu", - "Color": "Värv", - "ComfyUI": "ComfyUI", - "ComfyUI API Key": "ComfyUI API võti", - "ComfyUI Base URL": "ComfyUI baas-URL", - "ComfyUI Base URL is required.": "ComfyUI baas-URL on nõutav.", - "ComfyUI Workflow": "ComfyUI töövoog", - "ComfyUI Workflow Nodes": "ComfyUI töövoo sõlmed", - "Command": "Käsk", - "Completions": "Lõpetamised", - "Concurrent Requests": "Samaaegsed päringud", - "Configure": "Konfigureeri", - "Confirm": "Kinnita", - "Confirm Password": "Kinnita parool", - "Confirm your action": "Kinnita oma toiming", - "Confirm your new password": "Kinnita oma uus parool", - "Connect to your own OpenAI compatible API endpoints.": "Ühendu oma OpenAI-ga ühilduvate API lõpp-punktidega.", - "Connections": "Ühendused", - "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "Piirab arutluse pingutust arutlusvõimelistele mudelitele. 
Kohaldatav ainult konkreetsete pakkujate arutlusmudelitele, mis toetavad arutluspingutust.", - "Contact Admin for WebUI Access": "Võtke WebUI juurdepääsu saamiseks ühendust administraatoriga", - "Content": "Sisu", - "Content Extraction Engine": "Sisu ekstraheerimise mootor", - "Context Length": "Konteksti pikkus", - "Continue Response": "Jätka vastust", - "Continue with {{provider}}": "Jätka {{provider}}-ga", - "Continue with Email": "Jätka e-postiga", - "Continue with LDAP": "Jätka LDAP-ga", - "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Kontrolli, kuidas sõnumitekst on jagatud TTS-päringute jaoks. 'Kirjavahemärgid' jagab lauseteks, 'lõigud' jagab lõikudeks ja 'puudub' hoiab sõnumi ühe stringina.", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "Kontrollige tokeni järjestuste kordumist genereeritud tekstis. Kõrgem väärtus (nt 1,5) karistab kordusi tugevamalt, samas kui madalam väärtus (nt 1,1) on leebem. Väärtuse 1 korral on see keelatud.", - "Controls": "Juhtnupud", - "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "Kontrollib väljundi sidususe ja mitmekesisuse vahelist tasakaalu. 
Madalam väärtus annab tulemuseks fokuseerituma ja sidusama teksti.", - "Copied": "Kopeeritud", - "Copied shared chat URL to clipboard!": "Jagatud vestluse URL kopeeritud lõikelauale!", - "Copied to clipboard": "Kopeeritud lõikelauale", - "Copy": "Kopeeri", - "Copy last code block": "Kopeeri viimane koodiplokk", - "Copy last response": "Kopeeri viimane vastus", - "Copy Link": "Kopeeri link", - "Copy to clipboard": "Kopeeri lõikelauale", - "Copying to clipboard was successful!": "Lõikelauale kopeerimine õnnestus!", - "CORS must be properly configured by the provider to allow requests from Open WebUI.": "Teenusepakkuja peab nõuetekohaselt konfigureerima CORS-i, et lubada päringuid Open WebUI-lt.", - "Create": "Loo", - "Create a knowledge base": "Loo teadmiste baas", - "Create a model": "Loo mudel", - "Create Account": "Loo konto", - "Create Admin Account": "Loo administraatori konto", - "Create Channel": "Loo kanal", - "Create Group": "Loo grupp", - "Create Knowledge": "Loo teadmised", - "Create new key": "Loo uus võti", - "Create new secret key": "Loo uus salavõti", - "Created at": "Loomise aeg", - "Created At": "Loomise aeg", - "Created by": "Autor", - "CSV Import": "CSV import", - "Ctrl+Enter to Send": "Ctrl+Enter saatmiseks", - "Current Model": "Praegune mudel", - "Current Password": "Praegune parool", - "Custom": "Kohandatud", - "Danger Zone": "Ohutsoon", - "Dark": "Tume", - "Database": "Andmebaas", - "December": "Detsember", - "Default": "Vaikimisi", - "Default (Open AI)": "Vaikimisi (Open AI)", - "Default (SentenceTransformers)": "Vaikimisi (SentenceTransformers)", - "Default mode works with a wider range of models by calling tools once before execution. Native mode leverages the model's built-in tool-calling capabilities, but requires the model to inherently support this feature.": "Vaikerežiim töötab laiema mudelite valikuga, kutsudes tööriistad välja enne täitmist. 
Kohalik režiim kasutab mudeli sisseehitatud tööriistade väljakutsumise võimalusi, kuid eeldab, et mudel toetab sisemiselt seda funktsiooni.", - "Default Model": "Vaikimisi mudel", - "Default model updated": "Vaikimisi mudel uuendatud", - "Default Models": "Vaikimisi mudelid", - "Default permissions": "Vaikimisi õigused", - "Default permissions updated successfully": "Vaikimisi õigused edukalt uuendatud", - "Default Prompt Suggestions": "Vaikimisi vihjete soovitused", - "Default to 389 or 636 if TLS is enabled": "Vaikimisi 389 või 636, kui TLS on lubatud", - "Default to ALL": "Vaikimisi KÕIK", - "Default User Role": "Vaikimisi kasutaja roll", - "Delete": "Kustuta", - "Delete a model": "Kustuta mudel", - "Delete All Chats": "Kustuta kõik vestlused", - "Delete All Models": "Kustuta kõik mudelid", - "Delete chat": "Kustuta vestlus", - "Delete Chat": "Kustuta vestlus", - "Delete chat?": "Kustutada vestlus?", - "Delete folder?": "Kustutada kaust?", - "Delete function?": "Kustutada funktsioon?", - "Delete Message": "Kustuta sõnum", - "Delete message?": "Kustutada sõnum?", - "Delete prompt?": "Kustutada vihjed?", - "delete this link": "kustuta see link", - "Delete tool?": "Kustutada tööriist?", - "Delete User": "Kustuta kasutaja", - "Deleted {{deleteModelTag}}": "Kustutatud {{deleteModelTag}}", - "Deleted {{name}}": "Kustutatud {{name}}", - "Deleted User": "Kustutatud kasutaja", - "Describe your knowledge base and objectives": "Kirjeldage oma teadmiste baasi ja eesmärke", - "Description": "Kirjeldus", - "Didn't fully follow instructions": "Ei järginud täielikult juhiseid", - "Direct Connections": "Otsesed ühendused", - "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "Otsesed ühendused võimaldavad kasutajatel ühenduda oma OpenAI-ga ühilduvate API lõpp-punktidega.", - "Direct Connections settings updated": "Otseste ühenduste seaded uuendatud", - "Disabled": "Keelatud", - "Discover a function": "Avasta funktsioon", - "Discover a 
model": "Avasta mudel", - "Discover a prompt": "Avasta vihje", - "Discover a tool": "Avasta tööriist", - "Discover how to use Open WebUI and seek support from the community.": "Avastage, kuidas kasutada Open WebUI-d ja otsige tuge kogukonnalt.", - "Discover wonders": "Avasta imesid", - "Discover, download, and explore custom functions": "Avasta, laadi alla ja uuri kohandatud funktsioone", - "Discover, download, and explore custom prompts": "Avasta, laadi alla ja uuri kohandatud vihjeid", - "Discover, download, and explore custom tools": "Avasta, laadi alla ja uuri kohandatud tööriistu", - "Discover, download, and explore model presets": "Avasta, laadi alla ja uuri mudeli eelseadistusi", - "Dismissible": "Sulgetav", - "Display": "Kuva", - "Display Emoji in Call": "Kuva kõnes emoji", - "Display the username instead of You in the Chat": "Kuva vestluses 'Sina' asemel kasutajanimi", - "Displays citations in the response": "Kuvab vastuses viited", - "Dive into knowledge": "Sukeldu teadmistesse", - "Do not install functions from sources you do not fully trust.": "Ärge installige funktsioone allikatest, mida te täielikult ei usalda.", - "Do not install tools from sources you do not fully trust.": "Ärge installige tööriistu allikatest, mida te täielikult ei usalda.", - "Document": "Dokument", - "Document Intelligence": "Dokumendi intelligentsus", - "Document Intelligence endpoint and key required.": "Dokumendi intelligentsuse lõpp-punkt ja võti on nõutavad.", - "Documentation": "Dokumentatsioon", - "Documents": "Dokumendid", - "does not make any external connections, and your data stays securely on your locally hosted server.": "ei loo väliseid ühendusi ja teie andmed jäävad turvaliselt teie kohalikult majutatud serverisse.", - "Domain Filter List": "Domeeni filtri nimekiri", - "Don't have an account?": "Pole kontot?", - "don't install random functions from sources you don't trust.": "ärge installige juhuslikke funktsioone allikatest, mida te ei usalda.", - "don't install 
random tools from sources you don't trust.": "ärge installige juhuslikke tööriistu allikatest, mida te ei usalda.", - "Don't like the style": "Stiil ei meeldi", - "Done": "Valmis", - "Download": "Laadi alla", - "Download as SVG": "Laadi alla SVG-na", - "Download canceled": "Allalaadimine tühistatud", - "Download Database": "Laadi alla andmebaas", - "Drag and drop a file to upload or select a file to view": "Lohistage ja kukutage fail üleslaadimiseks või valige fail vaatamiseks", - "Draw": "Joonista", - "Drop any files here to add to the conversation": "Lohistage siia mistahes failid, et lisada need vestlusele", - "e.g. '30s','10m'. Valid time units are 's', 'm', 'h'.": "nt '30s', '10m'. Kehtivad ajaühikud on 's', 'm', 'h'.", - "e.g. 60": "nt 60", - "e.g. A filter to remove profanity from text": "nt filter, mis eemaldab tekstist roppused", - "e.g. My Filter": "nt Minu Filter", - "e.g. My Tools": "nt Minu Tööriistad", - "e.g. my_filter": "nt minu_filter", - "e.g. my_tools": "nt minu_toriistad", - "e.g. 
Tools for performing various operations": "nt tööriistad mitmesuguste operatsioonide teostamiseks", - "Edit": "Muuda", - "Edit Arena Model": "Muuda Areena mudelit", - "Edit Channel": "Muuda kanalit", - "Edit Connection": "Muuda ühendust", - "Edit Default Permissions": "Muuda vaikimisi õigusi", - "Edit Memory": "Muuda mälu", - "Edit User": "Muuda kasutajat", - "Edit User Group": "Muuda kasutajagruppi", - "ElevenLabs": "ElevenLabs", - "Email": "E-post", - "Embark on adventures": "Alusta seiklusi", - "Embedding": "Manustamine", - "Embedding Batch Size": "Manustamise partii suurus", - "Embedding Model": "Manustamise mudel", - "Embedding Model Engine": "Manustamise mudeli mootor", - "Embedding model set to \"{{embedding_model}}\"": "Manustamise mudel määratud kui \"{{embedding_model}}\"", - "Enable API Key": "Luba API võti", - "Enable autocomplete generation for chat messages": "Luba automaattäitmise genereerimine vestlussõnumitele", - "Enable Code Execution": "Luba koodi täitmine", - "Enable Code Interpreter": "Luba koodi interpretaator", - "Enable Community Sharing": "Luba kogukonnaga jagamine", - "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Luba mälu lukustamine (mlock), et vältida mudeli andmete vahetamist RAM-ist välja. See valik lukustab mudeli töökomplekti lehed RAM-i, tagades, et neid ei vahetata kettale. See aitab säilitada jõudlust, vältides lehevigu ja tagades kiire andmete juurdepääsu.", - "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. 
However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Luba mälu kaardistamine (mmap) mudeli andmete laadimiseks. See valik võimaldab süsteemil kasutada kettamahtu RAM-i laiendusena, koheldes kettafaile nii, nagu need oleksid RAM-is. See võib parandada mudeli jõudlust, võimaldades kiiremat andmete juurdepääsu. See ei pruugi siiski kõigi süsteemidega õigesti töötada ja võib tarbida märkimisväärse koguse kettaruumi.", - "Enable Message Rating": "Luba sõnumite hindamine", - "Enable Mirostat sampling for controlling perplexity.": "Luba Mirostat'i valim perplekssuse juhtimiseks.", - "Enable New Sign Ups": "Luba uued registreerimised", - "Enabled": "Lubatud", - "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Veenduge, et teie CSV-fail sisaldab 4 veergu selles järjekorras: Nimi, E-post, Parool, Roll.", - "Enter {{role}} message here": "Sisestage {{role}} sõnum siia", - "Enter a detail about yourself for your LLMs to recall": "Sisestage detail enda kohta, mida teie LLM-id saavad meenutada", - "Enter api auth string (e.g. username:password)": "Sisestage api autentimisstring (nt kasutajanimi:parool)", - "Enter Application DN": "Sisestage rakenduse DN", - "Enter Application DN Password": "Sisestage rakenduse DN parool", - "Enter Bing Search V7 Endpoint": "Sisestage Bing Search V7 lõpp-punkt", - "Enter Bing Search V7 Subscription Key": "Sisestage Bing Search V7 tellimuse võti", - "Enter Bocha Search API Key": "Sisestage Bocha Search API võti", - "Enter Brave Search API Key": "Sisestage Brave Search API võti", - "Enter certificate path": "Sisestage sertifikaadi tee", - "Enter CFG Scale (e.g. 
7.0)": "Sisestage CFG skaala (nt 7.0)", - "Enter Chunk Overlap": "Sisestage tükkide ülekate", - "Enter Chunk Size": "Sisestage tüki suurus", - "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "Sisestage komadega eraldatud \"token:kallutuse_väärtus\" paarid (näide: 5432:100, 413:-100)", - "Enter description": "Sisestage kirjeldus", - "Enter Document Intelligence Endpoint": "Sisestage dokumendi intelligentsuse lõpp-punkt", - "Enter Document Intelligence Key": "Sisestage dokumendi intelligentsuse võti", - "Enter domains separated by commas (e.g., example.com,site.org)": "Sisestage domeenid komadega eraldatult (nt example.com,site.org)", - "Enter Exa API Key": "Sisestage Exa API võti", - "Enter Github Raw URL": "Sisestage Github toorURL", - "Enter Google PSE API Key": "Sisestage Google PSE API võti", - "Enter Google PSE Engine Id": "Sisestage Google PSE mootori ID", - "Enter Image Size (e.g. 512x512)": "Sisestage pildi suurus (nt 512x512)", - "Enter Jina API Key": "Sisestage Jina API võti", - "Enter Jupyter Password": "Sisestage Jupyter parool", - "Enter Jupyter Token": "Sisestage Jupyter token", - "Enter Jupyter URL": "Sisestage Jupyter URL", - "Enter Kagi Search API Key": "Sisestage Kagi Search API võti", - "Enter Key Behavior": "Sisestage võtme käitumine", - "Enter language codes": "Sisestage keelekoodid", - "Enter Model ID": "Sisestage mudeli ID", - "Enter model tag (e.g. {{modelTag}})": "Sisestage mudeli silt (nt {{modelTag}})", - "Enter Mojeek Search API Key": "Sisestage Mojeek Search API võti", - "Enter Number of Steps (e.g. 50)": "Sisestage sammude arv (nt 50)", - "Enter Perplexity API Key": "Sisestage Perplexity API võti", - "Enter proxy URL (e.g. https://user:password@host:port)": "Sisestage puhverserveri URL (nt https://kasutaja:parool@host:port)", - "Enter reasoning effort": "Sisestage arutluspingutus", - "Enter Sampler (e.g. Euler a)": "Sisestage valimismeetod (nt Euler a)", - "Enter Scheduler (e.g. 
Karras)": "Sisestage planeerija (nt Karras)", - "Enter Score": "Sisestage skoor", - "Enter SearchApi API Key": "Sisestage SearchApi API võti", - "Enter SearchApi Engine": "Sisestage SearchApi mootor", - "Enter Searxng Query URL": "Sisestage Searxng päringu URL", - "Enter Seed": "Sisestage seeme", - "Enter SerpApi API Key": "Sisestage SerpApi API võti", - "Enter SerpApi Engine": "Sisestage SerpApi mootor", - "Enter Serper API Key": "Sisestage Serper API võti", - "Enter Serply API Key": "Sisestage Serply API võti", - "Enter Serpstack API Key": "Sisestage Serpstack API võti", - "Enter server host": "Sisestage serveri host", - "Enter server label": "Sisestage serveri silt", - "Enter server port": "Sisestage serveri port", - "Enter stop sequence": "Sisestage lõpetamise järjestus", - "Enter system prompt": "Sisestage süsteemi vihjed", - "Enter Tavily API Key": "Sisestage Tavily API võti", - "Enter the public URL of your WebUI. This URL will be used to generate links in the notifications.": "Sisestage oma WebUI avalik URL. Seda URL-i kasutatakse teadaannetes linkide genereerimiseks.", - "Enter Tika Server URL": "Sisestage Tika serveri URL", - "Enter timeout in seconds": "Sisestage aegumine sekundites", - "Enter to Send": "Enter saatmiseks", - "Enter Top K": "Sisestage Top K", - "Enter URL (e.g. http://127.0.0.1:7860/)": "Sisestage URL (nt http://127.0.0.1:7860/)", - "Enter URL (e.g. 
http://localhost:11434)": "Sisestage URL (nt http://localhost:11434)", - "Enter your current password": "Sisestage oma praegune parool", - "Enter Your Email": "Sisestage oma e-post", - "Enter Your Full Name": "Sisestage oma täisnimi", - "Enter your message": "Sisestage oma sõnum", - "Enter your new password": "Sisestage oma uus parool", - "Enter Your Password": "Sisestage oma parool", - "Enter Your Role": "Sisestage oma roll", - "Enter Your Username": "Sisestage oma kasutajanimi", - "Enter your webhook URL": "Sisestage oma webhook URL", - "Error": "Viga", - "ERROR": "VIGA", - "Error accessing Google Drive: {{error}}": "Viga Google Drive'i juurdepääsul: {{error}}", - "Error uploading file: {{error}}": "Viga faili üleslaadimisel: {{error}}", - "Evaluations": "Hindamised", - "Exa API Key": "Exa API võti", - "Example: (&(objectClass=inetOrgPerson)(uid=%s))": "Näide: (&(objectClass=inetOrgPerson)(uid=%s))", - "Example: ALL": "Näide: ALL", - "Example: mail": "Näide: mail", - "Example: ou=users,dc=foo,dc=example": "Näide: ou=users,dc=foo,dc=example", - "Example: sAMAccountName or uid or userPrincipalName": "Näide: sAMAccountName või uid või userPrincipalName", - "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "Ületasite litsentsis määratud istekohtade arvu. 
Palun võtke ühendust toega, et suurendada istekohtade arvu.", - "Exclude": "Välista", - "Execute code for analysis": "Käivita kood analüüsimiseks", - "Expand": "Laienda", - "Experimental": "Katsetuslik", - "Explain": "Selgita", - "Explain this section to me in more detail": "Selgitage seda lõiku mulle üksikasjalikumalt", - "Explore the cosmos": "Uuri kosmost", - "Export": "Ekspordi", - "Export All Archived Chats": "Ekspordi kõik arhiveeritud vestlused", - "Export All Chats (All Users)": "Ekspordi kõik vestlused (kõik kasutajad)", - "Export chat (.json)": "Ekspordi vestlus (.json)", - "Export Chats": "Ekspordi vestlused", - "Export Config to JSON File": "Ekspordi seadistus JSON-failina", - "Export Functions": "Ekspordi funktsioonid", - "Export Models": "Ekspordi mudelid", - "Export Presets": "Ekspordi eelseadistused", - "Export Prompts": "Ekspordi vihjed", - "Export to CSV": "Ekspordi CSV-na", - "Export Tools": "Ekspordi tööriistad", - "External Models": "Välised mudelid", - "Failed to add file.": "Faili lisamine ebaõnnestus.", - "Failed to create API Key.": "API võtme loomine ebaõnnestus.", - "Failed to fetch models": "Mudelite toomine ebaõnnestus", - "Failed to read clipboard contents": "Lõikelaua sisu lugemine ebaõnnestus", - "Failed to save models configuration": "Mudelite konfiguratsiooni salvestamine ebaõnnestus", - "Failed to update settings": "Seadete uuendamine ebaõnnestus", - "Failed to upload file.": "Faili üleslaadimine ebaõnnestus.", - "Features": "Funktsioonid", - "Features Permissions": "Funktsioonide õigused", - "February": "Veebruar", - "Feedback History": "Tagasiside ajalugu", - "Feedbacks": "Tagasisided", - "Feel free to add specific details": "Võite lisada konkreetseid üksikasju", - "File": "Fail", - "File added successfully.": "Fail edukalt lisatud.", - "File content updated successfully.": "Faili sisu edukalt uuendatud.", - "File Mode": "Faili režiim", - "File not found.": "Faili ei leitud.", - "File removed successfully.": "Fail edukalt 
eemaldatud.", - "File size should not exceed {{maxSize}} MB.": "Faili suurus ei tohiks ületada {{maxSize}} MB.", - "File uploaded successfully": "Fail edukalt üles laaditud", - "Files": "Failid", - "Filter is now globally disabled": "Filter on nüüd globaalselt keelatud", - "Filter is now globally enabled": "Filter on nüüd globaalselt lubatud", - "Filters": "Filtrid", - "Fingerprint spoofing detected: Unable to use initials as avatar. Defaulting to default profile image.": "Tuvastati sõrmejälje võltsimine: initsiaalide kasutamine avatarina pole võimalik. Kasutatakse vaikimisi profiilikujutist.", - "Fluidly stream large external response chunks": "Suurte väliste vastuste tükkide sujuv voogedastus", - "Focus chat input": "Fokuseeri vestluse sisendile", - "Folder deleted successfully": "Kaust edukalt kustutatud", - "Folder name cannot be empty": "Kausta nimi ei saa olla tühi", - "Folder name cannot be empty.": "Kausta nimi ei saa olla tühi.", - "Folder name updated successfully": "Kausta nimi edukalt uuendatud", - "Followed instructions perfectly": "Järgis juhiseid täiuslikult", - "Forge new paths": "Loo uusi radu", - "Form": "Vorm", - "Format your variables using brackets like this:": "Vormindage oma muutujad sulgudega nagu siin:", - "Frequency Penalty": "Sageduse karistus", - "Full Context Mode": "Täiskonteksti režiim", - "Function": "Funktsioon", - "Function Calling": "Funktsiooni kutsumine", - "Function created successfully": "Funktsioon edukalt loodud", - "Function deleted successfully": "Funktsioon edukalt kustutatud", - "Function Description": "Funktsiooni kirjeldus", - "Function ID": "Funktsiooni ID", - "Function is now globally disabled": "Funktsioon on nüüd globaalselt keelatud", - "Function is now globally enabled": "Funktsioon on nüüd globaalselt lubatud", - "Function Name": "Funktsiooni nimi", - "Function updated successfully": "Funktsioon edukalt uuendatud", - "Functions": "Funktsioonid", - "Functions allow arbitrary code execution": "Funktsioonid 
võimaldavad suvalise koodi käivitamist", - "Functions allow arbitrary code execution.": "Funktsioonid võimaldavad suvalise koodi käivitamist.", - "Functions imported successfully": "Funktsioonid edukalt imporditud", - "Gemini": "Gemini", - "Gemini API Config": "Gemini API seadistus", - "Gemini API Key is required.": "Gemini API võti on nõutav.", - "General": "Üldine", - "Generate an image": "Genereeri pilt", - "Generate Image": "Genereeri pilt", - "Generate prompt pair": "Genereeri vihjete paar", - "Generating search query": "Otsinguküsimuse genereerimine", - "Get started": "Alusta", - "Get started with {{WEBUI_NAME}}": "Alusta {{WEBUI_NAME}} kasutamist", - "Global": "Globaalne", - "Good Response": "Hea vastus", - "Google Drive": "Google Drive", - "Google PSE API Key": "Google PSE API võti", - "Google PSE Engine Id": "Google PSE mootori ID", - "Group created successfully": "Grupp edukalt loodud", - "Group deleted successfully": "Grupp edukalt kustutatud", - "Group Description": "Grupi kirjeldus", - "Group Name": "Grupi nimi", - "Group updated successfully": "Grupp edukalt uuendatud", - "Groups": "Grupid", - "Haptic Feedback": "Haptiline tagasiside", - "has no conversations.": "vestlused puuduvad.", - "Hello, {{name}}": "Tere, {{name}}", - "Help": "Abi", - "Help us create the best community leaderboard by sharing your feedback history!": "Aidake meil luua parim kogukonna edetabel, jagades oma tagasiside ajalugu!", - "Hex Color": "Hex värv", - "Hex Color - Leave empty for default color": "Hex värv - jätke tühjaks vaikevärvi jaoks", - "Hide": "Peida", - "Home": "Avaleht", - "Host": "Host", - "How can I help you today?": "Kuidas saan teid täna aidata?", - "How would you rate this response?": "Kuidas hindaksite seda vastust?", - "Hybrid Search": "Hübriidotsing", - "I acknowledge that I have read and I understand the implications of my action. 
I am aware of the risks associated with executing arbitrary code and I have verified the trustworthiness of the source.": "Kinnitan, et olen lugenud ja mõistan oma tegevuse tagajärgi. Olen teadlik suvalise koodi käivitamisega seotud riskidest ja olen kontrollinud allika usaldusväärsust.", - "ID": "ID", - "Ignite curiosity": "Süüta uudishimu", - "Image": "Pilt", - "Image Compression": "Pildi tihendamine", - "Image Generation": "Pildi genereerimine", - "Image Generation (Experimental)": "Pildi genereerimine (katsetuslik)", - "Image Generation Engine": "Pildi genereerimise mootor", - "Image Max Compression Size": "Pildi maksimaalne tihendamise suurus", - "Image Prompt Generation": "Pildi vihje genereerimine", - "Image Prompt Generation Prompt": "Pildi vihje genereerimise vihje", - "Image Settings": "Pildi seaded", - "Images": "Pildid", - "Import Chats": "Impordi vestlused", - "Import Config from JSON File": "Impordi seadistus JSON-failist", - "Import Functions": "Impordi funktsioonid", - "Import Models": "Impordi mudelid", - "Import Presets": "Impordi eelseadistused", - "Import Prompts": "Impordi vihjed", - "Import Tools": "Impordi tööriistad", - "Include": "Kaasa", - "Include `--api-auth` flag when running stable-diffusion-webui": "Lisage `--api-auth` lipp stable-diffusion-webui käivitamisel", - "Include `--api` flag when running stable-diffusion-webui": "Lisage `--api` lipp stable-diffusion-webui käivitamisel", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "Mõjutab, kui kiiresti algoritm reageerib genereeritud teksti tagasisidele. 
Madalam õppimiskiirus annab tulemuseks aeglasemad kohandused, samas kui kõrgem õppimiskiirus muudab algoritmi tundlikumaks.", - "Info": "Info", - "Input commands": "Sisendkäsud", - "Install from Github URL": "Installige Github URL-ilt", - "Instant Auto-Send After Voice Transcription": "Kohene automaatne saatmine pärast hääle transkriptsiooni", - "Integration": "Integratsioon", - "Interface": "Kasutajaliides", - "Invalid file format.": "Vigane failiformaat.", - "Invalid Tag": "Vigane silt", - "is typing...": "kirjutab...", - "January": "Jaanuar", - "Jina API Key": "Jina API võti", - "join our Discord for help.": "liituge abi saamiseks meie Discordiga.", - "JSON": "JSON", - "JSON Preview": "JSON eelvaade", - "July": "Juuli", - "June": "Juuni", - "Jupyter Auth": "Jupyter autentimine", - "Jupyter URL": "Jupyter URL", - "JWT Expiration": "JWT aegumine", - "JWT Token": "JWT token", - "Kagi Search API Key": "Kagi Search API võti", - "Keep Alive": "Hoia elus", - "Key": "Võti", - "Keyboard shortcuts": "Klaviatuuri otseteed", - "Knowledge": "Teadmised", - "Knowledge Access": "Teadmiste juurdepääs", - "Knowledge created successfully.": "Teadmised edukalt loodud.", - "Knowledge deleted successfully.": "Teadmised edukalt kustutatud.", - "Knowledge reset successfully.": "Teadmised edukalt lähtestatud.", - "Knowledge updated successfully": "Teadmised edukalt uuendatud", - "Kokoro.js (Browser)": "Kokoro.js (brauser)", - "Kokoro.js Dtype": "Kokoro.js andmetüüp", - "Label": "Silt", - "Landing Page Mode": "Maandumislehe režiim", - "Language": "Keel", - "Last Active": "Viimati aktiivne", - "Last Modified": "Viimati muudetud", - "Last reply": "Viimane vastus", - "LDAP": "LDAP", - "LDAP server updated": "LDAP server uuendatud", - "Leaderboard": "Edetabel", - "Leave empty for unlimited": "Jäta tühjaks piiranguta kasutamiseks", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "Jäta tühjaks, et kaasata kõik mudelid \"{{URL}}/api/tags\" lõpp-punktist", - "Leave 
empty to include all models from \"{{URL}}/models\" endpoint": "Jäta tühjaks, et kaasata kõik mudelid \"{{URL}}/models\" lõpp-punktist", - "Leave empty to include all models or select specific models": "Jäta tühjaks, et kaasata kõik mudelid või vali konkreetsed mudelid", - "Leave empty to use the default prompt, or enter a custom prompt": "Jäta tühjaks, et kasutada vaikimisi vihjet, või sisesta kohandatud vihje", - "Leave model field empty to use the default model.": "Jäta mudeli väli tühjaks, et kasutada vaikimisi mudelit.", - "License": "Litsents", - "Light": "Hele", - "Listening...": "Kuulamine...", - "Llama.cpp": "Llama.cpp", - "LLMs can make mistakes. Verify important information.": "LLM-id võivad teha vigu. Kontrollige olulist teavet.", - "Loader": "Laadija", - "Loading Kokoro.js...": "Kokoro.js laadimine...", - "Local": "Kohalik", - "Local Models": "Kohalikud mudelid", - "Location access not allowed": "Asukoha juurdepääs pole lubatud", - "Logit Bias": "Logiti kallutatus", - "Lost": "Kaotanud", - "LTR": "LTR", - "Made by Open WebUI Community": "Loodud Open WebUI kogukonna poolt", - "Make sure to enclose them with": "Veenduge, et need on ümbritsetud järgmisega:", - "Make sure to export a workflow.json file as API format from ComfyUI.": "Veenduge, et ekspordite workflow.json faili API formaadis ComfyUI-st.", - "Manage": "Halda", - "Manage Direct Connections": "Halda otseseid ühendusi", - "Manage Models": "Halda mudeleid", - "Manage Ollama": "Halda Ollama't", - "Manage Ollama API Connections": "Halda Ollama API ühendusi", - "Manage OpenAI API Connections": "Halda OpenAI API ühendusi", - "Manage Pipelines": "Halda torustikke", - "March": "Märts", - "Max Tokens (num_predict)": "Max tokeneid (num_predict)", - "Max Upload Count": "Maksimaalne üleslaadimiste arv", - "Max Upload Size": "Maksimaalne üleslaadimise suurus", - "Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Korraga saab alla laadida maksimaalselt 3 mudelit. 
Palun proovige hiljem uuesti.", - "May": "Mai", - "Memories accessible by LLMs will be shown here.": "LLM-idele ligipääsetavad mälestused kuvatakse siin.", - "Memory": "Mälu", - "Memory added successfully": "Mälu edukalt lisatud", - "Memory cleared successfully": "Mälu edukalt tühjendatud", - "Memory deleted successfully": "Mälu edukalt kustutatud", - "Memory updated successfully": "Mälu edukalt uuendatud", - "Merge Responses": "Ühenda vastused", - "Message rating should be enabled to use this feature": "Selle funktsiooni kasutamiseks peaks sõnumite hindamine olema lubatud", - "Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "Teie saadetud sõnumeid pärast lingi loomist ei jagata. Kasutajad, kellel on URL, saavad vaadata jagatud vestlust.", - "Min P": "Min P", - "Minimum Score": "Minimaalne skoor", - "Mirostat": "Mirostat", - "Mirostat Eta": "Mirostat Eta", - "Mirostat Tau": "Mirostat Tau", - "Model": "Mudel", - "Model '{{modelName}}' has been successfully downloaded.": "Mudel '{{modelName}}' on edukalt alla laaditud.", - "Model '{{modelTag}}' is already in queue for downloading.": "Mudel '{{modelTag}}' on juba allalaadimise järjekorras.", - "Model {{modelId}} not found": "Mudelit {{modelId}} ei leitud", - "Model {{modelName}} is not vision capable": "Mudel {{modelName}} ei ole võimeline visuaalseid sisendeid töötlema", - "Model {{name}} is now {{status}}": "Mudel {{name}} on nüüd {{status}}", - "Model accepts image inputs": "Mudel võtab vastu pilte sisendina", - "Model created successfully!": "Mudel edukalt loodud!", - "Model filesystem path detected. Model shortname is required for update, cannot continue.": "Tuvastati mudeli failisüsteemi tee. 
Uuendamiseks on vajalik mudeli lühinimi, ei saa jätkata.", - "Model Filtering": "Mudeli filtreerimine", - "Model ID": "Mudeli ID", - "Model IDs": "Mudeli ID-d", - "Model Name": "Mudeli nimi", - "Model not selected": "Mudel pole valitud", - "Model Params": "Mudeli parameetrid", - "Model Permissions": "Mudeli õigused", - "Model updated successfully": "Mudel edukalt uuendatud", - "Modelfile Content": "Modelfile sisu", - "Models": "Mudelid", - "Models Access": "Mudelite juurdepääs", - "Models configuration saved successfully": "Mudelite seadistus edukalt salvestatud", - "Mojeek Search API Key": "Mojeek Search API võti", - "more": "rohkem", - "More": "Rohkem", - "Name": "Nimi", - "Name your knowledge base": "Nimetage oma teadmiste baas", - "Native": "Omane", - "New Chat": "Uus vestlus", - "New Folder": "Uus kaust", - "New Password": "Uus parool", - "new-channel": "uus-kanal", - "No content found": "Sisu ei leitud", - "No content to speak": "Pole mida rääkida", - "No distance available": "Kaugus pole saadaval", - "No feedbacks found": "Tagasisidet ei leitud", - "No file selected": "Faili pole valitud", - "No files found.": "Faile ei leitud.", - "No groups with access, add a group to grant access": "Puuduvad juurdepääsuõigustega grupid, lisage grupp juurdepääsu andmiseks", - "No HTML, CSS, or JavaScript content found.": "HTML, CSS ega JavaScript sisu ei leitud.", - "No inference engine with management support found": "Järeldusmootorit haldamise toega ei leitud", - "No knowledge found": "Teadmisi ei leitud", - "No memories to clear": "Pole mälestusi, mida kustutada", - "No model IDs": "Mudeli ID-d puuduvad", - "No models found": "Mudeleid ei leitud", - "No models selected": "Mudeleid pole valitud", - "No results found": "Tulemusi ei leitud", - "No search query generated": "Otsingupäringut ei genereeritud", - "No source available": "Allikas pole saadaval", - "No users were found.": "Kasutajaid ei leitud.", - "No valves to update": "Pole klappe, mida uuendada", - "None": 
"Mitte ühtegi", - "Not factually correct": "Faktiliselt ebakorrektne", - "Not helpful": "Pole abistav", - "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "Märkus: kui määrate minimaalse skoori, tagastab otsing ainult dokumendid, mille skoor on suurem või võrdne minimaalse skooriga.", - "Notes": "Märkmed", - "Notification Sound": "Teavituse heli", - "Notification Webhook": "Teavituse webhook", - "Notifications": "Teavitused", - "November": "November", - "num_gpu (Ollama)": "num_gpu (Ollama)", - "num_thread (Ollama)": "num_thread (Ollama)", - "OAuth ID": "OAuth ID", - "October": "Oktoober", - "Off": "Väljas", - "Okay, Let's Go!": "Hea küll, lähme!", - "OLED Dark": "OLED tume", - "Ollama": "Ollama", - "Ollama API": "Ollama API", - "Ollama API settings updated": "Ollama API seaded uuendatud", - "Ollama Version": "Ollama versioon", - "On": "Sees", - "OneDrive": "OneDrive", - "Only alphanumeric characters and hyphens are allowed": "Lubatud on ainult tähtede-numbrite kombinatsioonid ja sidekriipsud", - "Only alphanumeric characters and hyphens are allowed in the command string.": "Käsustringis on lubatud ainult tähtede-numbrite kombinatsioonid ja sidekriipsud.", - "Only collections can be edited, create a new knowledge base to edit/add documents.": "Muuta saab ainult kogusid, dokumentide muutmiseks/lisamiseks looge uus teadmiste baas.", - "Only select users and groups with permission can access": "Juurdepääs on ainult valitud õigustega kasutajatel ja gruppidel", - "Oops! Looks like the URL is invalid. Please double-check and try again.": "Oih! URL tundub olevat vigane. Palun kontrollige ja proovige uuesti.", - "Oops! There are files still uploading. Please wait for the upload to complete.": "Oih! Failide üleslaadimine on veel pooleli. Palun oodake, kuni üleslaadimine lõpeb.", - "Oops! There was an error in the previous response.": "Oih! Eelmises vastuses oli viga.", - "Oops! 
You're using an unsupported method (frontend only). Please serve the WebUI from the backend.": "Oih! Kasutate toetamatut meetodit (ainult kasutajaliides). Palun serveerige WebUI tagarakendusest.", - "Open file": "Ava fail", - "Open in full screen": "Ava täisekraanil", - "Open new chat": "Ava uus vestlus", - "Open WebUI uses faster-whisper internally.": "Open WebUI kasutab sisemiselt faster-whisper'it.", - "Open WebUI uses SpeechT5 and CMU Arctic speaker embeddings.": "Open WebUI kasutab SpeechT5 ja CMU Arctic kõneleja manustamisi.", - "Open WebUI version (v{{OPEN_WEBUI_VERSION}}) is lower than required version (v{{REQUIRED_VERSION}})": "Open WebUI versioon (v{{OPEN_WEBUI_VERSION}}) on madalam kui nõutav versioon (v{{REQUIRED_VERSION}})", - "OpenAI": "OpenAI", - "OpenAI API": "OpenAI API", - "OpenAI API Config": "OpenAI API seadistus", - "OpenAI API Key is required.": "OpenAI API võti on nõutav.", - "OpenAI API settings updated": "OpenAI API seaded uuendatud", - "OpenAI URL/Key required.": "OpenAI URL/võti on nõutav.", - "or": "või", - "Organize your users": "Korraldage oma kasutajad", - "Other": "Muu", - "OUTPUT": "VÄLJUND", - "Output format": "Väljundformaat", - "Overview": "Ülevaade", - "page": "leht", - "Password": "Parool", - "Paste Large Text as File": "Kleebi suur tekst failina", - "PDF document (.pdf)": "PDF dokument (.pdf)", - "PDF Extract Images (OCR)": "PDF-ist piltide väljavõtmine (OCR)", - "pending": "ootel", - "Permission denied when accessing media devices": "Juurdepääs meediumiseadmetele keelatud", - "Permission denied when accessing microphone": "Juurdepääs mikrofonile keelatud", - "Permission denied when accessing microphone: {{error}}": "Juurdepääs mikrofonile keelatud: {{error}}", - "Permissions": "Õigused", - "Perplexity API Key": "Perplexity API võti", - "Personalization": "Isikupärastamine", - "Pin": "Kinnita", - "Pinned": "Kinnitatud", - "Pioneer insights": "Pioneeri arusaamad", - "Pipeline deleted successfully": "Torustik edukalt 
kustutatud", - "Pipeline downloaded successfully": "Torustik edukalt alla laaditud", - "Pipelines": "Torustikud", - "Pipelines Not Detected": "Torustikke ei tuvastatud", - "Pipelines Valves": "Torustike klapid", - "Plain text (.txt)": "Lihttekst (.txt)", - "Playground": "Mänguväljak", - "Please carefully review the following warnings:": "Palun vaadake hoolikalt läbi järgmised hoiatused:", - "Please do not close the settings page while loading the model.": "Palun ärge sulgege seadete lehte mudeli laadimise ajal.", - "Please enter a prompt": "Palun sisestage vihje", - "Please fill in all fields.": "Palun täitke kõik väljad.", - "Please select a model first.": "Palun valige esmalt mudel.", - "Please select a model.": "Palun valige mudel.", - "Please select a reason": "Palun valige põhjus", - "Port": "Port", - "Positive attitude": "Positiivne suhtumine", - "Prefix ID": "Prefiksi ID", - "Prefix ID is used to avoid conflicts with other connections by adding a prefix to the model IDs - leave empty to disable": "Prefiksi ID-d kasutatakse teiste ühendustega konfliktide vältimiseks, lisades mudeli ID-dele prefiksi - jätke tühjaks keelamiseks", - "Presence Penalty": "Kohaloleku karistus", - "Previous 30 days": "Eelmised 30 päeva", - "Previous 7 days": "Eelmised 7 päeva", - "Profile Image": "Profiilipilt", - "Prompt": "Vihje", - "Prompt (e.g. 
Tell me a fun fact about the Roman Empire)": "Vihje (nt Räägi mulle üks huvitav fakt Rooma impeeriumi kohta)", - "Prompt Content": "Vihje sisu", - "Prompt created successfully": "Vihje edukalt loodud", - "Prompt suggestions": "Vihje soovitused", - "Prompt updated successfully": "Vihje edukalt uuendatud", - "Prompts": "Vihjed", - "Prompts Access": "Vihjete juurdepääs", - "Pull \"{{searchValue}}\" from Ollama.com": "Tõmba \"{{searchValue}}\" Ollama.com-ist", - "Pull a model from Ollama.com": "Tõmba mudel Ollama.com-ist", - "Query Generation Prompt": "Päringu genereerimise vihje", - "RAG Template": "RAG mall", - "Rating": "Hinnang", - "Re-rank models by topic similarity": "Järjesta mudelid teema sarnasuse alusel ümber", - "Read": "Loe", - "Read Aloud": "Loe valjult", - "Reasoning Effort": "Arutluspingutus", - "Record voice": "Salvesta hääl", - "Redirecting you to Open WebUI Community": "Suunamine Open WebUI kogukonda", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "Vähendab mõttetuste genereerimise tõenäosust. 
Kõrgem väärtus (nt 100) annab mitmekesisemaid vastuseid, samas kui madalam väärtus (nt 10) on konservatiivsem.", - "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Viita endale kui \"Kasutaja\" (nt \"Kasutaja õpib hispaania keelt\")", - "References from": "Viited allikast", - "Refused when it shouldn't have": "Keeldus, kui ei oleks pidanud", - "Regenerate": "Regenereeri", - "Release Notes": "Väljalaskemärkmed", - "Relevance": "Asjakohasus", - "Remove": "Eemalda", - "Remove Model": "Eemalda mudel", - "Rename": "Nimeta ümber", - "Reorder Models": "Muuda mudelite järjekorda", - "Repeat Last N": "Korda viimast N", - "Repeat Penalty (Ollama)": "Korduse karistus (Ollama)", - "Reply in Thread": "Vasta lõimes", - "Request Mode": "Päringu režiim", - "Reranking Model": "Ümberjärjestamise mudel", - "Reranking model disabled": "Ümberjärjestamise mudel keelatud", - "Reranking model set to \"{{reranking_model}}\"": "Ümberjärjestamise mudel määratud kui \"{{reranking_model}}\"", - "Reset": "Lähtesta", - "Reset All Models": "Lähtesta kõik mudelid", - "Reset Upload Directory": "Lähtesta üleslaadimiste kataloog", - "Reset Vector Storage/Knowledge": "Lähtesta vektormälu/teadmised", - "Reset view": "Lähtesta vaade", - "Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Vastuste teavitusi ei saa aktiveerida, kuna veebisaidi õigused on keelatud. 
Vajalike juurdepääsude andmiseks külastage oma brauseri seadeid.", - "Response splitting": "Vastuse tükeldamine", - "Result": "Tulemus", - "Retrieval": "Taastamine", - "Retrieval Query Generation": "Taastamise päringu genereerimine", - "Rich Text Input for Chat": "Rikasteksti sisend vestluse jaoks", - "RK": "RK", - "Role": "Roll", - "Rosé Pine": "Rosé Pine", - "Rosé Pine Dawn": "Rosé Pine Dawn", - "RTL": "RTL", - "Run": "Käivita", - "Running": "Töötab", - "Save": "Salvesta", - "Save & Create": "Salvesta ja loo", - "Save & Update": "Salvesta ja uuenda", - "Save As Copy": "Salvesta koopiana", - "Save Tag": "Salvesta silt", - "Saved": "Salvestatud", - "Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "Vestluslogi salvestamine otse teie brauseri mällu pole enam toetatud. Palun võtke hetk, et alla laadida ja kustutada oma vestluslogi, klõpsates allpool olevat nuppu. 
Ärge muretsege, saate hõlpsasti oma vestluslogi tagarakendusse uuesti importida, kasutades", - "Scroll to bottom when switching between branches": "Keri alla harus liikumisel", - "Search": "Otsing", - "Search a model": "Otsi mudelit", - "Search Base": "Otsingu baas", - "Search Chats": "Otsi vestlusi", - "Search Collection": "Otsi kogust", - "Search Filters": "Otsingu filtrid", - "search for tags": "otsi silte", - "Search Functions": "Otsi funktsioone", - "Search Knowledge": "Otsi teadmisi", - "Search Models": "Otsi mudeleid", - "Search options": "Otsingu valikud", - "Search Prompts": "Otsi vihjeid", - "Search Result Count": "Otsingutulemuste arv", - "Search the internet": "Otsi internetist", - "Search Tools": "Otsi tööriistu", - "SearchApi API Key": "SearchApi API võti", - "SearchApi Engine": "SearchApi mootor", - "Searched {{count}} sites": "Otsiti {{count}} saidilt", - "Searching \"{{searchQuery}}\"": "Otsimine: \"{{searchQuery}}\"", - "Searching Knowledge for \"{{searchQuery}}\"": "Teadmistest otsimine: \"{{searchQuery}}\"", - "Searxng Query URL": "Searxng päringu URL", - "See readme.md for instructions": "Juhiste saamiseks vaadake readme.md", - "See what's new": "Vaata, mis on uut", - "Seed": "Seeme", - "Select a base model": "Valige baas mudel", - "Select a engine": "Valige mootor", - "Select a function": "Valige funktsioon", - "Select a group": "Valige grupp", - "Select a model": "Valige mudel", - "Select a pipeline": "Valige torustik", - "Select a pipeline url": "Valige torustiku URL", - "Select a tool": "Valige tööriist", - "Select an auth method": "Valige autentimismeetod", - "Select an Ollama instance": "Valige Ollama instants", - "Select Engine": "Valige mootor", - "Select Knowledge": "Valige teadmised", - "Select only one model to call": "Valige ainult üks mudel kutsumiseks", - "Selected model(s) do not support image inputs": "Valitud mudel(id) ei toeta pilte sisendina", - "Semantic distance to query": "Semantiline kaugus päringust", - "Send": "Saada", 
- "Send a Message": "Saada sõnum", - "Send message": "Saada sõnum", - "Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "Saadab `stream_options: { include_usage: true }` päringus.\nToetatud teenusepakkujad tagastavad määramisel vastuses tokeni kasutuse teabe.", - "September": "September", - "SerpApi API Key": "SerpApi API võti", - "SerpApi Engine": "SerpApi mootor", - "Serper API Key": "Serper API võti", - "Serply API Key": "Serply API võti", - "Serpstack API Key": "Serpstack API võti", - "Server connection verified": "Serveri ühendus kontrollitud", - "Set as default": "Määra vaikimisi", - "Set CFG Scale": "Määra CFG skaala", - "Set Default Model": "Määra vaikimisi mudel", - "Set embedding model": "Määra manustamise mudel", - "Set embedding model (e.g. {{model}})": "Määra manustamise mudel (nt {{model}})", - "Set Image Size": "Määra pildi suurus", - "Set reranking model (e.g. {{model}})": "Määra ümberjärjestamise mudel (nt {{model}})", - "Set Sampler": "Määra valimismeetod", - "Set Scheduler": "Määra planeerija", - "Set Steps": "Määra sammud", - "Set Task Model": "Määra ülesande mudel", - "Set the number of layers, which will be off-loaded to GPU. Increasing this value can significantly improve performance for models that are optimized for GPU acceleration but may also consume more power and GPU resources.": "Määrake kihtide arv, mis laaditakse GPU-le. Selle väärtuse suurendamine võib oluliselt parandada jõudlust mudelite puhul, mis on optimeeritud GPU kiirenduse jaoks, kuid võib tarbida rohkem energiat ja GPU ressursse.", - "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Määrake arvutusteks kasutatavate töölõimede arv. 
See valik kontrollib, mitu lõime kasutatakse saabuvate päringute samaaegseks töötlemiseks. Selle väärtuse suurendamine võib parandada jõudlust suure samaaegsusega töökoormuste korral, kuid võib tarbida rohkem CPU ressursse.", - "Set Voice": "Määra hääl", - "Set whisper model": "Määra whisper mudel", - "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "Seab tasase kallutatuse tokenite vastu, mis on esinenud vähemalt üks kord. Kõrgem väärtus (nt 1,5) karistab kordusi tugevamalt, samas kui madalam väärtus (nt 0,9) on leebem. Väärtuse 0 korral on see keelatud.", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "Seab skaleeritava kallutatuse tokenite vastu korduste karistamiseks, põhinedes sellel, mitu korda need on esinenud. Kõrgem väärtus (nt 1,5) karistab kordusi tugevamalt, samas kui madalam väärtus (nt 0,9) on leebem. Väärtuse 0 korral on see keelatud.", - "Sets how far back for the model to look back to prevent repetition.": "Määrab, kui kaugele mudel tagasi vaatab, et vältida kordusi.", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "Määrab genereerimiseks kasutatava juhusliku arvu seemne. Selle määramine kindlale numbrile paneb mudeli genereerima sama teksti sama vihje korral.", - "Sets the size of the context window used to generate the next token.": "Määrab järgmise tokeni genereerimiseks kasutatava konteksti akna suuruse.", - "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. 
Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Määrab kasutatavad lõpetamise järjestused. Kui see muster kohatakse, lõpetab LLM teksti genereerimise ja tagastab. Mitme lõpetamise mustri saab määrata, täpsustades modelfile'is mitu eraldi lõpetamise parameetrit.", - "Settings": "Seaded", - "Settings saved successfully!": "Seaded edukalt salvestatud!", - "Share": "Jaga", - "Share Chat": "Jaga vestlust", - "Share to Open WebUI Community": "Jaga Open WebUI kogukonnaga", - "Show": "Näita", - "Show \"What's New\" modal on login": "Näita \"Mis on uut\" modaalakent sisselogimisel", - "Show Admin Details in Account Pending Overlay": "Näita administraatori üksikasju konto ootel kattekihil", - "Show shortcuts": "Näita otseteid", - "Show your support!": "Näita oma toetust!", - "Showcased creativity": "Näitas loovust", - "Sign in": "Logi sisse", - "Sign in to {{WEBUI_NAME}}": "Logi sisse {{WEBUI_NAME}}", - "Sign in to {{WEBUI_NAME}} with LDAP": "Logi sisse {{WEBUI_NAME}} LDAP-ga", - "Sign Out": "Logi välja", - "Sign up": "Registreeru", - "Sign up to {{WEBUI_NAME}}": "Registreeru {{WEBUI_NAME}}", - "Signing in to {{WEBUI_NAME}}": "Sisselogimine {{WEBUI_NAME}}", - "sk-1234": "sk-1234", - "Source": "Allikas", - "Speech Playback Speed": "Kõne taasesituse kiirus", - "Speech recognition error: {{error}}": "Kõnetuvastuse viga: {{error}}", - "Speech-to-Text Engine": "Kõne-tekstiks mootor", - "Stop": "Peata", - "Stop Sequence": "Lõpetamise järjestus", - "Stream Chat Response": "Voogedasta vestluse vastust", - "STT Model": "STT mudel", - "STT Settings": "STT seaded", - "Subtitle (e.g. 
about the Roman Empire)": "Alampealkiri (nt Rooma impeeriumi kohta)", - "Success": "Õnnestus", - "Successfully updated.": "Edukalt uuendatud.", - "Suggested": "Soovitatud", - "Support": "Tugi", - "Support this plugin:": "Toeta seda pistikprogrammi:", - "Sync directory": "Sünkroniseeri kataloog", - "System": "Süsteem", - "System Instructions": "Süsteemi juhised", - "System Prompt": "Süsteemi vihje", - "Tags Generation": "Siltide genereerimine", - "Tags Generation Prompt": "Siltide genereerimise vihje", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "Saba vaba valimit kasutatakse väljundis vähem tõenäoliste tokenite mõju vähendamiseks. Kõrgem väärtus (nt 2,0) vähendab mõju rohkem, samas kui väärtus 1,0 keelab selle seade.", - "Talk to model": "Räägi mudeliga", - "Tap to interrupt": "Puuduta katkestamiseks", - "Tasks": "Ülesanded", - "Tavily API Key": "Tavily API võti", - "Tell us more:": "Räägi meile lähemalt:", - "Temperature": "Temperatuur", - "Template": "Mall", - "Temporary Chat": "Ajutine vestlus", - "Text Splitter": "Teksti tükeldaja", - "Text-to-Speech Engine": "Tekst-kõneks mootor", - "Tfs Z": "Tfs Z", - "Thanks for your feedback!": "Täname tagasiside eest!", - "The Application Account DN you bind with for search": "Rakenduse konto DN, millega seote otsingu jaoks", - "The base to search for users": "Baas kasutajate otsimiseks", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "Partii suurus määrab, mitu tekstipäringut töödeldakse korraga. Suurem partii suurus võib suurendada mudeli jõudlust ja kiirust, kuid see nõuab ka rohkem mälu.", - "The developers behind this plugin are passionate volunteers from the community. 
If you find this plugin helpful, please consider contributing to its development.": "Selle pistikprogrammi taga olevad arendajad on kogukonna pühendunud vabatahtlikud. Kui leiate, et see pistikprogramm on kasulik, palun kaaluge selle arendamise toetamist.", - "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Hindamise edetabel põhineb Elo hindamissüsteemil ja seda uuendatakse reaalajas.", - "The LDAP attribute that maps to the mail that users use to sign in.": "LDAP atribuut, mis kaardistab e-posti, mida kasutajad kasutavad sisselogimiseks.", - "The LDAP attribute that maps to the username that users use to sign in.": "LDAP atribuut, mis kaardistab kasutajanime, mida kasutajad kasutavad sisselogimiseks.", - "The leaderboard is currently in beta, and we may adjust the rating calculations as we refine the algorithm.": "Edetabel on praegu beetaversioonina ja me võime kohandada hindamisarvutusi algoritmi täiustamisel.", - "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Maksimaalne failisuurus MB-des. Kui failisuurus ületab seda piiri, faili ei laadita üles.", - "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Maksimaalne failide arv, mida saab korraga vestluses kasutada. Kui failide arv ületab selle piiri, faile ei laadita üles.", - "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Skoor peaks olema väärtus vahemikus 0,0 (0%) kuni 1,0 (100%).", - "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "Mudeli temperatuur. Temperatuuri suurendamine paneb mudeli vastama loovamalt.", - "Theme": "Teema", - "Thinking...": "Mõtleb...", - "This action cannot be undone. Do you wish to continue?": "Seda toimingut ei saa tagasi võtta. 
Kas soovite jätkata?", - "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "See tagab, et teie väärtuslikud vestlused salvestatakse turvaliselt teie tagarakenduse andmebaasi. Täname!", - "This is an experimental feature, it may not function as expected and is subject to change at any time.": "See on katsetuslik funktsioon, see ei pruugi toimida ootuspäraselt ja võib igal ajal muutuda.", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "See valik kontrollib, mitu tokenit säilitatakse konteksti värskendamisel. Näiteks kui see on määratud 2-le, säilitatakse vestluse konteksti viimased 2 tokenit. Konteksti säilitamine võib aidata säilitada vestluse järjepidevust, kuid võib vähendada võimet reageerida uutele teemadele.", - "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "See valik määrab maksimaalse tokenite arvu, mida mudel saab oma vastuses genereerida. 
Selle piirmäära suurendamine võimaldab mudelil anda pikemaid vastuseid, kuid võib suurendada ka ebavajaliku või ebaolulise sisu genereerimise tõenäosust.", - "This option will delete all existing files in the collection and replace them with newly uploaded files.": "See valik kustutab kõik olemasolevad failid kogust ja asendab need äsja üleslaaditud failidega.", - "This response was generated by \"{{model}}\"": "Selle vastuse genereeris \"{{model}}\"", - "This will delete": "See kustutab", - "This will delete {{NAME}} and all its contents.": "See kustutab {{NAME}} ja kogu selle sisu.", - "This will delete all models including custom models": "See kustutab kõik mudelid, sealhulgas kohandatud mudelid", - "This will delete all models including custom models and cannot be undone.": "See kustutab kõik mudelid, sealhulgas kohandatud mudelid, ja seda ei saa tagasi võtta.", - "This will reset the knowledge base and sync all files. Do you wish to continue?": "See lähtestab teadmiste baasi ja sünkroniseerib kõik failid. Kas soovite jätkata?", - "Thorough explanation": "Põhjalik selgitus", - "Thought for {{DURATION}}": "Mõtles {{DURATION}}", - "Thought for {{DURATION}} seconds": "Mõtles {{DURATION}} sekundit", - "Tika": "Tika", - "Tika Server URL required.": "Tika serveri URL on nõutav.", - "Tiktoken": "Tiktoken", - "Tip: Update multiple variable slots consecutively by pressing the tab key in the chat input after each replacement.": "Nõuanne: Värskendage mitut muutuja kohta järjestikku, vajutades pärast iga asendust vestluse sisendis tabeldusklahvi.", - "Title": "Pealkiri", - "Title (e.g. 
Tell me a fun fact)": "Pealkiri (nt Räägi mulle üks huvitav fakt)", - "Title Auto-Generation": "Pealkirja automaatne genereerimine", - "Title cannot be an empty string.": "Pealkiri ei saa olla tühi string.", - "Title Generation": "Pealkirja genereerimine", - "Title Generation Prompt": "Pealkirja genereerimise vihje", - "TLS": "TLS", - "To access the available model names for downloading,": "Juurdepääsuks saadaolevatele mudelinimedele allalaadimiseks,", - "To access the GGUF models available for downloading,": "Juurdepääsuks allalaadimiseks saadaolevatele GGUF mudelitele,", - "To access the WebUI, please reach out to the administrator. Admins can manage user statuses from the Admin Panel.": "WebUI-le juurdepääsuks võtke ühendust administraatoriga. Administraatorid saavad hallata kasutajate staatuseid administraatori paneelist.", - "To attach knowledge base here, add them to the \"Knowledge\" workspace first.": "Teadmiste baasi siia lisamiseks lisage need esmalt \"Teadmiste\" tööalale.", - "To learn more about available endpoints, visit our documentation.": "Saadaolevate lõpp-punktide kohta rohkem teada saamiseks külastage meie dokumentatsiooni.", - "To protect your privacy, only ratings, model IDs, tags, and metadata are shared from your feedback—your chat logs remain private and are not included.": "Teie privaatsuse kaitsmiseks jagatakse teie tagasisidest ainult hinnanguid, mudeli ID-sid, silte ja metaandmeid - teie vestluslogi jääb privaatseks ja neid ei kaasata.", - "To select actions here, add them to the \"Functions\" workspace first.": "Toimingute siit valimiseks lisage need esmalt \"Funktsioonide\" tööalale.", - "To select filters here, add them to the \"Functions\" workspace first.": "Filtrite siit valimiseks lisage need esmalt \"Funktsioonide\" tööalale.", - "To select toolkits here, add them to the \"Tools\" workspace first.": "Tööriistakomplektide siit valimiseks lisage need esmalt \"Tööriistade\" tööalale.", - "Toast notifications for new updates": 
"Hüpikmärguanded uuenduste kohta", - "Today": "Täna", - "Toggle settings": "Lülita seaded", - "Toggle sidebar": "Lülita külgriba", - "Token": "Token", - "Tokens To Keep On Context Refresh (num_keep)": "Konteksti värskendamisel säilitatavad tokenid (num_keep)", - "Too verbose": "Liiga paljusõnaline", - "Tool created successfully": "Tööriist edukalt loodud", - "Tool deleted successfully": "Tööriist edukalt kustutatud", - "Tool Description": "Tööriista kirjeldus", - "Tool ID": "Tööriista ID", - "Tool imported successfully": "Tööriist edukalt imporditud", - "Tool Name": "Tööriista nimi", - "Tool updated successfully": "Tööriist edukalt uuendatud", - "Tools": "Tööriistad", - "Tools Access": "Tööriistade juurdepääs", - "Tools are a function calling system with arbitrary code execution": "Tööriistad on funktsioonide kutsumise süsteem suvalise koodi täitmisega", - "Tools Function Calling Prompt": "Tööriistade funktsioonide kutsumise vihje", - "Tools have a function calling system that allows arbitrary code execution": "Tööriistadel on funktsioonide kutsumise süsteem, mis võimaldab suvalise koodi täitmist", - "Tools have a function calling system that allows arbitrary code execution.": "Tööriistadel on funktsioonide kutsumise süsteem, mis võimaldab suvalise koodi täitmist.", - "Top K": "Top K", - "Top P": "Top P", - "Transformers": "Transformers", - "Trouble accessing Ollama?": "Probleeme Ollama juurdepääsuga?", - "Trust Proxy Environment": "Usalda puhverserveri keskkonda", - "TTS Model": "TTS mudel", - "TTS Settings": "TTS seaded", - "TTS Voice": "TTS hääl", - "Type": "Tüüp", - "Type Hugging Face Resolve (Download) URL": "Sisestage Hugging Face Resolve (Allalaadimise) URL", - "Uh-oh! There was an issue with the response.": "Oi-oi! 
Vastusega oli probleem.", - "UI": "Kasutajaliides", - "Unarchive All": "Eemalda kõik arhiivist", - "Unarchive All Archived Chats": "Eemalda kõik arhiveeritud vestlused arhiivist", - "Unarchive Chat": "Eemalda vestlus arhiivist", - "Unlock mysteries": "Ava mõistatused", - "Unpin": "Võta lahti", - "Unravel secrets": "Ava saladused", - "Untagged": "Sildistamata", - "Update": "Uuenda", - "Update and Copy Link": "Uuenda ja kopeeri link", - "Update for the latest features and improvements.": "Uuendage, et saada uusimad funktsioonid ja täiustused.", - "Update password": "Uuenda parooli", - "Updated": "Uuendatud", - "Updated at": "Uuendamise aeg", - "Updated At": "Uuendamise aeg", - "Upgrade to a licensed plan for enhanced capabilities, including custom theming and branding, and dedicated support.": "Uuendage litsentseeritud plaanile täiustatud võimaluste jaoks, sealhulgas kohandatud teemad ja bränding ning pühendatud tugi.", - "Upload": "Laadi üles", - "Upload a GGUF model": "Laadige üles GGUF mudel", - "Upload directory": "Üleslaadimise kataloog", - "Upload files": "Laadi failid üles", - "Upload Files": "Laadi failid üles", - "Upload Pipeline": "Laadi torustik üles", - "Upload Progress": "Üleslaadimise progress", - "URL": "URL", - "URL Mode": "URL režiim", - "Use '#' in the prompt input to load and include your knowledge.": "Kasutage '#' vihjete sisendis, et laadida ja kaasata oma teadmised.", - "Use Gravatar": "Kasuta Gravatari", - "Use groups to group your users and assign permissions.": "Kasutage gruppe oma kasutajate grupeerimiseks ja õiguste määramiseks.", - "Use Initials": "Kasuta initsiaale", - "use_mlock (Ollama)": "use_mlock (Ollama)", - "use_mmap (Ollama)": "use_mmap (Ollama)", - "user": "kasutaja", - "User": "Kasutaja", - "User location successfully retrieved.": "Kasutaja asukoht edukalt hangitud.", - "Username": "Kasutajanimi", - "Users": "Kasutajad", - "Using the default arena model with all models. 
Click the plus button to add custom models.": "Kasutatakse vaikimisi areena mudelit kõigi mudelitega. Kohandatud mudelite lisamiseks klõpsake plussmärgiga nuppu.", - "Utilize": "Kasuta", - "Valid time units:": "Kehtivad ajaühikud:", - "Valves": "Klapid", - "Valves updated": "Klapid uuendatud", - "Valves updated successfully": "Klapid edukalt uuendatud", - "variable": "muutuja", - "variable to have them replaced with clipboard content.": "muutuja, et need asendataks lõikelaua sisuga.", - "Version": "Versioon", - "Version {{selectedVersion}} of {{totalVersions}}": "Versioon {{selectedVersion}} / {{totalVersions}}", - "View Replies": "Vaata vastuseid", - "Visibility": "Nähtavus", - "Voice": "Hääl", - "Voice Input": "Hääle sisend", - "Warning": "Hoiatus", - "Warning:": "Hoiatus:", - "Warning: Enabling this will allow users to upload arbitrary code on the server.": "Hoiatus: Selle lubamine võimaldab kasutajatel üles laadida suvalist koodi serverisse.", - "Warning: If you update or change your embedding model, you will need to re-import all documents.": "Hoiatus: Kui uuendate või muudate oma manustamise mudelit, peate kõik dokumendid uuesti importima.", - "Warning: Jupyter execution enables arbitrary code execution, posing severe security risks—proceed with extreme caution.": "Hoiatus: Jupyter täitmine võimaldab suvalise koodi käivitamist, mis kujutab endast tõsist turvariski - jätkake äärmise ettevaatusega.", - "Web": "Veeb", - "Web API": "Veebi API", - "Web Search": "Veebiotsing", - "Web Search Engine": "Veebi otsingumootor", - "Web Search in Chat": "Veebiotsing vestluses", - "Web Search Query Generation": "Veebi otsingupäringu genereerimine", - "Webhook URL": "Webhooki URL", - "WebUI Settings": "WebUI seaded", - "WebUI URL": "WebUI URL", - "WebUI will make requests to \"{{url}}/api/chat\"": "WebUI teeb päringuid aadressile \"{{url}}/api/chat\"", - "WebUI will make requests to \"{{url}}/chat/completions\"": "WebUI teeb päringuid aadressile 
\"{{url}}/chat/completions\"", - "What are you trying to achieve?": "Mida te püüate saavutada?", - "What are you working on?": "Millega te tegelete?", - "What’s New in": "Mis on uut", - "When enabled, the model will respond to each chat message in real-time, generating a response as soon as the user sends a message. This mode is useful for live chat applications, but may impact performance on slower hardware.": "Kui see on lubatud, vastab mudel igale vestlussõnumile reaalajas, genereerides vastuse niipea, kui kasutaja sõnumi saadab. See režiim on kasulik reaalajas vestlusrakendustes, kuid võib mõjutada jõudlust aeglasema riistvara puhul.", - "wherever you are": "kus iganes te olete", - "Whisper (Local)": "Whisper (lokaalne)", - "Why?": "Miks?", - "Widescreen Mode": "Laiekraani režiim", - "Won": "Võitis", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "Töötab koos top-k-ga. Kõrgem väärtus (nt 0,95) annab tulemuseks mitmekesisema teksti, samas kui madalam väärtus (nt 0,5) genereerib keskendunuma ja konservatiivsema teksti.", - "Workspace": "Tööala", - "Workspace Permissions": "Tööala õigused", - "Write": "Kirjuta", - "Write a prompt suggestion (e.g. Who are you?)": "Kirjutage vihje soovitus (nt Kes sa oled?)", - "Write a summary in 50 words that summarizes [topic or keyword].": "Kirjutage 50-sõnaline kokkuvõte, mis võtab kokku [teema või märksõna].", - "Write something...": "Kirjutage midagi...", - "Write your model template content here": "Kirjutage oma mudeli malli sisu siia", - "Yesterday": "Eile", - "You": "Sina", - "You are currently using a trial license. Please contact support to upgrade your license.": "Kasutate praegu proovilitsentsi. 
Palun võtke ühendust toega, et oma litsentsi uuendada.", - "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Saate korraga vestelda maksimaalselt {{maxCount}} faili(ga).", - "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Saate isikupärastada oma suhtlust LLM-idega, lisades mälestusi alumise 'Halda' nupu kaudu, muutes need kasulikumaks ja teile kohandatumaks.", - "You cannot upload an empty file.": "Te ei saa üles laadida tühja faili.", - "You do not have permission to access this feature.": "Teil pole õigust sellele funktsioonile ligi pääseda.", - "You do not have permission to upload files": "Teil pole õigust faile üles laadida", - "You do not have permission to upload files.": "Teil pole õigust faile üles laadida.", - "You have no archived conversations.": "Teil pole arhiveeritud vestlusi.", - "You have shared this chat": "Olete seda vestlust jaganud", - "You're a helpful assistant.": "Oled abivalmis assistent.", - "You're now logged in.": "Olete nüüd sisse logitud.", - "Your account status is currently pending activation.": "Teie konto staatus on praegu ootel aktiveerimist.", - "Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "Kogu teie toetus läheb otse pistikprogrammi arendajale; Open WebUI ei võta mingit protsenti. Kuid valitud rahastamisplatvormil võivad olla oma tasud.", - "Youtube": "Youtube", - "Youtube Language": "Youtube keel", - "Youtube Proxy URL": "Youtube puhverserveri URL" -} \ No newline at end of file + "-1 for no limit, or a positive integer for a specific limit": "-1 piirangu puudumisel või positiivne täisarv konkreetse piirangu jaoks", + "'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.": "'s', 'm', 'h', 'd', 'w' või '-1' aegumiseta.", + "(e.g. 
`sh webui.sh --api --api-auth username_password`)": "(nt `sh webui.sh --api --api-auth kasutajanimi_parool`)", + "(e.g. `sh webui.sh --api`)": "(nt `sh webui.sh --api`)", + "(latest)": "(uusim)", + "{{ models }}": "{{ mudelid }}", + "{{COUNT}} hidden lines": "{{COUNT}} peidetud rida", + "{{COUNT}} Replies": "{{COUNT}} vastust", + "{{user}}'s Chats": "{{user}} vestlused", + "{{webUIName}} Backend Required": "{{webUIName}} taustaserver on vajalik", + "*Prompt node ID(s) are required for image generation": "*Vihje sõlme ID(d) on piltide genereerimiseks vajalikud", + "A new version (v{{LATEST_VERSION}}) is now available.": "Uus versioon (v{{LATEST_VERSION}}) on saadaval.", + "A task model is used when performing tasks such as generating titles for chats and web search queries": "Ülesande mudelit kasutatakse selliste toimingute jaoks nagu vestluste pealkirjade ja veebiotsingu päringute genereerimine", + "a user": "kasutaja", + "About": "Teave", + "Accept autocomplete generation / Jump to prompt variable": "Nõustu automaattäitmisega / Liigu vihjete muutujale", + "Access": "Juurdepääs", + "Access Control": "Juurdepääsu kontroll", + "Accessible to all users": "Kättesaadav kõigile kasutajatele", + "Account": "Konto", + "Account Activation Pending": "Konto aktiveerimine ootel", + "Accurate information": "Täpne informatsioon", + "Actions": "Toimingud", + "Activate": "Aktiveeri", + "Activate this command by typing \"/{{COMMAND}}\" to chat input.": "Aktiveeri see käsk, trükkides \"/{{COMMAND}}\" vestluse sisendritta.", + "Active Users": "Aktiivsed kasutajad", + "Add": "Lisa", + "Add a model ID": "Lisa mudeli ID", + "Add a short description about what this model does": "Lisa lühike kirjeldus, mida see mudel teeb", + "Add a tag": "Lisa silt", + "Add Arena Model": "Lisa Areena mudel", + "Add Connection": "Lisa ühendus", + "Add Content": "Lisa sisu", + "Add content here": "Lisa siia sisu", + "Add custom prompt": "Lisa kohandatud vihjeid", + "Add Files": "Lisa faile", + "Add Group": 
"Lisa grupp", + "Add Memory": "Lisa mälu", + "Add Model": "Lisa mudel", + "Add Reaction": "Lisa reaktsioon", + "Add Tag": "Lisa silt", + "Add Tags": "Lisa silte", + "Add text content": "Lisa tekstisisu", + "Add User": "Lisa kasutaja", + "Add User Group": "Lisa kasutajagrupp", + "Adjusting these settings will apply changes universally to all users.": "Nende seadete kohandamine rakendab muudatused universaalselt kõigile kasutajatele.", + "admin": "admin", + "Admin": "Administraator", + "Admin Panel": "Administraatori paneel", + "Admin Settings": "Administraatori seaded", + "Admins have access to all tools at all times; users need tools assigned per model in the workspace.": "Administraatoritel on alati juurdepääs kõigile tööriistadele; kasutajatele tuleb tööriistad määrata mudeli põhiselt tööruumis.", + "Advanced Parameters": "Täpsemad parameetrid", + "Advanced Params": "Täpsemad parameetrid", + "All": "Kõik", + "All Documents": "Kõik dokumendid", + "All models deleted successfully": "Kõik mudelid edukalt kustutatud", + "Allow Chat Controls": "Luba vestluse kontrollnupud", + "Allow Chat Delete": "Luba vestluse kustutamine", + "Allow Chat Deletion": "Luba vestluse kustutamine", + "Allow Chat Edit": "Luba vestluse muutmine", + "Allow File Upload": "Luba failide üleslaadimine", + "Allow non-local voices": "Luba mitte-lokaalsed hääled", + "Allow Temporary Chat": "Luba ajutine vestlus", + "Allow User Location": "Luba kasutaja asukoht", + "Allow Voice Interruption in Call": "Luba hääle katkestamine kõnes", + "Allowed Endpoints": "Lubatud lõpp-punktid", + "Already have an account?": "Kas teil on juba konto?", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "Alternatiiv top_p-le ja eesmärk on tagada kvaliteedi ja mitmekesisuse tasakaal. Parameeter p esindab minimaalset tõenäosust tokeni arvesse võtmiseks, võrreldes kõige tõenäolisema tokeni tõenäosusega. Näiteks p=0.05 korral, kui kõige tõenäolisema tokeni tõenäosus on 0.9, filtreeritakse välja logitid väärtusega alla 0.045.", + "Always": "Alati", + "Always Collapse Code Blocks": "", + "Always Expand Details": "", + "Amazing": "Suurepärane", + "an assistant": "assistent", + "Analyzed": "Analüüsitud", + "Analyzing...": "Analüüsimine...", + "and": "ja", + "and {{COUNT}} more": "ja veel {{COUNT}}", + "and create a new shared link.": "ja looge uus jagatud link.", + "API Base URL": "API baas-URL", + "API Key": "API võti", + "API Key created.": "API võti loodud.", + "API Key Endpoint Restrictions": "API võtme lõpp-punkti piirangud", + "API keys": "API võtmed", + "Application DN": "Rakenduse DN", + "Application DN Password": "Rakenduse DN parool", + "applies to all users with the \"user\" role": "kehtib kõigile kasutajatele \"kasutaja\" rolliga", + "April": "Aprill", + "Archive": "Arhiveeri", + "Archive All Chats": "Arhiveeri kõik vestlused", + "Archived Chats": "Arhiveeritud vestlused", + "archived-chat-export": "arhiveeritud-vestluste-eksport", + "Are you sure you want to clear all memories? This action cannot be undone.": "Kas olete kindel, et soovite kustutada kõik mälestused? 
Seda toimingut ei saa tagasi võtta.", + "Are you sure you want to delete this channel?": "Kas olete kindel, et soovite selle kanali kustutada?", + "Are you sure you want to delete this message?": "Kas olete kindel, et soovite selle sõnumi kustutada?", + "Are you sure you want to unarchive all archived chats?": "Kas olete kindel, et soovite kõik arhiveeritud vestlused arhiivist eemaldada?", + "Are you sure?": "Kas olete kindel?", + "Arena Models": "Areena mudelid", + "Artifacts": "Tekkinud objektid", + "Ask": "Küsi", + "Ask a question": "Esita küsimus", + "Assistant": "Assistent", + "Attach file from knowledge": "Lisa fail teadmiste baasist", + "Attention to detail": "Tähelepanu detailidele", + "Attribute for Mail": "E-posti atribuut", + "Attribute for Username": "Kasutajanime atribuut", + "Audio": "Heli", + "August": "August", + "Authenticate": "Autendi", + "Authentication": "Autentimine", + "Auto-Copy Response to Clipboard": "Kopeeri vastus automaatselt lõikelauale", + "Auto-playback response": "Mängi vastus automaatselt", + "Autocomplete Generation": "Automaattäitmise genereerimine", + "Autocomplete Generation Input Max Length": "Automaattäitmise genereerimise sisendi maksimaalne pikkus", + "Automatic1111": "Automatic1111", + "AUTOMATIC1111 Api Auth String": "AUTOMATIC1111 API autentimise string", + "AUTOMATIC1111 Base URL": "AUTOMATIC1111 baas-URL", + "AUTOMATIC1111 Base URL is required.": "AUTOMATIC1111 baas-URL on nõutav.", + "Available list": "Saadaolevate nimekiri", + "available!": "saadaval!", + "Awful": "Kohutav", + "Azure AI Speech": "Azure AI Kõne", + "Azure Region": "Azure regioon", + "Back": "Tagasi", + "Bad Response": "Halb vastus", + "Banners": "Bännerid", + "Base Model (From)": "Baas mudel (Allikas)", + "Batch Size (num_batch)": "Partii suurus (num_batch)", + "before": "enne", + "Being lazy": "Laisklemine", + "Beta": "Beeta", + "Bing Search V7 Endpoint": "Bing Search V7 lõpp-punkt", + "Bing Search V7 Subscription Key": "Bing Search V7 tellimuse 
võti", + "Bocha Search API Key": "Bocha otsingu API võti", + "Boosting or penalizing specific tokens for constrained responses. Bias values will be clamped between -100 and 100 (inclusive). (Default: none)": "Konkreetsete tokenite võimendamine või karistamine piiratud vastuste jaoks. Kallutatuse väärtused piiratakse vahemikku -100 kuni 100 (kaasa arvatud). (Vaikimisi: puudub)", + "Brave Search API Key": "Brave Search API võti", + "By {{name}}": "Autor: {{name}}", + "Bypass Embedding and Retrieval": "Möödaminek sisestamisest ja taastamisest", + "Bypass SSL verification for Websites": "Möödaminek veebisaitide SSL-kontrollimisest", + "Calendar": "Kalender", + "Call": "Kõne", + "Call feature is not supported when using Web STT engine": "Kõnefunktsioon ei ole Web STT mootorit kasutades toetatud", + "Camera": "Kaamera", + "Cancel": "Tühista", + "Capabilities": "Võimekused", + "Capture": "Jäädvusta", + "Certificate Path": "Sertifikaadi tee", + "Change Password": "Muuda parooli", + "Channel Name": "Kanali nimi", + "Channels": "Kanalid", + "Character": "Tegelane", + "Character limit for autocomplete generation input": "Märkide piirang automaattäitmise genereerimise sisendile", + "Chart new frontiers": "Kaardista uusi piire", + "Chat": "Vestlus", + "Chat Background Image": "Vestluse taustapilt", + "Chat Bubble UI": "Vestlusmullide kasutajaliides", + "Chat Controls": "Vestluse juhtnupud", + "Chat direction": "Vestluse suund", + "Chat Overview": "Vestluse ülevaade", + "Chat Permissions": "Vestluse õigused", + "Chat Tags Auto-Generation": "Vestluse siltide automaatne genereerimine", + "Chats": "Vestlused", + "Check Again": "Kontrolli uuesti", + "Check for updates": "Kontrolli uuendusi", + "Checking for updates...": "Uuenduste kontrollimine...", + "Choose a model before saving...": "Valige mudel enne salvestamist...", + "Chunk Overlap": "Tükkide ülekate", + "Chunk Size": "Tüki suurus", + "Ciphers": "Šifrid", + "Citation": "Viide", + "Clear memory": "Tühjenda mälu", + "Clear 
Memory": "Tühjenda mälu", + "click here": "klõpsake siia", + "Click here for filter guides.": "Filtri juhiste jaoks klõpsake siia.", + "Click here for help.": "Abi saamiseks klõpsake siia.", + "Click here to": "Klõpsake siia, et", + "Click here to download user import template file.": "Klõpsake siia kasutajate importimise mallifaili allalaadimiseks.", + "Click here to learn more about faster-whisper and see the available models.": "Klõpsake siia, et teada saada rohkem faster-whisper kohta ja näha saadaolevaid mudeleid.", + "Click here to see available models.": "Klõpsake siia, et näha saadaolevaid mudeleid.", + "Click here to select": "Klõpsake siia valimiseks", + "Click here to select a csv file.": "Klõpsake siia csv-faili valimiseks.", + "Click here to select a py file.": "Klõpsake siia py-faili valimiseks.", + "Click here to upload a workflow.json file.": "Klõpsake siia workflow.json faili üleslaadimiseks.", + "click here.": "klõpsake siia.", + "Click on the user role button to change a user's role.": "Kasutaja rolli muutmiseks klõpsake kasutaja rolli nuppu.", + "Clipboard write permission denied. Please check your browser settings to grant the necessary access.": "Lõikelaua kirjutamisõigust ei antud. 
Kontrollige oma brauseri seadeid, et anda vajalik juurdepääs.", + "Clone": "Klooni", + "Clone Chat": "Klooni vestlus", + "Clone of {{TITLE}}": "{{TITLE}} koopia", + "Close": "Sulge", + "Code execution": "Koodi täitmine", + "Code Execution": "Koodi täitmine", + "Code Execution Engine": "Koodi täitmise mootor", + "Code Execution Timeout": "Koodi täitmise aegumine", + "Code formatted successfully": "Kood vormindatud edukalt", + "Code Interpreter": "Koodi interpretaator", + "Code Interpreter Engine": "Koodi interpretaatori mootor", + "Code Interpreter Prompt Template": "Koodi interpretaatori vihje mall", + "Collapse": "Ahenda", + "Collection": "Kogu", + "Color": "Värv", + "ComfyUI": "ComfyUI", + "ComfyUI API Key": "ComfyUI API võti", + "ComfyUI Base URL": "ComfyUI baas-URL", + "ComfyUI Base URL is required.": "ComfyUI baas-URL on nõutav.", + "ComfyUI Workflow": "ComfyUI töövoog", + "ComfyUI Workflow Nodes": "ComfyUI töövoo sõlmed", + "Command": "Käsk", + "Completions": "Lõpetamised", + "Concurrent Requests": "Samaaegsed päringud", + "Configure": "Konfigureeri", + "Confirm": "Kinnita", + "Confirm Password": "Kinnita parool", + "Confirm your action": "Kinnita oma toiming", + "Confirm your new password": "Kinnita oma uus parool", + "Connect to your own OpenAI compatible API endpoints.": "Ühendu oma OpenAI-ga ühilduvate API lõpp-punktidega.", + "Connections": "Ühendused", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "Piirab arutluse pingutust arutlusvõimelistele mudelitele. 
Kohaldatav ainult konkreetsete pakkujate arutlusmudelitele, mis toetavad arutluspingutust.", + "Contact Admin for WebUI Access": "Võtke WebUI juurdepääsu saamiseks ühendust administraatoriga", + "Content": "Sisu", + "Content Extraction Engine": "Sisu ekstraheerimise mootor", + "Context Length": "Konteksti pikkus", + "Continue Response": "Jätka vastust", + "Continue with {{provider}}": "Jätka {{provider}}-ga", + "Continue with Email": "Jätka e-postiga", + "Continue with LDAP": "Jätka LDAP-ga", + "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Kontrolli, kuidas sõnumitekst on jagatud TTS-päringute jaoks. 'Kirjavahemärgid' jagab lauseteks, 'lõigud' jagab lõikudeks ja 'puudub' hoiab sõnumi ühe stringina.", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "Kontrollige tokeni järjestuste kordumist genereeritud tekstis. Kõrgem väärtus (nt 1,5) karistab kordusi tugevamalt, samas kui madalam väärtus (nt 1,1) on leebem. Väärtuse 1 korral on see keelatud.", + "Controls": "Juhtnupud", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "Kontrollib väljundi sidususe ja mitmekesisuse vahelist tasakaalu. 
Madalam väärtus annab tulemuseks fokuseerituma ja sidusama teksti.", + "Copied": "Kopeeritud", + "Copied shared chat URL to clipboard!": "Jagatud vestluse URL kopeeritud lõikelauale!", + "Copied to clipboard": "Kopeeritud lõikelauale", + "Copy": "Kopeeri", + "Copy last code block": "Kopeeri viimane koodiplokk", + "Copy last response": "Kopeeri viimane vastus", + "Copy Link": "Kopeeri link", + "Copy to clipboard": "Kopeeri lõikelauale", + "Copying to clipboard was successful!": "Lõikelauale kopeerimine õnnestus!", + "CORS must be properly configured by the provider to allow requests from Open WebUI.": "Teenusepakkuja peab nõuetekohaselt konfigureerima CORS-i, et lubada päringuid Open WebUI-lt.", + "Create": "Loo", + "Create a knowledge base": "Loo teadmiste baas", + "Create a model": "Loo mudel", + "Create Account": "Loo konto", + "Create Admin Account": "Loo administraatori konto", + "Create Channel": "Loo kanal", + "Create Group": "Loo grupp", + "Create Knowledge": "Loo teadmised", + "Create new key": "Loo uus võti", + "Create new secret key": "Loo uus salavõti", + "Created at": "Loomise aeg", + "Created At": "Loomise aeg", + "Created by": "Autor", + "CSV Import": "CSV import", + "Ctrl+Enter to Send": "Ctrl+Enter saatmiseks", + "Current Model": "Praegune mudel", + "Current Password": "Praegune parool", + "Custom": "Kohandatud", + "Danger Zone": "Ohutsoon", + "Dark": "Tume", + "Database": "Andmebaas", + "December": "Detsember", + "Default": "Vaikimisi", + "Default (Open AI)": "Vaikimisi (Open AI)", + "Default (SentenceTransformers)": "Vaikimisi (SentenceTransformers)", + "Default mode works with a wider range of models by calling tools once before execution. 
Native mode leverages the model’s built-in tool-calling capabilities, but requires the model to inherently support this feature.": "", + "Default Model": "Vaikimisi mudel", + "Default model updated": "Vaikimisi mudel uuendatud", + "Default Models": "Vaikimisi mudelid", + "Default permissions": "Vaikimisi õigused", + "Default permissions updated successfully": "Vaikimisi õigused edukalt uuendatud", + "Default Prompt Suggestions": "Vaikimisi vihjete soovitused", + "Default to 389 or 636 if TLS is enabled": "Vaikimisi 389 või 636, kui TLS on lubatud", + "Default to ALL": "Vaikimisi KÕIK", + "Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "", + "Default User Role": "Vaikimisi kasutaja roll", + "Delete": "Kustuta", + "Delete a model": "Kustuta mudel", + "Delete All Chats": "Kustuta kõik vestlused", + "Delete All Models": "Kustuta kõik mudelid", + "Delete chat": "Kustuta vestlus", + "Delete Chat": "Kustuta vestlus", + "Delete chat?": "Kustutada vestlus?", + "Delete folder?": "Kustutada kaust?", + "Delete function?": "Kustutada funktsioon?", + "Delete Message": "Kustuta sõnum", + "Delete message?": "Kustutada sõnum?", + "Delete prompt?": "Kustutada vihjed?", + "delete this link": "kustuta see link", + "Delete tool?": "Kustutada tööriist?", + "Delete User": "Kustuta kasutaja", + "Deleted {{deleteModelTag}}": "Kustutatud {{deleteModelTag}}", + "Deleted {{name}}": "Kustutatud {{name}}", + "Deleted User": "Kustutatud kasutaja", + "Describe your knowledge base and objectives": "Kirjeldage oma teadmiste baasi ja eesmärke", + "Description": "Kirjeldus", + "Didn't fully follow instructions": "Ei järginud täielikult juhiseid", + "Direct": "", + "Direct Connections": "Otsesed ühendused", + "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "Otsesed ühendused võimaldavad kasutajatel ühenduda oma OpenAI-ga ühilduvate API lõpp-punktidega.", + "Direct Connections settings 
updated": "Otseste ühenduste seaded uuendatud", + "Disabled": "Keelatud", + "Discover a function": "Avasta funktsioon", + "Discover a model": "Avasta mudel", + "Discover a prompt": "Avasta vihje", + "Discover a tool": "Avasta tööriist", + "Discover how to use Open WebUI and seek support from the community.": "Avastage, kuidas kasutada Open WebUI-d ja otsige tuge kogukonnalt.", + "Discover wonders": "Avasta imesid", + "Discover, download, and explore custom functions": "Avasta, laadi alla ja uuri kohandatud funktsioone", + "Discover, download, and explore custom prompts": "Avasta, laadi alla ja uuri kohandatud vihjeid", + "Discover, download, and explore custom tools": "Avasta, laadi alla ja uuri kohandatud tööriistu", + "Discover, download, and explore model presets": "Avasta, laadi alla ja uuri mudeli eelseadistusi", + "Dismissible": "Sulgetav", + "Display": "Kuva", + "Display Emoji in Call": "Kuva kõnes emoji", + "Display the username instead of You in the Chat": "Kuva vestluses 'Sina' asemel kasutajanimi", + "Displays citations in the response": "Kuvab vastuses viited", + "Dive into knowledge": "Sukeldu teadmistesse", + "Do not install functions from sources you do not fully trust.": "Ärge installige funktsioone allikatest, mida te täielikult ei usalda.", + "Do not install tools from sources you do not fully trust.": "Ärge installige tööriistu allikatest, mida te täielikult ei usalda.", + "Docling": "", + "Docling Server URL required.": "", + "Document": "Dokument", + "Document Intelligence": "Dokumendi intelligentsus", + "Document Intelligence endpoint and key required.": "Dokumendi intelligentsuse lõpp-punkt ja võti on nõutavad.", + "Documentation": "Dokumentatsioon", + "Documents": "Dokumendid", + "does not make any external connections, and your data stays securely on your locally hosted server.": "ei loo väliseid ühendusi ja teie andmed jäävad turvaliselt teie kohalikult majutatud serverisse.", + "Domain Filter List": "Domeeni filtri nimekiri", + "Don't 
have an account?": "Pole kontot?", + "don't install random functions from sources you don't trust.": "ärge installige juhuslikke funktsioone allikatest, mida te ei usalda.", + "don't install random tools from sources you don't trust.": "ärge installige juhuslikke tööriistu allikatest, mida te ei usalda.", + "Don't like the style": "Stiil ei meeldi", + "Done": "Valmis", + "Download": "Laadi alla", + "Download as SVG": "Laadi alla SVG-na", + "Download canceled": "Allalaadimine tühistatud", + "Download Database": "Laadi alla andmebaas", + "Drag and drop a file to upload or select a file to view": "Lohistage ja kukutage fail üleslaadimiseks või valige fail vaatamiseks", + "Draw": "Joonista", + "Drop any files here to add to the conversation": "Lohistage siia mistahes failid, et lisada need vestlusele", + "e.g. '30s','10m'. Valid time units are 's', 'm', 'h'.": "nt '30s', '10m'. Kehtivad ajaühikud on 's', 'm', 'h'.", + "e.g. 60": "nt 60", + "e.g. A filter to remove profanity from text": "nt filter, mis eemaldab tekstist roppused", + "e.g. My Filter": "nt Minu Filter", + "e.g. My Tools": "nt Minu Tööriistad", + "e.g. my_filter": "nt minu_filter", + "e.g. my_tools": "nt minu_toriistad", + "e.g. 
Tools for performing various operations": "nt tööriistad mitmesuguste operatsioonide teostamiseks", + "Edit": "Muuda", + "Edit Arena Model": "Muuda Areena mudelit", + "Edit Channel": "Muuda kanalit", + "Edit Connection": "Muuda ühendust", + "Edit Default Permissions": "Muuda vaikimisi õigusi", + "Edit Memory": "Muuda mälu", + "Edit User": "Muuda kasutajat", + "Edit User Group": "Muuda kasutajagruppi", + "ElevenLabs": "ElevenLabs", + "Email": "E-post", + "Embark on adventures": "Alusta seiklusi", + "Embedding": "Manustamine", + "Embedding Batch Size": "Manustamise partii suurus", + "Embedding Model": "Manustamise mudel", + "Embedding Model Engine": "Manustamise mudeli mootor", + "Embedding model set to \"{{embedding_model}}\"": "Manustamise mudel määratud kui \"{{embedding_model}}\"", + "Enable API Key": "Luba API võti", + "Enable autocomplete generation for chat messages": "Luba automaattäitmise genereerimine vestlussõnumitele", + "Enable Code Execution": "Luba koodi täitmine", + "Enable Code Interpreter": "Luba koodi interpretaator", + "Enable Community Sharing": "Luba kogukonnaga jagamine", + "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Luba mälu lukustamine (mlock), et vältida mudeli andmete vahetamist RAM-ist välja. See valik lukustab mudeli töökomplekti lehed RAM-i, tagades, et neid ei vahetata kettale. See aitab säilitada jõudlust, vältides lehevigu ja tagades kiire andmete juurdepääsu.", + "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. 
However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Luba mälu kaardistamine (mmap) mudeli andmete laadimiseks. See valik võimaldab süsteemil kasutada kettamahtu RAM-i laiendusena, koheldes kettafaile nii, nagu need oleksid RAM-is. See võib parandada mudeli jõudlust, võimaldades kiiremat andmete juurdepääsu. See ei pruugi siiski kõigi süsteemidega õigesti töötada ja võib tarbida märkimisväärse koguse kettaruumi.", + "Enable Message Rating": "Luba sõnumite hindamine", + "Enable Mirostat sampling for controlling perplexity.": "Luba Mirostat'i valim perplekssuse juhtimiseks.", + "Enable New Sign Ups": "Luba uued registreerimised", + "Enabled": "Lubatud", + "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Veenduge, et teie CSV-fail sisaldab 4 veergu selles järjekorras: Nimi, E-post, Parool, Roll.", + "Enter {{role}} message here": "Sisestage {{role}} sõnum siia", + "Enter a detail about yourself for your LLMs to recall": "Sisestage detail enda kohta, mida teie LLM-id saavad meenutada", + "Enter api auth string (e.g. username:password)": "Sisestage api autentimisstring (nt kasutajanimi:parool)", + "Enter Application DN": "Sisestage rakenduse DN", + "Enter Application DN Password": "Sisestage rakenduse DN parool", + "Enter Bing Search V7 Endpoint": "Sisestage Bing Search V7 lõpp-punkt", + "Enter Bing Search V7 Subscription Key": "Sisestage Bing Search V7 tellimuse võti", + "Enter Bocha Search API Key": "Sisestage Bocha Search API võti", + "Enter Brave Search API Key": "Sisestage Brave Search API võti", + "Enter certificate path": "Sisestage sertifikaadi tee", + "Enter CFG Scale (e.g. 
7.0)": "Sisestage CFG skaala (nt 7.0)", + "Enter Chunk Overlap": "Sisestage tükkide ülekate", + "Enter Chunk Size": "Sisestage tüki suurus", + "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "Sisestage komadega eraldatud \"token:kallutuse_väärtus\" paarid (näide: 5432:100, 413:-100)", + "Enter description": "Sisestage kirjeldus", + "Enter Docling Server URL": "", + "Enter Document Intelligence Endpoint": "Sisestage dokumendi intelligentsuse lõpp-punkt", + "Enter Document Intelligence Key": "Sisestage dokumendi intelligentsuse võti", + "Enter domains separated by commas (e.g., example.com,site.org)": "Sisestage domeenid komadega eraldatult (nt example.com,site.org)", + "Enter Exa API Key": "Sisestage Exa API võti", + "Enter Github Raw URL": "Sisestage Github toorURL", + "Enter Google PSE API Key": "Sisestage Google PSE API võti", + "Enter Google PSE Engine Id": "Sisestage Google PSE mootori ID", + "Enter Image Size (e.g. 512x512)": "Sisestage pildi suurus (nt 512x512)", + "Enter Jina API Key": "Sisestage Jina API võti", + "Enter Jupyter Password": "Sisestage Jupyter parool", + "Enter Jupyter Token": "Sisestage Jupyter token", + "Enter Jupyter URL": "Sisestage Jupyter URL", + "Enter Kagi Search API Key": "Sisestage Kagi Search API võti", + "Enter Key Behavior": "Sisestage võtme käitumine", + "Enter language codes": "Sisestage keelekoodid", + "Enter Model ID": "Sisestage mudeli ID", + "Enter model tag (e.g. {{modelTag}})": "Sisestage mudeli silt (nt {{modelTag}})", + "Enter Mojeek Search API Key": "Sisestage Mojeek Search API võti", + "Enter Number of Steps (e.g. 50)": "Sisestage sammude arv (nt 50)", + "Enter Perplexity API Key": "Sisestage Perplexity API võti", + "Enter proxy URL (e.g. https://user:password@host:port)": "Sisestage puhverserveri URL (nt https://kasutaja:parool@host:port)", + "Enter reasoning effort": "Sisestage arutluspingutus", + "Enter Sampler (e.g. 
Euler a)": "Sisestage valimismeetod (nt Euler a)", + "Enter Scheduler (e.g. Karras)": "Sisestage planeerija (nt Karras)", + "Enter Score": "Sisestage skoor", + "Enter SearchApi API Key": "Sisestage SearchApi API võti", + "Enter SearchApi Engine": "Sisestage SearchApi mootor", + "Enter Searxng Query URL": "Sisestage Searxng päringu URL", + "Enter Seed": "Sisestage seeme", + "Enter SerpApi API Key": "Sisestage SerpApi API võti", + "Enter SerpApi Engine": "Sisestage SerpApi mootor", + "Enter Serper API Key": "Sisestage Serper API võti", + "Enter Serply API Key": "Sisestage Serply API võti", + "Enter Serpstack API Key": "Sisestage Serpstack API võti", + "Enter server host": "Sisestage serveri host", + "Enter server label": "Sisestage serveri silt", + "Enter server port": "Sisestage serveri port", + "Enter stop sequence": "Sisestage lõpetamise järjestus", + "Enter system prompt": "Sisestage süsteemi vihjed", + "Enter Tavily API Key": "Sisestage Tavily API võti", + "Enter the public URL of your WebUI. This URL will be used to generate links in the notifications.": "Sisestage oma WebUI avalik URL. Seda URL-i kasutatakse teadaannetes linkide genereerimiseks.", + "Enter Tika Server URL": "Sisestage Tika serveri URL", + "Enter timeout in seconds": "Sisestage aegumine sekundites", + "Enter to Send": "Enter saatmiseks", + "Enter Top K": "Sisestage Top K", + "Enter URL (e.g. http://127.0.0.1:7860/)": "Sisestage URL (nt http://127.0.0.1:7860/)", + "Enter URL (e.g. 
http://localhost:11434)": "Sisestage URL (nt http://localhost:11434)", + "Enter your current password": "Sisestage oma praegune parool", + "Enter Your Email": "Sisestage oma e-post", + "Enter Your Full Name": "Sisestage oma täisnimi", + "Enter your message": "Sisestage oma sõnum", + "Enter your new password": "Sisestage oma uus parool", + "Enter Your Password": "Sisestage oma parool", + "Enter Your Role": "Sisestage oma roll", + "Enter Your Username": "Sisestage oma kasutajanimi", + "Enter your webhook URL": "Sisestage oma webhook URL", + "Error": "Viga", + "ERROR": "VIGA", + "Error accessing Google Drive: {{error}}": "Viga Google Drive'i juurdepääsul: {{error}}", + "Error uploading file: {{error}}": "Viga faili üleslaadimisel: {{error}}", + "Evaluations": "Hindamised", + "Exa API Key": "Exa API võti", + "Example: (&(objectClass=inetOrgPerson)(uid=%s))": "Näide: (&(objectClass=inetOrgPerson)(uid=%s))", + "Example: ALL": "Näide: ALL", + "Example: mail": "Näide: mail", + "Example: ou=users,dc=foo,dc=example": "Näide: ou=users,dc=foo,dc=example", + "Example: sAMAccountName or uid or userPrincipalName": "Näide: sAMAccountName või uid või userPrincipalName", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "Ületasite litsentsis määratud istekohtade arvu. 
Palun võtke ühendust toega, et suurendada istekohtade arvu.", + "Exclude": "Välista", + "Execute code for analysis": "Käivita kood analüüsimiseks", + "Expand": "Laienda", + "Experimental": "Katsetuslik", + "Explain": "Selgita", + "Explain this section to me in more detail": "Selgitage seda lõiku mulle üksikasjalikumalt", + "Explore the cosmos": "Uuri kosmosest", + "Export": "Ekspordi", + "Export All Archived Chats": "Ekspordi kõik arhiveeritud vestlused", + "Export All Chats (All Users)": "Ekspordi kõik vestlused (kõik kasutajad)", + "Export chat (.json)": "Ekspordi vestlus (.json)", + "Export Chats": "Ekspordi vestlused", + "Export Config to JSON File": "Ekspordi seadistus JSON-failina", + "Export Functions": "Ekspordi funktsioonid", + "Export Models": "Ekspordi mudelid", + "Export Presets": "Ekspordi eelseadistused", + "Export Prompts": "Ekspordi vihjed", + "Export to CSV": "Ekspordi CSV-na", + "Export Tools": "Ekspordi tööriistad", + "External": "", + "External Models": "Välised mudelid", + "Failed to add file.": "Faili lisamine ebaõnnestus.", + "Failed to create API Key.": "API võtme loomine ebaõnnestus.", + "Failed to fetch models": "Mudelite toomine ebaõnnestus", + "Failed to read clipboard contents": "Lõikelaua sisu lugemine ebaõnnestus", + "Failed to save models configuration": "Mudelite konfiguratsiooni salvestamine ebaõnnestus", + "Failed to update settings": "Seadete uuendamine ebaõnnestus", + "Failed to upload file.": "Faili üleslaadimine ebaõnnestus.", + "Features": "Funktsioonid", + "Features Permissions": "Funktsioonide õigused", + "February": "Veebruar", + "Feedback History": "Tagasiside ajalugu", + "Feedbacks": "Tagasisided", + "Feel free to add specific details": "Võite lisada konkreetseid üksikasju", + "File": "Fail", + "File added successfully.": "Fail edukalt lisatud.", + "File content updated successfully.": "Faili sisu edukalt uuendatud.", + "File Mode": "Faili režiim", + "File not found.": "Faili ei leitud.", + "File removed successfully.": 
"Fail edukalt eemaldatud.", + "File size should not exceed {{maxSize}} MB.": "Faili suurus ei tohiks ületada {{maxSize}} MB.", + "File uploaded successfully": "Fail edukalt üles laaditud", + "Files": "Failid", + "Filter is now globally disabled": "Filter on nüüd globaalselt keelatud", + "Filter is now globally enabled": "Filter on nüüd globaalselt lubatud", + "Filters": "Filtrid", + "Fingerprint spoofing detected: Unable to use initials as avatar. Defaulting to default profile image.": "Tuvastati sõrmejälje võltsimine: initsiaalide kasutamine avatarina pole võimalik. Kasutatakse vaikimisi profiilikujutist.", + "Fluidly stream large external response chunks": "Suurte väliste vastuste tükkide sujuv voogedastus", + "Focus chat input": "Fokuseeri vestluse sisendile", + "Folder deleted successfully": "Kaust edukalt kustutatud", + "Folder name cannot be empty": "Kausta nimi ei saa olla tühi", + "Folder name cannot be empty.": "Kausta nimi ei saa olla tühi.", + "Folder name updated successfully": "Kausta nimi edukalt uuendatud", + "Followed instructions perfectly": "Järgis juhiseid täiuslikult", + "Forge new paths": "Loo uusi radu", + "Form": "Vorm", + "Format your variables using brackets like this:": "Vormindage oma muutujad sulgudega nagu siin:", + "Frequency Penalty": "Sageduse karistus", + "Full Context Mode": "Täiskonteksti režiim", + "Function": "Funktsioon", + "Function Calling": "Funktsiooni kutsumine", + "Function created successfully": "Funktsioon edukalt loodud", + "Function deleted successfully": "Funktsioon edukalt kustutatud", + "Function Description": "Funktsiooni kirjeldus", + "Function ID": "Funktsiooni ID", + "Function is now globally disabled": "Funktsioon on nüüd globaalselt keelatud", + "Function is now globally enabled": "Funktsioon on nüüd globaalselt lubatud", + "Function Name": "Funktsiooni nimi", + "Function updated successfully": "Funktsioon edukalt uuendatud", + "Functions": "Funktsioonid", + "Functions allow arbitrary code execution": 
"Funktsioonid võimaldavad suvalise koodi käivitamist", + "Functions allow arbitrary code execution.": "Funktsioonid võimaldavad suvalise koodi käivitamist.", + "Functions imported successfully": "Funktsioonid edukalt imporditud", + "Gemini": "Gemini", + "Gemini API Config": "Gemini API seadistus", + "Gemini API Key is required.": "Gemini API võti on nõutav.", + "General": "Üldine", + "Generate an image": "Genereeri pilt", + "Generate Image": "Genereeri pilt", + "Generate prompt pair": "Genereeri vihjete paar", + "Generating search query": "Otsinguküsimuse genereerimine", + "Get started": "Alusta", + "Get started with {{WEBUI_NAME}}": "Alusta {{WEBUI_NAME}} kasutamist", + "Global": "Globaalne", + "Good Response": "Hea vastus", + "Google Drive": "Google Drive", + "Google PSE API Key": "Google PSE API võti", + "Google PSE Engine Id": "Google PSE mootori ID", + "Group created successfully": "Grupp edukalt loodud", + "Group deleted successfully": "Grupp edukalt kustutatud", + "Group Description": "Grupi kirjeldus", + "Group Name": "Grupi nimi", + "Group updated successfully": "Grupp edukalt uuendatud", + "Groups": "Grupid", + "Haptic Feedback": "Haptiline tagasiside", + "has no conversations.": "vestlused puuduvad.", + "Hello, {{name}}": "Tere, {{name}}", + "Help": "Abi", + "Help us create the best community leaderboard by sharing your feedback history!": "Aidake meil luua parim kogukonna edetabel, jagades oma tagasiside ajalugu!", + "Hex Color": "Hex värv", + "Hex Color - Leave empty for default color": "Hex värv - jätke tühjaks vaikevärvi jaoks", + "Hide": "Peida", + "Home": "Avaleht", + "Host": "Host", + "How can I help you today?": "Kuidas saan teid täna aidata?", + "How would you rate this response?": "Kuidas hindaksite seda vastust?", + "Hybrid Search": "Hübriidotsing", + "I acknowledge that I have read and I understand the implications of my action. 
I am aware of the risks associated with executing arbitrary code and I have verified the trustworthiness of the source.": "Kinnitan, et olen lugenud ja mõistan oma tegevuse tagajärgi. Olen teadlik suvalise koodi käivitamisega seotud riskidest ja olen kontrollinud allika usaldusväärsust.", + "ID": "ID", + "Ignite curiosity": "Süüta uudishimu", + "Image": "Pilt", + "Image Compression": "Pildi tihendamine", + "Image Generation": "Pildi genereerimine", + "Image Generation (Experimental)": "Pildi genereerimine (katsetuslik)", + "Image Generation Engine": "Pildi genereerimise mootor", + "Image Max Compression Size": "Pildi maksimaalne tihendamise suurus", + "Image Prompt Generation": "Pildi vihje genereerimine", + "Image Prompt Generation Prompt": "Pildi vihje genereerimise vihje", + "Image Settings": "Pildi seaded", + "Images": "Pildid", + "Import Chats": "Impordi vestlused", + "Import Config from JSON File": "Impordi seadistus JSON-failist", + "Import Functions": "Impordi funktsioonid", + "Import Models": "Impordi mudelid", + "Import Presets": "Impordi eelseadistused", + "Import Prompts": "Impordi vihjed", + "Import Tools": "Impordi tööriistad", + "Include": "Kaasa", + "Include `--api-auth` flag when running stable-diffusion-webui": "Lisage `--api-auth` lipp stable-diffusion-webui käivitamisel", + "Include `--api` flag when running stable-diffusion-webui": "Lisage `--api` lipp stable-diffusion-webui käivitamisel", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "Mõjutab, kui kiiresti algoritm reageerib genereeritud teksti tagasisidele. 
Madalam õppimiskiirus annab tulemuseks aeglasemad kohandused, samas kui kõrgem õppimiskiirus muudab algoritmi tundlikumaks.", + "Info": "Info", + "Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "", + "Input commands": "Sisendkäsud", + "Install from Github URL": "Installige Github URL-ilt", + "Instant Auto-Send After Voice Transcription": "Kohene automaatne saatmine pärast hääle transkriptsiooni", + "Integration": "Integratsioon", + "Interface": "Kasutajaliides", + "Invalid file format.": "Vigane failiformaat.", + "Invalid Tag": "Vigane silt", + "is typing...": "kirjutab...", + "January": "Jaanuar", + "Jina API Key": "Jina API võti", + "join our Discord for help.": "liituge abi saamiseks meie Discordiga.", + "JSON": "JSON", + "JSON Preview": "JSON eelvaade", + "July": "Juuli", + "June": "Juuni", + "Jupyter Auth": "Jupyter autentimine", + "Jupyter URL": "Jupyter URL", + "JWT Expiration": "JWT aegumine", + "JWT Token": "JWT token", + "Kagi Search API Key": "Kagi Search API võti", + "Keep Alive": "Hoia elus", + "Key": "Võti", + "Keyboard shortcuts": "Klaviatuuri otseteed", + "Knowledge": "Teadmised", + "Knowledge Access": "Teadmiste juurdepääs", + "Knowledge created successfully.": "Teadmised edukalt loodud.", + "Knowledge deleted successfully.": "Teadmised edukalt kustutatud.", + "Knowledge reset successfully.": "Teadmised edukalt lähtestatud.", + "Knowledge updated successfully": "Teadmised edukalt uuendatud", + "Kokoro.js (Browser)": "Kokoro.js (brauser)", + "Kokoro.js Dtype": "Kokoro.js andmetüüp", + "Label": "Silt", + "Landing Page Mode": "Maandumislehe režiim", + "Language": "Keel", + "Last Active": "Viimati aktiivne", + "Last Modified": "Viimati muudetud", + "Last reply": "Viimane vastus", + "LDAP": "LDAP", + "LDAP server updated": "LDAP server uuendatud", + "Leaderboard": "Edetabel", + "Leave empty for unlimited": "Jäta tühjaks piiranguta kasutamiseks", + "Leave empty to include all models from 
\"{{URL}}/api/tags\" endpoint": "Jäta tühjaks, et kaasata kõik mudelid \"{{URL}}/api/tags\" lõpp-punktist", + "Leave empty to include all models from \"{{URL}}/models\" endpoint": "Jäta tühjaks, et kaasata kõik mudelid \"{{URL}}/models\" lõpp-punktist", + "Leave empty to include all models or select specific models": "Jäta tühjaks, et kaasata kõik mudelid või vali konkreetsed mudelid", + "Leave empty to use the default prompt, or enter a custom prompt": "Jäta tühjaks, et kasutada vaikimisi vihjet, või sisesta kohandatud vihje", + "Leave model field empty to use the default model.": "Jäta mudeli väli tühjaks, et kasutada vaikimisi mudelit.", + "License": "Litsents", + "Light": "Hele", + "Listening...": "Kuulamine...", + "Llama.cpp": "Llama.cpp", + "LLMs can make mistakes. Verify important information.": "LLM-id võivad teha vigu. Kontrollige olulist teavet.", + "Loader": "Laadija", + "Loading Kokoro.js...": "Kokoro.js laadimine...", + "Local": "Kohalik", + "Local Models": "Kohalikud mudelid", + "Location access not allowed": "Asukoha juurdepääs pole lubatud", + "Logit Bias": "Logiti kallutatus", + "Lost": "Kaotanud", + "LTR": "LTR", + "Made by Open WebUI Community": "Loodud Open WebUI kogukonna poolt", + "Make sure to enclose them with": "Veenduge, et need on ümbritsetud järgmisega:", + "Make sure to export a workflow.json file as API format from ComfyUI.": "Veenduge, et ekspordite workflow.json faili API formaadis ComfyUI-st.", + "Manage": "Halda", + "Manage Direct Connections": "Halda otseseid ühendusi", + "Manage Models": "Halda mudeleid", + "Manage Ollama": "Halda Ollama't", + "Manage Ollama API Connections": "Halda Ollama API ühendusi", + "Manage OpenAI API Connections": "Halda OpenAI API ühendusi", + "Manage Pipelines": "Halda torustikke", + "March": "Märts", + "Max Tokens (num_predict)": "Max tokeneid (num_predict)", + "Max Upload Count": "Maksimaalne üleslaadimiste arv", + "Max Upload Size": "Maksimaalne üleslaadimise suurus", + "Maximum of 3 models can be 
downloaded simultaneously. Please try again later.": "Korraga saab alla laadida maksimaalselt 3 mudelit. Palun proovige hiljem uuesti.", + "May": "Mai", + "Memories accessible by LLMs will be shown here.": "LLM-idele ligipääsetavad mälestused kuvatakse siin.", + "Memory": "Mälu", + "Memory added successfully": "Mälu edukalt lisatud", + "Memory cleared successfully": "Mälu edukalt tühjendatud", + "Memory deleted successfully": "Mälu edukalt kustutatud", + "Memory updated successfully": "Mälu edukalt uuendatud", + "Merge Responses": "Ühenda vastused", + "Message rating should be enabled to use this feature": "Selle funktsiooni kasutamiseks peaks sõnumite hindamine olema lubatud", + "Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "Teie saadetud sõnumeid pärast lingi loomist ei jagata. Kasutajad, kellel on URL, saavad vaadata jagatud vestlust.", + "Min P": "Min P", + "Minimum Score": "Minimaalne skoor", + "Mirostat": "Mirostat", + "Mirostat Eta": "Mirostat Eta", + "Mirostat Tau": "Mirostat Tau", + "Model": "Mudel", + "Model '{{modelName}}' has been successfully downloaded.": "Mudel '{{modelName}}' on edukalt alla laaditud.", + "Model '{{modelTag}}' is already in queue for downloading.": "Mudel '{{modelTag}}' on juba allalaadimise järjekorras.", + "Model {{modelId}} not found": "Mudelit {{modelId}} ei leitud", + "Model {{modelName}} is not vision capable": "Mudel {{modelName}} ei ole võimeline visuaalseid sisendeid töötlema", + "Model {{name}} is now {{status}}": "Mudel {{name}} on nüüd {{status}}", + "Model accepts image inputs": "Mudel võtab vastu pilte sisendina", + "Model created successfully!": "Mudel edukalt loodud!", + "Model filesystem path detected. Model shortname is required for update, cannot continue.": "Tuvastati mudeli failisüsteemi tee. 
Uuendamiseks on vajalik mudeli lühinimi, ei saa jätkata.", + "Model Filtering": "Mudeli filtreerimine", + "Model ID": "Mudeli ID", + "Model IDs": "Mudeli ID-d", + "Model Name": "Mudeli nimi", + "Model not selected": "Mudel pole valitud", + "Model Params": "Mudeli parameetrid", + "Model Permissions": "Mudeli õigused", + "Model updated successfully": "Mudel edukalt uuendatud", + "Modelfile Content": "Modelfile sisu", + "Models": "Mudelid", + "Models Access": "Mudelite juurdepääs", + "Models configuration saved successfully": "Mudelite seadistus edukalt salvestatud", + "Mojeek Search API Key": "Mojeek Search API võti", + "more": "rohkem", + "More": "Rohkem", + "Name": "Nimi", + "Name your knowledge base": "Nimetage oma teadmiste baas", + "Native": "Omane", + "New Chat": "Uus vestlus", + "New Folder": "Uus kaust", + "New Password": "Uus parool", + "new-channel": "uus-kanal", + "No content found": "Sisu ei leitud", + "No content to speak": "Pole mida rääkida", + "No distance available": "Kaugus pole saadaval", + "No feedbacks found": "Tagasisidet ei leitud", + "No file selected": "Faili pole valitud", + "No files found.": "Faile ei leitud.", + "No groups with access, add a group to grant access": "Puuduvad juurdepääsuõigustega grupid, lisage grupp juurdepääsu andmiseks", + "No HTML, CSS, or JavaScript content found.": "HTML, CSS ega JavaScript sisu ei leitud.", + "No inference engine with management support found": "Järeldusmootorit haldamise toega ei leitud", + "No knowledge found": "Teadmisi ei leitud", + "No memories to clear": "Pole mälestusi, mida kustutada", + "No model IDs": "Mudeli ID-d puuduvad", + "No models found": "Mudeleid ei leitud", + "No models selected": "Mudeleid pole valitud", + "No results found": "Tulemusi ei leitud", + "No search query generated": "Otsingupäringut ei genereeritud", + "No source available": "Allikas pole saadaval", + "No users were found.": "Kasutajaid ei leitud.", + "No valves to update": "Pole klappe, mida uuendada", + "None": 
"Mitte ühtegi", + "Not factually correct": "Faktiliselt ebakorrektne", + "Not helpful": "Pole abistav", + "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "Märkus: kui määrate minimaalse skoori, tagastab otsing ainult dokumendid, mille skoor on suurem või võrdne minimaalse skooriga.", + "Notes": "Märkmed", + "Notification Sound": "Teavituse heli", + "Notification Webhook": "Teavituse webhook", + "Notifications": "Teavitused", + "November": "November", + "num_gpu (Ollama)": "num_gpu (Ollama)", + "num_thread (Ollama)": "num_thread (Ollama)", + "OAuth ID": "OAuth ID", + "October": "Oktoober", + "Off": "Väljas", + "Okay, Let's Go!": "Hea küll, lähme!", + "OLED Dark": "OLED tume", + "Ollama": "Ollama", + "Ollama API": "Ollama API", + "Ollama API settings updated": "Ollama API seaded uuendatud", + "Ollama Version": "Ollama versioon", + "On": "Sees", + "OneDrive": "OneDrive", + "Only alphanumeric characters and hyphens are allowed": "Lubatud on ainult tähtede-numbrite kombinatsioonid ja sidekriipsud", + "Only alphanumeric characters and hyphens are allowed in the command string.": "Käsustringis on lubatud ainult tähtede-numbrite kombinatsioonid ja sidekriipsud.", + "Only collections can be edited, create a new knowledge base to edit/add documents.": "Muuta saab ainult kogusid, dokumentide muutmiseks/lisamiseks looge uus teadmiste baas.", + "Only select users and groups with permission can access": "Juurdepääs on ainult valitud õigustega kasutajatel ja gruppidel", + "Oops! Looks like the URL is invalid. Please double-check and try again.": "Oih! URL tundub olevat vigane. Palun kontrollige ja proovige uuesti.", + "Oops! There are files still uploading. Please wait for the upload to complete.": "Oih! Failide üleslaadimine on veel pooleli. Palun oodake, kuni üleslaadimine lõpeb.", + "Oops! There was an error in the previous response.": "Oih! Eelmises vastuses oli viga.", + "Oops! 
You're using an unsupported method (frontend only). Please serve the WebUI from the backend.": "Oih! Kasutate toetamatut meetodit (ainult kasutajaliides). Palun serveerige WebUI tagarakendusest.", + "Open file": "Ava fail", + "Open in full screen": "Ava täisekraanil", + "Open new chat": "Ava uus vestlus", + "Open WebUI uses faster-whisper internally.": "Open WebUI kasutab sisemiselt faster-whisper'it.", + "Open WebUI uses SpeechT5 and CMU Arctic speaker embeddings.": "Open WebUI kasutab SpeechT5 ja CMU Arctic kõneleja manustamisi.", + "Open WebUI version (v{{OPEN_WEBUI_VERSION}}) is lower than required version (v{{REQUIRED_VERSION}})": "Open WebUI versioon (v{{OPEN_WEBUI_VERSION}}) on madalam kui nõutav versioon (v{{REQUIRED_VERSION}})", + "OpenAI": "OpenAI", + "OpenAI API": "OpenAI API", + "OpenAI API Config": "OpenAI API seadistus", + "OpenAI API Key is required.": "OpenAI API võti on nõutav.", + "OpenAI API settings updated": "OpenAI API seaded uuendatud", + "OpenAI URL/Key required.": "OpenAI URL/võti on nõutav.", + "or": "või", + "Organize your users": "Korraldage oma kasutajad", + "Other": "Muu", + "OUTPUT": "VÄLJUND", + "Output format": "Väljundformaat", + "Overview": "Ülevaade", + "page": "leht", + "Password": "Parool", + "Paste Large Text as File": "Kleebi suur tekst failina", + "PDF document (.pdf)": "PDF dokument (.pdf)", + "PDF Extract Images (OCR)": "PDF-ist piltide väljavõtmine (OCR)", + "pending": "ootel", + "Permission denied when accessing media devices": "Juurdepääs meediumiseadmetele keelatud", + "Permission denied when accessing microphone": "Juurdepääs mikrofonile keelatud", + "Permission denied when accessing microphone: {{error}}": "Juurdepääs mikrofonile keelatud: {{error}}", + "Permissions": "Õigused", + "Perplexity API Key": "Perplexity API võti", + "Personalization": "Isikupärastamine", + "Pin": "Kinnita", + "Pinned": "Kinnitatud", + "Pioneer insights": "Pioneeri arusaamad", + "Pipeline deleted successfully": "Torustik edukalt 
kustutatud", + "Pipeline downloaded successfully": "Torustik edukalt alla laaditud", + "Pipelines": "Torustikud", + "Pipelines Not Detected": "Torustikke ei tuvastatud", + "Pipelines Valves": "Torustike klapid", + "Plain text (.txt)": "Lihttekst (.txt)", + "Playground": "Mänguväljak", + "Please carefully review the following warnings:": "Palun vaadake hoolikalt läbi järgmised hoiatused:", + "Please do not close the settings page while loading the model.": "Palun ärge sulgege seadete lehte mudeli laadimise ajal.", + "Please enter a prompt": "Palun sisestage vihje", + "Please fill in all fields.": "Palun täitke kõik väljad.", + "Please select a model first.": "Palun valige esmalt mudel.", + "Please select a model.": "Palun valige mudel.", + "Please select a reason": "Palun valige põhjus", + "Port": "Port", + "Positive attitude": "Positiivne suhtumine", + "Prefix ID": "Prefiksi ID", + "Prefix ID is used to avoid conflicts with other connections by adding a prefix to the model IDs - leave empty to disable": "Prefiksi ID-d kasutatakse teiste ühendustega konfliktide vältimiseks, lisades mudeli ID-dele prefiksi - jätke tühjaks keelamiseks", + "Presence Penalty": "Kohaloleku karistus", + "Previous 30 days": "Eelmised 30 päeva", + "Previous 7 days": "Eelmised 7 päeva", + "Private": "", + "Profile Image": "Profiilipilt", + "Prompt": "Vihje", + "Prompt (e.g. 
Tell me a fun fact about the Roman Empire)": "Vihje (nt Räägi mulle üks huvitav fakt Rooma impeeriumi kohta)", + "Prompt Content": "Vihje sisu", + "Prompt created successfully": "Vihje edukalt loodud", + "Prompt suggestions": "Vihje soovitused", + "Prompt updated successfully": "Vihje edukalt uuendatud", + "Prompts": "Vihjed", + "Prompts Access": "Vihjete juurdepääs", + "Public": "", + "Pull \"{{searchValue}}\" from Ollama.com": "Tõmba \"{{searchValue}}\" Ollama.com-ist", + "Pull a model from Ollama.com": "Tõmba mudel Ollama.com-ist", + "Query Generation Prompt": "Päringu genereerimise vihje", + "RAG Template": "RAG mall", + "Rating": "Hinnang", + "Re-rank models by topic similarity": "Järjesta mudelid teema sarnasuse alusel ümber", + "Read": "Loe", + "Read Aloud": "Loe valjult", + "Reasoning Effort": "Arutluspingutus", + "Record voice": "Salvesta hääl", + "Redirecting you to Open WebUI Community": "Suunamine Open WebUI kogukonda", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "Vähendab mõttetuste genereerimise tõenäosust. 
Kõrgem väärtus (nt 100) annab mitmekesisemaid vastuseid, samas kui madalam väärtus (nt 10) on konservatiivsem.", + "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Viita endale kui \"Kasutaja\" (nt \"Kasutaja õpib hispaania keelt\")", + "References from": "Viited allikast", + "Refused when it shouldn't have": "Keeldus, kui ei oleks pidanud", + "Regenerate": "Regenereeri", + "Release Notes": "Väljalaskemärkmed", + "Relevance": "Asjakohasus", + "Remove": "Eemalda", + "Remove Model": "Eemalda mudel", + "Rename": "Nimeta ümber", + "Reorder Models": "Muuda mudelite järjekorda", + "Repeat Last N": "Korda viimast N", + "Repeat Penalty (Ollama)": "Korduse karistus (Ollama)", + "Reply in Thread": "Vasta lõimes", + "Request Mode": "Päringu režiim", + "Reranking Model": "Ümberjärjestamise mudel", + "Reranking model disabled": "Ümberjärjestamise mudel keelatud", + "Reranking model set to \"{{reranking_model}}\"": "Ümberjärjestamise mudel määratud kui \"{{reranking_model}}\"", + "Reset": "Lähtesta", + "Reset All Models": "Lähtesta kõik mudelid", + "Reset Upload Directory": "Lähtesta üleslaadimiste kataloog", + "Reset Vector Storage/Knowledge": "Lähtesta vektormälu/teadmised", + "Reset view": "Lähtesta vaade", + "Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Vastuste teavitusi ei saa aktiveerida, kuna veebisaidi õigused on keelatud. 
Vajalike juurdepääsude andmiseks külastage oma brauseri seadeid.", + "Response splitting": "Vastuse tükeldamine", + "Result": "Tulemus", + "Retrieval": "Taastamine", + "Retrieval Query Generation": "Taastamise päringu genereerimine", + "Rich Text Input for Chat": "Rikasteksti sisend vestluse jaoks", + "RK": "RK", + "Role": "Roll", + "Rosé Pine": "Rosé Pine", + "Rosé Pine Dawn": "Rosé Pine Dawn", + "RTL": "RTL", + "Run": "Käivita", + "Running": "Töötab", + "Save": "Salvesta", + "Save & Create": "Salvesta ja loo", + "Save & Update": "Salvesta ja uuenda", + "Save As Copy": "Salvesta koopiana", + "Save Tag": "Salvesta silt", + "Saved": "Salvestatud", + "Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "Vestluslogi salvestamine otse teie brauseri mällu pole enam toetatud. Palun võtke hetk, et alla laadida ja kustutada oma vestluslogi, klõpsates allpool olevat nuppu. 
Ärge muretsege, saate hõlpsasti oma vestluslogi tagarakendusse uuesti importida, kasutades", + "Scroll to bottom when switching between branches": "Keri alla harude vahel liikumisel", + "Search": "Otsing", + "Search a model": "Otsi mudelit", + "Search Base": "Otsingu baas", + "Search Chats": "Otsi vestlusi", + "Search Collection": "Otsi kogust", + "Search Filters": "Otsingu filtrid", + "search for tags": "otsi silte", + "Search Functions": "Otsi funktsioone", + "Search Knowledge": "Otsi teadmisi", + "Search Models": "Otsi mudeleid", + "Search options": "Otsingu valikud", + "Search Prompts": "Otsi vihjeid", + "Search Result Count": "Otsingutulemuste arv", + "Search the internet": "Otsi internetist", + "Search Tools": "Otsi tööriistu", + "SearchApi API Key": "SearchApi API võti", + "SearchApi Engine": "SearchApi mootor", + "Searched {{count}} sites": "Otsiti {{count}} saidilt", + "Searching \"{{searchQuery}}\"": "Otsimine: \"{{searchQuery}}\"", + "Searching Knowledge for \"{{searchQuery}}\"": "Teadmistest otsimine: \"{{searchQuery}}\"", + "Searxng Query URL": "Searxng päringu URL", + "See readme.md for instructions": "Juhiste saamiseks vaadake readme.md", + "See what's new": "Vaata, mis on uut", + "Seed": "Seeme", + "Select a base model": "Valige baasmudel", + "Select a engine": "Valige mootor", + "Select a function": "Valige funktsioon", + "Select a group": "Valige grupp", + "Select a model": "Valige mudel", + "Select a pipeline": "Valige torustik", + "Select a pipeline url": "Valige torustiku URL", + "Select a tool": "Valige tööriist", + "Select an auth method": "Valige autentimismeetod", + "Select an Ollama instance": "Valige Ollama instants", + "Select Engine": "Valige mootor", + "Select Knowledge": "Valige teadmised", + "Select only one model to call": "Valige ainult üks mudel kutsumiseks", + "Selected model(s) do not support image inputs": "Valitud mudel(id) ei toeta pilte sisendina", + "Semantic distance to query": "Semantiline kaugus päringust", + "Send": "Saada", 
+ "Send a Message": "Saada sõnum", + "Send message": "Saada sõnum", + "Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "Saadab `stream_options: { include_usage: true }` päringus.\nToetatud teenusepakkujad tagastavad määramisel vastuses tokeni kasutuse teabe.", + "September": "September", + "SerpApi API Key": "SerpApi API võti", + "SerpApi Engine": "SerpApi mootor", + "Serper API Key": "Serper API võti", + "Serply API Key": "Serply API võti", + "Serpstack API Key": "Serpstack API võti", + "Server connection verified": "Serveri ühendus kontrollitud", + "Set as default": "Määra vaikimisi", + "Set CFG Scale": "Määra CFG skaala", + "Set Default Model": "Määra vaikimisi mudel", + "Set embedding model": "Määra manustamise mudel", + "Set embedding model (e.g. {{model}})": "Määra manustamise mudel (nt {{model}})", + "Set Image Size": "Määra pildi suurus", + "Set reranking model (e.g. {{model}})": "Määra ümberjärjestamise mudel (nt {{model}})", + "Set Sampler": "Määra valimismeetod", + "Set Scheduler": "Määra planeerija", + "Set Steps": "Määra sammud", + "Set Task Model": "Määra ülesande mudel", + "Set the number of layers, which will be off-loaded to GPU. Increasing this value can significantly improve performance for models that are optimized for GPU acceleration but may also consume more power and GPU resources.": "Määrake kihtide arv, mis laaditakse GPU-le. Selle väärtuse suurendamine võib oluliselt parandada jõudlust mudelite puhul, mis on optimeeritud GPU kiirenduse jaoks, kuid võib tarbida rohkem energiat ja GPU ressursse.", + "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Määrake arvutusteks kasutatavate töölõimede arv. 
See valik kontrollib, mitu lõime kasutatakse saabuvate päringute samaaegseks töötlemiseks. Selle väärtuse suurendamine võib parandada jõudlust suure samaaegsusega töökoormuste korral, kuid võib tarbida rohkem CPU ressursse.", + "Set Voice": "Määra hääl", + "Set whisper model": "Määra whisper mudel", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "Seab tasase kallutatuse tokenite vastu, mis on esinenud vähemalt üks kord. Kõrgem väärtus (nt 1,5) karistab kordusi tugevamalt, samas kui madalam väärtus (nt 0,9) on leebem. Väärtuse 0 korral on see keelatud.", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "Seab skaleeritava kallutatuse tokenite vastu korduste karistamiseks, põhinedes sellel, mitu korda need on esinenud. Kõrgem väärtus (nt 1,5) karistab kordusi tugevamalt, samas kui madalam väärtus (nt 0,9) on leebem. Väärtuse 0 korral on see keelatud.", + "Sets how far back for the model to look back to prevent repetition.": "Määrab, kui kaugele mudel tagasi vaatab, et vältida kordusi.", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "Määrab genereerimiseks kasutatava juhusliku arvu seemne. Selle määramine kindlale numbrile paneb mudeli genereerima sama teksti sama vihje korral.", + "Sets the size of the context window used to generate the next token.": "Määrab järgmise tokeni genereerimiseks kasutatava konteksti akna suuruse.", + "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. 
Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Määrab kasutatavad lõpetamise järjestused. Kui see muster kohatakse, lõpetab LLM teksti genereerimise ja tagastab. Mitme lõpetamise mustri saab määrata, täpsustades modelfile'is mitu eraldi lõpetamise parameetrit.", + "Settings": "Seaded", + "Settings saved successfully!": "Seaded edukalt salvestatud!", + "Share": "Jaga", + "Share Chat": "Jaga vestlust", + "Share to Open WebUI Community": "Jaga Open WebUI kogukonnaga", + "Show": "Näita", + "Show \"What's New\" modal on login": "Näita \"Mis on uut\" modaalakent sisselogimisel", + "Show Admin Details in Account Pending Overlay": "Näita administraatori üksikasju konto ootel kattekihil", + "Show shortcuts": "Näita otseteid", + "Show your support!": "Näita oma toetust!", + "Showcased creativity": "Näitas loovust", + "Sign in": "Logi sisse", + "Sign in to {{WEBUI_NAME}}": "Logi sisse {{WEBUI_NAME}}", + "Sign in to {{WEBUI_NAME}} with LDAP": "Logi sisse {{WEBUI_NAME}} LDAP-ga", + "Sign Out": "Logi välja", + "Sign up": "Registreeru", + "Sign up to {{WEBUI_NAME}}": "Registreeru {{WEBUI_NAME}}", + "Signing in to {{WEBUI_NAME}}": "Sisselogimine {{WEBUI_NAME}}", + "sk-1234": "sk-1234", + "Source": "Allikas", + "Speech Playback Speed": "Kõne taasesituse kiirus", + "Speech recognition error: {{error}}": "Kõnetuvastuse viga: {{error}}", + "Speech-to-Text Engine": "Kõne-tekstiks mootor", + "Stop": "Peata", + "Stop Sequence": "Lõpetamise järjestus", + "Stream Chat Response": "Voogedasta vestluse vastust", + "STT Model": "STT mudel", + "STT Settings": "STT seaded", + "Subtitle (e.g. 
about the Roman Empire)": "Alampealkiri (nt Rooma impeeriumi kohta)", + "Success": "Õnnestus", + "Successfully updated.": "Edukalt uuendatud.", + "Suggested": "Soovitatud", + "Support": "Tugi", + "Support this plugin:": "Toeta seda pistikprogrammi:", + "Sync directory": "Sünkroniseeri kataloog", + "System": "Süsteem", + "System Instructions": "Süsteemi juhised", + "System Prompt": "Süsteemi vihje", + "Tags": "", + "Tags Generation": "Siltide genereerimine", + "Tags Generation Prompt": "Siltide genereerimise vihje", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "Saba vaba valimit kasutatakse väljundis vähem tõenäoliste tokenite mõju vähendamiseks. Kõrgem väärtus (nt 2,0) vähendab mõju rohkem, samas kui väärtus 1,0 keelab selle seade.", + "Talk to model": "Räägi mudeliga", + "Tap to interrupt": "Puuduta katkestamiseks", + "Tasks": "Ülesanded", + "Tavily API Key": "Tavily API võti", + "Tell us more:": "Räägi meile lähemalt:", + "Temperature": "Temperatuur", + "Template": "Mall", + "Temporary Chat": "Ajutine vestlus", + "Text Splitter": "Teksti tükeldaja", + "Text-to-Speech Engine": "Tekst-kõneks mootor", + "Tfs Z": "Tfs Z", + "Thanks for your feedback!": "Täname tagasiside eest!", + "The Application Account DN you bind with for search": "Rakenduse konto DN, millega seote otsingu jaoks", + "The base to search for users": "Baas kasutajate otsimiseks", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "Partii suurus määrab, mitu tekstipäringut töödeldakse korraga. Suurem partii suurus võib suurendada mudeli jõudlust ja kiirust, kuid see nõuab ka rohkem mälu.", + "The developers behind this plugin are passionate volunteers from the community. 
If you find this plugin helpful, please consider contributing to its development.": "Selle pistikprogrammi taga olevad arendajad on kogukonna pühendunud vabatahtlikud. Kui leiate, et see pistikprogramm on kasulik, palun kaaluge selle arendamise toetamist.", + "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Hindamise edetabel põhineb Elo hindamissüsteemil ja seda uuendatakse reaalajas.", + "The LDAP attribute that maps to the mail that users use to sign in.": "LDAP atribuut, mis kaardistab e-posti, mida kasutajad kasutavad sisselogimiseks.", + "The LDAP attribute that maps to the username that users use to sign in.": "LDAP atribuut, mis kaardistab kasutajanime, mida kasutajad kasutavad sisselogimiseks.", + "The leaderboard is currently in beta, and we may adjust the rating calculations as we refine the algorithm.": "Edetabel on praegu beetaversioonina ja me võime kohandada hindamisarvutusi algoritmi täiustamisel.", + "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Maksimaalne failisuurus MB-des. Kui failisuurus ületab seda piiri, faili ei laadita üles.", + "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Maksimaalne failide arv, mida saab korraga vestluses kasutada. Kui failide arv ületab selle piiri, faile ei laadita üles.", + "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Skoor peaks olema väärtus vahemikus 0,0 (0%) kuni 1,0 (100%).", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "Mudeli temperatuur. Temperatuuri suurendamine paneb mudeli vastama loovamalt.", + "Theme": "Teema", + "Thinking...": "Mõtleb...", + "This action cannot be undone. Do you wish to continue?": "Seda toimingut ei saa tagasi võtta. Kas soovite jätkata?", + "This channel was created on {{createdAt}}. 
This is the very beginning of the {{channelName}} channel.": "", + "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "See tagab, et teie väärtuslikud vestlused salvestatakse turvaliselt teie tagarakenduse andmebaasi. Täname!", + "This is an experimental feature, it may not function as expected and is subject to change at any time.": "See on katsetuslik funktsioon, see ei pruugi toimida ootuspäraselt ja võib igal ajal muutuda.", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "See valik kontrollib, mitu tokenit säilitatakse konteksti värskendamisel. Näiteks kui see on määratud 2-le, säilitatakse vestluse konteksti viimased 2 tokenit. Konteksti säilitamine võib aidata säilitada vestluse järjepidevust, kuid võib vähendada võimet reageerida uutele teemadele.", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "See valik määrab maksimaalse tokenite arvu, mida mudel saab oma vastuses genereerida. 
Selle piirmäära suurendamine võimaldab mudelil anda pikemaid vastuseid, kuid võib suurendada ka ebavajaliku või ebaolulise sisu genereerimise tõenäosust.", + "This option will delete all existing files in the collection and replace them with newly uploaded files.": "See valik kustutab kõik olemasolevad failid kogust ja asendab need äsja üleslaaditud failidega.", + "This response was generated by \"{{model}}\"": "Selle vastuse genereeris \"{{model}}\"", + "This will delete": "See kustutab", + "This will delete {{NAME}} and all its contents.": "See kustutab {{NAME}} ja kogu selle sisu.", + "This will delete all models including custom models": "See kustutab kõik mudelid, sealhulgas kohandatud mudelid", + "This will delete all models including custom models and cannot be undone.": "See kustutab kõik mudelid, sealhulgas kohandatud mudelid, ja seda ei saa tagasi võtta.", + "This will reset the knowledge base and sync all files. Do you wish to continue?": "See lähtestab teadmiste baasi ja sünkroniseerib kõik failid. Kas soovite jätkata?", + "Thorough explanation": "Põhjalik selgitus", + "Thought for {{DURATION}}": "Mõtles {{DURATION}}", + "Thought for {{DURATION}} seconds": "Mõtles {{DURATION}} sekundit", + "Tika": "Tika", + "Tika Server URL required.": "Tika serveri URL on nõutav.", + "Tiktoken": "Tiktoken", + "Tip: Update multiple variable slots consecutively by pressing the tab key in the chat input after each replacement.": "Nõuanne: Värskendage mitut muutuja kohta järjestikku, vajutades pärast iga asendust vestluse sisendis tabeldusklahvi.", + "Title": "Pealkiri", + "Title (e.g. 
Tell me a fun fact)": "Pealkiri (nt Räägi mulle üks huvitav fakt)", + "Title Auto-Generation": "Pealkirja automaatne genereerimine", + "Title cannot be an empty string.": "Pealkiri ei saa olla tühi string.", + "Title Generation": "Pealkirja genereerimine", + "Title Generation Prompt": "Pealkirja genereerimise vihje", + "TLS": "TLS", + "To access the available model names for downloading,": "Juurdepääsuks saadaolevatele mudelinimedele allalaadimiseks,", + "To access the GGUF models available for downloading,": "Juurdepääsuks allalaadimiseks saadaolevatele GGUF mudelitele,", + "To access the WebUI, please reach out to the administrator. Admins can manage user statuses from the Admin Panel.": "WebUI-le juurdepääsuks võtke ühendust administraatoriga. Administraatorid saavad hallata kasutajate staatuseid administraatori paneelist.", + "To attach knowledge base here, add them to the \"Knowledge\" workspace first.": "Teadmiste baasi siia lisamiseks lisage need esmalt \"Teadmiste\" tööalale.", + "To learn more about available endpoints, visit our documentation.": "Saadaolevate lõpp-punktide kohta rohkem teada saamiseks külastage meie dokumentatsiooni.", + "To protect your privacy, only ratings, model IDs, tags, and metadata are shared from your feedback—your chat logs remain private and are not included.": "Teie privaatsuse kaitsmiseks jagatakse teie tagasisidest ainult hinnanguid, mudeli ID-sid, silte ja metaandmeid - teie vestluslogi jääb privaatseks ja neid ei kaasata.", + "To select actions here, add them to the \"Functions\" workspace first.": "Toimingute siit valimiseks lisage need esmalt \"Funktsioonide\" tööalale.", + "To select filters here, add them to the \"Functions\" workspace first.": "Filtrite siit valimiseks lisage need esmalt \"Funktsioonide\" tööalale.", + "To select toolkits here, add them to the \"Tools\" workspace first.": "Tööriistakomplektide siit valimiseks lisage need esmalt \"Tööriistade\" tööalale.", + "Toast notifications for new updates": 
"Hüpikmärguanded uuenduste kohta", + "Today": "Täna", + "Toggle settings": "Lülita seaded", + "Toggle sidebar": "Lülita külgriba", + "Token": "Token", + "Tokens To Keep On Context Refresh (num_keep)": "Konteksti värskendamisel säilitatavad tokenid (num_keep)", + "Too verbose": "Liiga paljusõnaline", + "Tool created successfully": "Tööriist edukalt loodud", + "Tool deleted successfully": "Tööriist edukalt kustutatud", + "Tool Description": "Tööriista kirjeldus", + "Tool ID": "Tööriista ID", + "Tool imported successfully": "Tööriist edukalt imporditud", + "Tool Name": "Tööriista nimi", + "Tool updated successfully": "Tööriist edukalt uuendatud", + "Tools": "Tööriistad", + "Tools Access": "Tööriistade juurdepääs", + "Tools are a function calling system with arbitrary code execution": "Tööriistad on funktsioonide kutsumise süsteem suvalise koodi täitmisega", + "Tools Function Calling Prompt": "Tööriistade funktsioonide kutsumise vihje", + "Tools have a function calling system that allows arbitrary code execution": "Tööriistadel on funktsioonide kutsumise süsteem, mis võimaldab suvalise koodi täitmist", + "Tools have a function calling system that allows arbitrary code execution.": "Tööriistadel on funktsioonide kutsumise süsteem, mis võimaldab suvalise koodi täitmist.", + "Top K": "Top K", + "Top P": "Top P", + "Transformers": "Transformers", + "Trouble accessing Ollama?": "Probleeme Ollama juurdepääsuga?", + "Trust Proxy Environment": "Usalda puhverserveri keskkonda", + "TTS Model": "TTS mudel", + "TTS Settings": "TTS seaded", + "TTS Voice": "TTS hääl", + "Type": "Tüüp", + "Type Hugging Face Resolve (Download) URL": "Sisestage Hugging Face Resolve (Allalaadimise) URL", + "Uh-oh! There was an issue with the response.": "Oi-oi! 
Vastusega oli probleem.", + "UI": "Kasutajaliides", + "Unarchive All": "Eemalda kõik arhiivist", + "Unarchive All Archived Chats": "Eemalda kõik arhiveeritud vestlused arhiivist", + "Unarchive Chat": "Eemalda vestlus arhiivist", + "Unlock mysteries": "Ava mõistatused", + "Unpin": "Võta lahti", + "Unravel secrets": "Ava saladused", + "Untagged": "Sildistamata", + "Update": "Uuenda", + "Update and Copy Link": "Uuenda ja kopeeri link", + "Update for the latest features and improvements.": "Uuendage, et saada uusimad funktsioonid ja täiustused.", + "Update password": "Uuenda parooli", + "Updated": "Uuendatud", + "Updated at": "Uuendamise aeg", + "Updated At": "Uuendamise aeg", + "Upgrade to a licensed plan for enhanced capabilities, including custom theming and branding, and dedicated support.": "Uuendage litsentseeritud plaanile täiustatud võimaluste jaoks, sealhulgas kohandatud teemad ja bränding ning pühendatud tugi.", + "Upload": "Laadi üles", + "Upload a GGUF model": "Laadige üles GGUF mudel", + "Upload directory": "Üleslaadimise kataloog", + "Upload files": "Laadi failid üles", + "Upload Files": "Laadi failid üles", + "Upload Pipeline": "Laadi torustik üles", + "Upload Progress": "Üleslaadimise progress", + "URL": "URL", + "URL Mode": "URL režiim", + "Use '#' in the prompt input to load and include your knowledge.": "Kasutage '#' vihjete sisendis, et laadida ja kaasata oma teadmised.", + "Use Gravatar": "Kasuta Gravatari", + "Use groups to group your users and assign permissions.": "Kasutage gruppe oma kasutajate grupeerimiseks ja õiguste määramiseks.", + "Use Initials": "Kasuta initsiaale", + "use_mlock (Ollama)": "use_mlock (Ollama)", + "use_mmap (Ollama)": "use_mmap (Ollama)", + "user": "kasutaja", + "User": "Kasutaja", + "User location successfully retrieved.": "Kasutaja asukoht edukalt hangitud.", + "Username": "Kasutajanimi", + "Users": "Kasutajad", + "Using the default arena model with all models. 
Click the plus button to add custom models.": "Kasutatakse vaikimisi areena mudelit kõigi mudelitega. Kohandatud mudelite lisamiseks klõpsake plussmärgiga nuppu.", + "Utilize": "Kasuta", + "Valid time units:": "Kehtivad ajaühikud:", + "Valves": "Klapid", + "Valves updated": "Klapid uuendatud", + "Valves updated successfully": "Klapid edukalt uuendatud", + "variable": "muutuja", + "variable to have them replaced with clipboard content.": "muutuja, et need asendataks lõikelaua sisuga.", + "Verify Connection": "", + "Version": "Versioon", + "Version {{selectedVersion}} of {{totalVersions}}": "Versioon {{selectedVersion}} / {{totalVersions}}", + "View Replies": "Vaata vastuseid", + "Visibility": "Nähtavus", + "Voice": "Hääl", + "Voice Input": "Hääle sisend", + "Warning": "Hoiatus", + "Warning:": "Hoiatus:", + "Warning: Enabling this will allow users to upload arbitrary code on the server.": "Hoiatus: Selle lubamine võimaldab kasutajatel üles laadida suvalist koodi serverisse.", + "Warning: If you update or change your embedding model, you will need to re-import all documents.": "Hoiatus: Kui uuendate või muudate oma manustamise mudelit, peate kõik dokumendid uuesti importima.", + "Warning: Jupyter execution enables arbitrary code execution, posing severe security risks—proceed with extreme caution.": "Hoiatus: Jupyter täitmine võimaldab suvalise koodi käivitamist, mis kujutab endast tõsist turvariski - jätkake äärmise ettevaatusega.", + "Web": "Veeb", + "Web API": "Veebi API", + "Web Search": "Veebiotsing", + "Web Search Engine": "Veebi otsingumootor", + "Web Search in Chat": "Veebiotsing vestluses", + "Web Search Query Generation": "Veebi otsingupäringu genereerimine", + "Webhook URL": "Webhooki URL", + "WebUI Settings": "WebUI seaded", + "WebUI URL": "WebUI URL", + "WebUI will make requests to \"{{url}}/api/chat\"": "WebUI teeb päringuid aadressile \"{{url}}/api/chat\"", + "WebUI will make requests to \"{{url}}/chat/completions\"": "WebUI teeb päringuid aadressile 
\"{{url}}/chat/completions\"", + "What are you trying to achieve?": "Mida te püüate saavutada?", + "What are you working on?": "Millega te tegelete?", + "What’s New in": "Mis on uut", + "When enabled, the model will respond to each chat message in real-time, generating a response as soon as the user sends a message. This mode is useful for live chat applications, but may impact performance on slower hardware.": "Kui see on lubatud, vastab mudel igale vestlussõnumile reaalajas, genereerides vastuse niipea, kui kasutaja sõnumi saadab. See režiim on kasulik reaalajas vestlusrakendustes, kuid võib mõjutada jõudlust aeglasema riistvara puhul.", + "wherever you are": "kus iganes te olete", + "Whisper (Local)": "Whisper (lokaalne)", + "Why?": "Miks?", + "Widescreen Mode": "Laiekraani režiim", + "Won": "Võitis", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "Töötab koos top-k-ga. Kõrgem väärtus (nt 0,95) annab tulemuseks mitmekesisema teksti, samas kui madalam väärtus (nt 0,5) genereerib keskendunuma ja konservatiivsema teksti.", + "Workspace": "Tööala", + "Workspace Permissions": "Tööala õigused", + "Write": "Kirjuta", + "Write a prompt suggestion (e.g. Who are you?)": "Kirjutage vihje soovitus (nt Kes sa oled?)", + "Write a summary in 50 words that summarizes [topic or keyword].": "Kirjutage 50-sõnaline kokkuvõte, mis võtab kokku [teema või märksõna].", + "Write something...": "Kirjutage midagi...", + "Write your model template content here": "Kirjutage oma mudeli malli sisu siia", + "Yesterday": "Eile", + "You": "Sina", + "You are currently using a trial license. Please contact support to upgrade your license.": "Kasutate praegu proovilitsentsi. 
Palun võtke ühendust toega, et oma litsentsi uuendada.", + "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Saate korraga vestelda maksimaalselt {{maxCount}} faili(ga).", + "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Saate isikupärastada oma suhtlust LLM-idega, lisades mälestusi alumise 'Halda' nupu kaudu, muutes need kasulikumaks ja teile kohandatumaks.", + "You cannot upload an empty file.": "Te ei saa üles laadida tühja faili.", + "You do not have permission to upload files": "Teil pole õigust faile üles laadida", + "You do not have permission to upload files.": "Teil pole õigust faile üles laadida.", + "You have no archived conversations.": "Teil pole arhiveeritud vestlusi.", + "You have shared this chat": "Olete seda vestlust jaganud", + "You're a helpful assistant.": "Oled abivalmis assistent.", + "You're now logged in.": "Olete nüüd sisse logitud.", + "Your account status is currently pending activation.": "Teie konto staatus on praegu ootel aktiveerimist.", + "Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "Kogu teie toetus läheb otse pistikprogrammi arendajale; Open WebUI ei võta mingit protsenti. 
Kuid valitud rahastamisplatvormil võivad olla oma tasud.", + "Youtube": "Youtube", + "Youtube Language": "Youtube keel", + "Youtube Proxy URL": "Youtube puhverserveri URL" +} diff --git a/src/lib/i18n/locales/languages.json b/src/lib/i18n/locales/languages.json index 6b509f5046a..c6517f760e3 100644 --- a/src/lib/i18n/locales/languages.json +++ b/src/lib/i18n/locales/languages.json @@ -199,4 +199,4 @@ "code": "dg-DG", "title": "Doge (🐶)" } -] \ No newline at end of file +] From cbd11cffa075c4780e915a64fbca42ef1c697b6d Mon Sep 17 00:00:00 2001 From: djismgaming Date: Thu, 20 Mar 2025 18:11:33 -0400 Subject: [PATCH 367/623] chore: update translation.json --- src/lib/i18n/locales/es-ES/translation.json | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/lib/i18n/locales/es-ES/translation.json b/src/lib/i18n/locales/es-ES/translation.json index a27cebbef18..475ef4863d6 100644 --- a/src/lib/i18n/locales/es-ES/translation.json +++ b/src/lib/i18n/locales/es-ES/translation.json @@ -68,8 +68,8 @@ "Already have an account?": "¿Ya tienes una cuenta?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "Una alternativa a top_p, y tiene como objetivo garantizar un equilibrio entre calidad y variedad. El parámetro p representa la probabilidad mínima para que un token sea considerado, en relación con la probabilidad del token más probable. 
Por ejemplo, con p=0.05 y el token más probable con una probabilidad de 0.9, los logits con un valor inferior a 0.045 son filtrados.", "Always": "Siempre", - "Always Collapse Code Blocks": "", - "Always Expand Details": "", + "Always Collapse Code Blocks": "Siempre colapsar bloques de código", + "Always Expand Details": "Siempre expandir detalles", "Amazing": "Sorprendente", "an assistant": "un asistente", "Analyzed": "Analizado", @@ -295,7 +295,7 @@ "Describe your knowledge base and objectives": "Describe tu base de conocimientos y objetivos", "Description": "Descripción", "Didn't fully follow instructions": "No siguió las instrucciones", - "Direct": "", + "Direct": "Directo", "Direct Connections": "Conecciones Directas", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "Conecciones Directas permiten a los usuarios conectarse a sus propios endpoints de API compatibles con OpenAI.", "Direct Connections settings updated": "Se actualizaron las configuraciones de las Conexiones Directas", @@ -319,7 +319,7 @@ "Do not install functions from sources you do not fully trust.": "No instale funciones desde fuentes que no confíe totalmente.", "Do not install tools from sources you do not fully trust.": "No instale herramientas desde fuentes que no confíe totalmente.", "Docling": "", - "Docling Server URL required.": "", + "Docling Server URL required.": "Se requiere la URL del servidor de Docling.", "Document": "Documento", "Document Intelligence": "Document Intelligence", "Document Intelligence endpoint and key required.": "Endpoint y clave de Document Intelligence requeridos.", @@ -390,7 +390,7 @@ "Enter Chunk Size": "Ingrese el tamaño del fragmento", "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "Entre pares \"token:bias_value\" separados por comas (ejemplo: 5432:100, 413:-100)", "Enter description": "Ingrese la descripción", - "Enter Docling Server URL": "", + "Enter Docling Server URL": 
"Ingrese URL de Docling Server", "Enter Document Intelligence Endpoint": "Entre el Endpoint de Document Intelligence", "Enter Document Intelligence Key": "Entre la Clave de Document Intelligence", "Enter domains separated by commas (e.g., example.com,site.org)": "Entre dominios separados por comas (p.ej., ejemplo.com,sitio.org)", @@ -478,7 +478,7 @@ "Export Prompts": "Exportar Prompts", "Export to CSV": "Exportar a CSV", "Export Tools": "Exportar Herramientas", - "External": "", + "External": "Externo", "External Models": "Modelos Externos", "Failed to add file.": "No se pudo agregar el archivo.", "Failed to create API Key.": "No se pudo crear la clave API.", @@ -990,7 +990,7 @@ "System": "Sistema", "System Instructions": "Instrucciones del sistema", "System Prompt": "Prompt del sistema", - "Tags": "", + "Tags": "Etiquetas", "Tags Generation": "Generación de etiquetas", "Tags Generation Prompt": "Prompt de generación de etiquetas", "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "Muestreo libre de cola se utiliza para reducir el impacto de los tokens menos probables de la salida. 
Un valor más alto (por ejemplo, 2.0) reducirá más el impacto, mientras que un valor de 1.0 deshabilita esta configuración.", From a28436237c265011d698af0b5974140b4dbf8421 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Thu, 20 Mar 2025 17:42:50 -0700 Subject: [PATCH 368/623] refac --- src/lib/components/layout/Sidebar.svelte | 2 +- .../layout/Sidebar/RecursiveFolder.svelte | 13 ++++++------- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/src/lib/components/layout/Sidebar.svelte b/src/lib/components/layout/Sidebar.svelte index 0c8d0da1bef..d547779482d 100644 --- a/src/lib/components/layout/Sidebar.svelte +++ b/src/lib/components/layout/Sidebar.svelte @@ -93,7 +93,7 @@ folders[folder.id] = { ...(folders[folder.id] || {}), ...folder }; if (newFolderId && folder.id === newFolderId) { - folders[folder.id].isNew = true; + folders[folder.id].new = true; newFolderId = null; } } diff --git a/src/lib/components/layout/Sidebar/RecursiveFolder.svelte b/src/lib/components/layout/Sidebar/RecursiveFolder.svelte index 334eb80bfa3..0940475d909 100644 --- a/src/lib/components/layout/Sidebar/RecursiveFolder.svelte +++ b/src/lib/components/layout/Sidebar/RecursiveFolder.svelte @@ -201,7 +201,7 @@ dragged = false; }; - onMount(() => { + onMount(async () => { open = folders[folderId].is_expanded; if (folderElement) { folderElement.addEventListener('dragover', onDragOver); @@ -216,12 +216,11 @@ folderElement.addEventListener('dragend', onDragEnd); } - if (folders[folderId].isNew) { - folders[folderId].isNew = false; - - setTimeout(() => { - editHandler(); - }, 100); + if (folders[folderId]?.new) { + delete folders[folderId].new; + + await tick(); + editHandler(); } }); From 87a06a1976cbf8854e24e6775c9939c0f1670238 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Thu, 20 Mar 2025 17:46:11 -0700 Subject: [PATCH 369/623] fix: file delete from knowledge not working with bypass embedding --- backend/open_webui/routers/knowledge.py | 24 
+++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/backend/open_webui/routers/knowledge.py b/backend/open_webui/routers/knowledge.py index 19690455057..bc1e2429e9d 100644 --- a/backend/open_webui/routers/knowledge.py +++ b/backend/open_webui/routers/knowledge.py @@ -437,14 +437,24 @@ def remove_file_from_knowledge_by_id( ) # Remove content from the vector database - VECTOR_DB_CLIENT.delete( - collection_name=knowledge.id, filter={"file_id": form_data.file_id} - ) + try: + VECTOR_DB_CLIENT.delete( + collection_name=knowledge.id, filter={"file_id": form_data.file_id} + ) + except Exception as e: + log.debug("This was most likely caused by bypassing embedding processing") + log.debug(e) + pass - # Remove the file's collection from vector database - file_collection = f"file-{form_data.file_id}" - if VECTOR_DB_CLIENT.has_collection(collection_name=file_collection): - VECTOR_DB_CLIENT.delete_collection(collection_name=file_collection) + try: + # Remove the file's collection from vector database + file_collection = f"file-{form_data.file_id}" + if VECTOR_DB_CLIENT.has_collection(collection_name=file_collection): + VECTOR_DB_CLIENT.delete_collection(collection_name=file_collection) + except Exception as e: + log.debug("This was most likely caused by bypassing embedding processing") + log.debug(e) + pass # Delete file from database Files.delete_file_by_id(form_data.file_id) From d047eb46cce49838824d0af32e1bba7516ef42e5 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Thu, 20 Mar 2025 17:54:13 -0700 Subject: [PATCH 370/623] refac --- src/lib/components/chat/Messages/CitationsModal.svelte | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib/components/chat/Messages/CitationsModal.svelte b/src/lib/components/chat/Messages/CitationsModal.svelte index 0542970ca93..cb740ce6960 100644 --- a/src/lib/components/chat/Messages/CitationsModal.svelte +++ b/src/lib/components/chat/Messages/CitationsModal.svelte @@ 
-128,11 +128,11 @@ {percentage.toFixed(2)}% - ({document.distance.toFixed(4)}) + ({(document?.distance ?? 0).toFixed(4)}) {:else} - {document.distance.toFixed(4)} + {(document?.distance ?? 0).toFixed(4)} {/if}
From 22f6e0f2f4cd922af1a24626e03e9b5b76d89002 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Fri, 21 Mar 2025 08:08:15 -0700 Subject: [PATCH 371/623] refac --- .../layout/Sidebar/RecursiveFolder.svelte | 23 +++++++++++-------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/src/lib/components/layout/Sidebar/RecursiveFolder.svelte b/src/lib/components/layout/Sidebar/RecursiveFolder.svelte index 0940475d909..a7eb920d7d9 100644 --- a/src/lib/components/layout/Sidebar/RecursiveFolder.svelte +++ b/src/lib/components/layout/Sidebar/RecursiveFolder.svelte @@ -304,18 +304,15 @@ console.log('Edit'); await tick(); name = folders[folderId].name; - edit = true; + edit = true; await tick(); - // focus on the input and select all text - setTimeout(() => { - const input = document.getElementById(`folder-${folderId}-input`); - if (input) { - input.focus(); - input.select(); - } - }, 100); + const input = document.getElementById(`folder-${folderId}-input`); + + if (input) { + input.focus(); + } }; const exportHandler = async () => { @@ -404,6 +401,9 @@ id="folder-{folderId}-input" type="text" bind:value={name} + on:focus={(e) => { + e.target.select(); + }} on:blur={() => { nameUpdateHandler(); edit = false; @@ -437,7 +437,10 @@ > { - editHandler(); + // Requires a timeout to prevent the click event from closing the dropdown + setTimeout(() => { + editHandler(); + }, 200); }} on:delete={() => { showDeleteConfirm = true; From 966940cb00702678047fb7fff6f2a404be2c7270 Mon Sep 17 00:00:00 2001 From: Yuta Hayashibe Date: Sat, 22 Mar 2025 14:59:17 +0900 Subject: [PATCH 372/623] feat: Added `redirect` parameter to /auth --- src/routes/+layout.svelte | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/routes/+layout.svelte b/src/routes/+layout.svelte index aef9719f164..b1567fd9e82 100644 --- a/src/routes/+layout.svelte +++ b/src/routes/+layout.svelte @@ -496,6 +496,9 @@ if ($config) { await setupSocket($config.features?.enable_websocket ?? 
true); + const currentUrl = `${window.location.pathname}${window.location.search}`; + const encodedUrl = encodeURIComponent(currentUrl); + if (localStorage.token) { // Get Session User Info const sessionUser = await getSessionUser(localStorage.token).catch((error) => { @@ -512,13 +515,13 @@ } else { // Redirect Invalid Session User to /auth Page localStorage.removeItem('token'); - await goto('/auth'); + await goto(`/auth?redirect=${encodedUrl}`); } } else { // Don't redirect if we're already on the auth page // Needed because we pass in tokens from OAuth logins via URL fragments if ($page.url.pathname !== '/auth') { - await goto('/auth'); + await goto(`/auth?redirect=${encodedUrl}`); } } } From bdd236fa3aa1efc038d2992a5a0f9a05e9a156ea Mon Sep 17 00:00:00 2001 From: Jonathan Flower Date: Sat, 22 Mar 2025 09:59:06 -0400 Subject: [PATCH 373/623] improved error handling for deleting collections that do not exist in chromadb --- .../open_webui/retrieval/vector/dbs/chroma.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/backend/open_webui/retrieval/vector/dbs/chroma.py b/backend/open_webui/retrieval/vector/dbs/chroma.py index 006ee207630..f15702cf111 100755 --- a/backend/open_webui/retrieval/vector/dbs/chroma.py +++ b/backend/open_webui/retrieval/vector/dbs/chroma.py @@ -166,12 +166,17 @@ def delete( filter: Optional[dict] = None, ): # Delete the items from the collection based on the ids. - collection = self.client.get_collection(name=collection_name) - if collection: - if ids: - collection.delete(ids=ids) - elif filter: - collection.delete(where=filter) + try: + collection = self.client.get_collection(name=collection_name) + if collection: + if ids: + collection.delete(ids=ids) + elif filter: + collection.delete(where=filter) + except Exception as e: + # If collection doesn't exist, that's fine - nothing to delete + log.debug(f"Attempted to delete from non-existent collection {collection_name}. 
Ignoring.") + pass def reset(self): # Resets the database. This will delete all collections and item entries. From 75b18f92b90108bf94a0e25490f927a7e54a8eca Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Sat, 22 Mar 2025 14:01:07 -0700 Subject: [PATCH 374/623] refac --- src/lib/components/AddConnectionModal.svelte | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/lib/components/AddConnectionModal.svelte b/src/lib/components/AddConnectionModal.svelte index 7a82f340c38..52fd5199995 100644 --- a/src/lib/components/AddConnectionModal.svelte +++ b/src/lib/components/AddConnectionModal.svelte @@ -79,9 +79,9 @@ const submitHandler = async () => { loading = true; - if (!ollama && (!url || !key)) { + if (!ollama && !url) { loading = false; - toast.error('URL and Key are required'); + toast.error('URL is required'); return; } @@ -223,7 +223,7 @@ className="w-full text-sm bg-transparent placeholder:text-gray-300 dark:placeholder:text-gray-700 outline-hidden" bind:value={key} placeholder={$i18n.t('API Key')} - required={!ollama} + required={false} />
From d144592660608d1320d07ea949ad98b27564f4b5 Mon Sep 17 00:00:00 2001 From: Yuta Hayashibe Date: Sat, 22 Mar 2025 16:21:05 +0900 Subject: [PATCH 375/623] chore: Remove `ENABLE_AUDIT_LOGS` and set the `AUDIT_LOG_LEVEL` NONE --- backend/open_webui/env.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/backend/open_webui/env.py b/backend/open_webui/env.py index 27cc3a9a4d4..2a327aa5d81 100644 --- a/backend/open_webui/env.py +++ b/backend/open_webui/env.py @@ -414,13 +414,12 @@ def parse_section(section): #################################### # AUDIT LOGGING #################################### -ENABLE_AUDIT_LOGS = os.getenv("ENABLE_AUDIT_LOGS", "false").lower() == "true" # Where to store log file AUDIT_LOGS_FILE_PATH = f"{DATA_DIR}/audit.log" # Maximum size of a file before rotating into a new log file AUDIT_LOG_FILE_ROTATION_SIZE = os.getenv("AUDIT_LOG_FILE_ROTATION_SIZE", "10MB") # METADATA | REQUEST | REQUEST_RESPONSE -AUDIT_LOG_LEVEL = os.getenv("AUDIT_LOG_LEVEL", "REQUEST_RESPONSE").upper() +AUDIT_LOG_LEVEL = os.getenv("AUDIT_LOG_LEVEL", "NONE").upper() try: MAX_BODY_LOG_SIZE = int(os.environ.get("MAX_BODY_LOG_SIZE") or 2048) except ValueError: From c1f189a602b6ce224c782688a98357394c66dd3f Mon Sep 17 00:00:00 2001 From: Yak! Date: Sun, 23 Mar 2025 17:52:48 +0900 Subject: [PATCH 376/623] Fix inconsistent value check. 
--- src/lib/components/chat/Chat.svelte | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib/components/chat/Chat.svelte b/src/lib/components/chat/Chat.svelte index ca766c9f765..e2b408059f0 100644 --- a/src/lib/components/chat/Chat.svelte +++ b/src/lib/components/chat/Chat.svelte @@ -384,7 +384,7 @@ if (event.data.type === 'input:prompt:submit') { console.debug(event.data.text); - if (prompt !== '') { + if (event.data.text !== '') { await tick(); submitPrompt(event.data.text); } From efd86e2cb4f2cb63f28fd67a0e4d2e945d5797a7 Mon Sep 17 00:00:00 2001 From: binxn <78713335+binxn@users.noreply.github.com> Date: Sun, 23 Mar 2025 17:14:20 +0100 Subject: [PATCH 377/623] Updated middleware.py to add OpenRouter compatibility --- backend/open_webui/utils/middleware.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/open_webui/utils/middleware.py b/backend/open_webui/utils/middleware.py index ccb45986545..dc0a7638f5b 100644 --- a/backend/open_webui/utils/middleware.py +++ b/backend/open_webui/utils/middleware.py @@ -1560,7 +1560,7 @@ async def stream_body_handler(response): value = delta.get("content") - reasoning_content = delta.get("reasoning_content") + reasoning_content = delta.get("reasoning_content") or delta.get("reasoning") if reasoning_content: if ( not content_blocks From e4078a6aee34eaba5214030a11e6f929a72e1e5f Mon Sep 17 00:00:00 2001 From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com> Date: Sun, 23 Mar 2025 17:12:14 +0100 Subject: [PATCH 378/623] Add new translations --- src/lib/i18n/locales/de-DE/translation.json | 66 ++++++++++----------- 1 file changed, 33 insertions(+), 33 deletions(-) diff --git a/src/lib/i18n/locales/de-DE/translation.json b/src/lib/i18n/locales/de-DE/translation.json index 221d8a20111..5b30e654786 100644 --- a/src/lib/i18n/locales/de-DE/translation.json +++ b/src/lib/i18n/locales/de-DE/translation.json @@ -5,7 +5,7 @@ "(e.g. `sh webui.sh --api`)": "(z. B. 
`sh webui.sh --api`)", "(latest)": "(neueste)", "{{ models }}": "{{ Modelle }}", - "{{COUNT}} hidden lines": "", + "{{COUNT}} hidden lines": "{{COUNT}} versteckte Zeilen", "{{COUNT}} Replies": "{{COUNT}} Antworten", "{{user}}'s Chats": "{{user}}s Unterhaltungen", "{{webUIName}} Backend Required": "{{webUIName}}-Backend erforderlich", @@ -52,7 +52,7 @@ "Admins have access to all tools at all times; users need tools assigned per model in the workspace.": "Administratoren haben jederzeit Zugriff auf alle Werkzeuge. Benutzer können im Arbeitsbereich zugewiesen.", "Advanced Parameters": "Erweiterte Parameter", "Advanced Params": "Erweiterte Parameter", - "All": "", + "All": "Alle", "All Documents": "Alle Dokumente", "All models deleted successfully": "Alle Modelle erfolgreich gelöscht", "Allow Chat Controls": "Chat-Steuerung erlauben", @@ -68,8 +68,8 @@ "Already have an account?": "Haben Sie bereits einen Account?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", "Always": "Immer", - "Always Collapse Code Blocks": "", - "Always Expand Details": "", + "Always Collapse Code Blocks": "Code-Blöcke immer zuklappen", + "Always Expand Details": "Details immer aufklappen", "Amazing": "Fantastisch", "an assistant": "ein Assistent", "Analyzed": "Analysiert", @@ -97,7 +97,7 @@ "Are you sure?": "Sind Sie sicher?", "Arena Models": "Arena-Modelle", "Artifacts": "Artefakte", - "Ask": "", + "Ask": "Fragen", "Ask a question": "Stellen Sie eine Frage", "Assistant": "Assistent", "Attach file from knowledge": "Datei aus Wissensspeicher anhängen", @@ -169,7 +169,7 @@ "Ciphers": "Verschlüsselungen", "Citation": "Zitate", "Clear memory": "Alle Erinnerungen entfernen", - "Clear Memory": "", + "Clear Memory": "Alle Erinnerungen entfernen", "click here": "hier klicken", "Click here for filter guides.": "Klicken Sie hier für Filteranleitungen.", "Click here for help.": "Klicken Sie hier für Hilfe.", @@ -191,12 +191,12 @@ "Code execution": "Codeausführung", "Code Execution": "Codeausführung", "Code Execution Engine": "", - "Code Execution Timeout": "", + "Code Execution Timeout": "Timeout für Codeausführung", "Code formatted successfully": "Code erfolgreich formatiert", "Code Interpreter": "Code-Interpreter", "Code Interpreter Engine": "", "Code Interpreter Prompt Template": "", - "Collapse": "", + "Collapse": "Zuklappen", "Collection": "Kollektion", "Color": "Farbe", "ComfyUI": "ComfyUI", @@ -252,7 +252,7 @@ "Created At": "Erstellt am", "Created by": "Erstellt von", "CSV Import": "CSV-Import", - "Ctrl+Enter to Send": "", + "Ctrl+Enter to Send": "Strg+Enter zum Senden", "Current Model": "Aktuelles Modell", "Current Password": "Aktuelles Passwort", "Custom": "Benutzerdefiniert", @@ -284,7 +284,7 @@ "Delete folder?": "Ordner löschen?", "Delete function?": "Funktion löschen?", "Delete Message": "Nachricht löschen", - 
"Delete message?": "", + "Delete message?": "Nachricht löschen?", "Delete prompt?": "Prompt löschen?", "delete this link": "diesen Link löschen", "Delete tool?": "Werkzeug löschen?", @@ -295,7 +295,7 @@ "Describe your knowledge base and objectives": "Beschreibe deinen Wissensspeicher und deine Ziele", "Description": "Beschreibung", "Didn't fully follow instructions": "Nicht genau den Answeisungen gefolgt", - "Direct": "", + "Direct": "Direkt", "Direct Connections": "Direktverbindungen", "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "Direktverbindungen ermöglichen es Benutzern, sich mit ihren eigenen OpenAI-kompatiblen API-Endpunkten zu verbinden.", "Direct Connections settings updated": "Direktverbindungs-Einstellungen aktualisiert", @@ -304,7 +304,7 @@ "Discover a model": "Entdecken Sie weitere Modelle", "Discover a prompt": "Entdecken Sie weitere Prompts", "Discover a tool": "Entdecken Sie weitere Werkzeuge", - "Discover how to use Open WebUI and seek support from the community.": "", + "Discover how to use Open WebUI and seek support from the community.": "Entdecke, wie Sie Open WebUI nutzen und erhalten Sie Unterstützung von der Community.", "Discover wonders": "Entdecken Sie Wunder", "Discover, download, and explore custom functions": "Entdecken und beziehen Sie benutzerdefinierte Funktionen", "Discover, download, and explore custom prompts": "Entdecken und beziehen Sie benutzerdefinierte Prompts", @@ -326,7 +326,7 @@ "Documentation": "Dokumentation", "Documents": "Dokumente", "does not make any external connections, and your data stays securely on your locally hosted server.": "stellt keine externen Verbindungen her, und Ihre Daten bleiben sicher auf Ihrem lokal gehosteten Server.", - "Domain Filter List": "", + "Domain Filter List": "Domain Filter-Liste", "Don't have an account?": "Haben Sie noch kein Benutzerkonto?", "don't install random functions from sources you don't trust.": "installieren Sie keine 
Funktionen aus Quellen, denen Sie nicht vertrauen.", "don't install random tools from sources you don't trust.": "installieren Sie keine Werkzeuge aus Quellen, denen Sie nicht vertrauen.", @@ -365,8 +365,8 @@ "Embedding model set to \"{{embedding_model}}\"": "Embedding-Modell auf \"{{embedding_model}}\" gesetzt", "Enable API Key": "API-Schlüssel aktivieren", "Enable autocomplete generation for chat messages": "Automatische Vervollständigung für Chat-Nachrichten aktivieren", - "Enable Code Execution": "", - "Enable Code Interpreter": "", + "Enable Code Execution": "Codeausführung aktivieren", + "Enable Code Interpreter": "Code-Interpreter aktivieren", "Enable Community Sharing": "Community-Freigabe aktivieren", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Aktiviere Memory Locking (mlock), um zu verhindern, dass Modelldaten aus dem RAM ausgelagert werden. Diese Option sperrt die Arbeitsseiten des Modells im RAM, um sicherzustellen, dass sie nicht auf die Festplatte ausgelagert werden. Dies kann die Leistung verbessern, indem Page Faults vermieden und ein schneller Datenzugriff sichergestellt werden.", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Aktiviere Memory Mapping (mmap), um Modelldaten zu laden. Diese Option ermöglicht es dem System, den Festplattenspeicher als Erweiterung des RAM zu verwenden, indem Festplattendateien so behandelt werden, als ob sie im RAM wären. 
Dies kann die Modellleistung verbessern, indem ein schnellerer Datenzugriff ermöglicht wird. Es kann jedoch nicht auf allen Systemen korrekt funktionieren und einen erheblichen Teil des Festplattenspeichers beanspruchen.", @@ -400,17 +400,17 @@ "Enter Google PSE Engine Id": "Geben Sie die Google PSE-Engine-ID ein", "Enter Image Size (e.g. 512x512)": "Geben Sie die Bildgröße ein (z. B. 512x512)", "Enter Jina API Key": "Geben Sie den Jina-API-Schlüssel ein", - "Enter Jupyter Password": "", - "Enter Jupyter Token": "", - "Enter Jupyter URL": "", "Enter Kagi Search API Key": "Geben sie den Kagi Search API-Schlüssel ein", - "Enter Key Behavior": "", + "Enter Jupyter Password": "Geben Sie das Jupyter-Passwort ein", + "Enter Jupyter Token": "Geben Sie den Jupyter-Token ein", + "Enter Jupyter URL": "Geben Sie die Jupyter-URL ein", + "Enter Key Behavior": "Verhalten von 'Enter'", "Enter language codes": "Geben Sie die Sprachcodes ein", "Enter Model ID": "Geben Sie die Modell-ID ein", "Enter model tag (e.g. {{modelTag}})": "Geben Sie den Model-Tag ein", "Enter Mojeek Search API Key": "Geben Sie den Mojeek Search API-Schlüssel ein", "Enter Number of Steps (e.g. 50)": "Geben Sie die Anzahl an Schritten ein (z. B. 50)", - "Enter Perplexity API Key": "", + "Enter Perplexity API Key": "Geben Sie den Perplexity API-Key ein", "Enter proxy URL (e.g. https://user:password@host:port)": "Geben sie die Proxy-URL ein (z. B. https://user:password@host:port)", "Enter reasoning effort": "Geben Sie den Schlussfolgerungsaufwand ein", "Enter Sampler (e.g. Euler a)": "Geben Sie den Sampler ein (z. B. Euler a)", @@ -433,8 +433,8 @@ "Enter Tavily API Key": "Geben Sie den Tavily-API-Schlüssel ein", "Enter the public URL of your WebUI. This URL will be used to generate links in the notifications.": "Geben sie die öffentliche URL Ihrer WebUI ein. 
Diese URL wird verwendet, um Links in den Benachrichtigungen zu generieren.", "Enter Tika Server URL": "Geben Sie die Tika-Server-URL ein", - "Enter timeout in seconds": "", - "Enter to Send": "", + "Enter timeout in seconds": "Geben Sie den Timeout in Sekunden ein", + "Enter to Send": "'Enter' zum Senden", "Enter Top K": "Geben Sie Top K ein", "Enter URL (e.g. http://127.0.0.1:7860/)": "Geben Sie die URL ein (z. B. http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "Geben Sie die URL ein (z. B. http://localhost:11434)", @@ -461,10 +461,10 @@ "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", "Exclude": "Ausschließen", "Execute code for analysis": "Code für Analyse ausführen", - "Expand": "", + "Expand": "Aufklappen", "Experimental": "Experimentell", - "Explain": "", - "Explain this section to me in more detail": "", + "Explain": "Erklären", + "Explain this section to me in more detail": "Erkläre mir diesen Abschnitt im Detail", "Explore the cosmos": "Erforschen Sie das Universum", "Export": "Exportieren", "Export All Archived Chats": "Alle archivierten Unterhaltungen exportieren", @@ -478,7 +478,7 @@ "Export Prompts": "Prompts exportieren", "Export to CSV": "Als CSV exportieren", "Export Tools": "Werkzeuge exportieren", - "External": "", + "External": "Extern", "External Models": "Externe Modelle", "Failed to add file.": "Fehler beim Hinzufügen der Datei.", "Failed to create API Key.": "Fehler beim Erstellen des API-Schlüssels.", @@ -517,7 +517,7 @@ "Form": "Formular", "Format your variables using brackets like this:": "Formatieren Sie Ihre Variablen mit Klammern, wie hier:", "Frequency Penalty": "Frequenzstrafe", - "Full Context Mode": "", + "Full Context Mode": "Voll-Kontext Modus", "Function": "Funktion", "Function Calling": "Funktionsaufruf", "Function created successfully": "Funktion erfolgreich erstellt", @@ -815,7 +815,7 @@ "Presence Penalty": "", "Previous 30 days": "Vorherige 30 
Tage", "Previous 7 days": "Vorherige 7 Tage", - "Private": "", + "Private": "Privat", "Profile Image": "Profilbild", "Prompt": "Prompt", "Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (z. B. \"Erzähle mir eine interessante Tatsache über das Römische Reich\")", @@ -825,7 +825,7 @@ "Prompt updated successfully": "Prompt erfolgreich aktualisiert", "Prompts": "Prompts", "Prompts Access": "Prompt-Zugriff", - "Public": "", + "Public": "Öffentlich", "Pull \"{{searchValue}}\" from Ollama.com": "\"{{searchValue}}\" von Ollama.com beziehen", "Pull a model from Ollama.com": "Modell von Ollama.com beziehen", "Query Generation Prompt": "Abfragegenerierungsprompt", @@ -1021,7 +1021,7 @@ "Theme": "Design", "Thinking...": "Denke nach...", "This action cannot be undone. Do you wish to continue?": "Diese Aktion kann nicht rückgängig gemacht werden. Möchten Sie fortfahren?", - "This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "", + "This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "Dieser Kanal wurde am {{createdAt}} erstellt. Dies ist der Beginn des {{channelName}} Kanals.", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Dies stellt sicher, dass Ihre wertvollen Unterhaltungen sicher in Ihrer Backend-Datenbank gespeichert werden. Vielen Dank!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Dies ist eine experimentelle Funktion, sie funktioniert möglicherweise nicht wie erwartet und kann jederzeit geändert werden.", "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. 
Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", @@ -1080,7 +1080,7 @@ "Top P": "Top P", "Transformers": "Transformers", "Trouble accessing Ollama?": "Probleme beim Zugriff auf Ollama?", - "Trust Proxy Environment": "", + "Trust Proxy Environment": "Proxy-Umgebung vertrauen", "TTS Model": "TTS-Modell", "TTS Settings": "TTS-Einstellungen", "TTS Voice": "TTS-Stimme", @@ -1102,7 +1102,7 @@ "Updated": "Aktualisiert", "Updated at": "Aktualisiert am", "Updated At": "Aktualisiert am", - "Upgrade to a licensed plan for enhanced capabilities, including custom theming and branding, and dedicated support.": "", + "Upgrade to a licensed plan for enhanced capabilities, including custom theming and branding, and dedicated support.": "Upgrade auf einen lizenzierten Plan für erweiterte Funktionen wie individuelles Design, Branding und dedizierten Support.", "Upload": "Hochladen", "Upload a GGUF model": "GGUF-Model hochladen", "Upload directory": "Upload-Verzeichnis", @@ -1131,7 +1131,7 @@ "Valves updated successfully": "Valves erfolgreich aktualisiert", "variable": "Variable", "variable to have them replaced with clipboard content.": "Variable, um den Inhalt der Zwischenablage beim Nutzen des Prompts zu ersetzen.", - "Verify Connection": "", + "Verify Connection": "Verbindung verifizieren", "Version": "Version", "Version {{selectedVersion}} of {{totalVersions}}": "Version {{selectedVersion}} von {{totalVersions}}", "View Replies": "Antworten anzeigen", From 73715538ed8e2e6188e2e0edf4213f93044499ad Mon Sep 17 00:00:00 2001 From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com> Date: Sun, 23 Mar 2025 17:12:37 +0100 Subject: [PATCH 379/623] Fix spelling error --- src/lib/i18n/locales/de-DE/translation.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib/i18n/locales/de-DE/translation.json b/src/lib/i18n/locales/de-DE/translation.json index 5b30e654786..a8d00019d15 100644 --- 
a/src/lib/i18n/locales/de-DE/translation.json +++ b/src/lib/i18n/locales/de-DE/translation.json @@ -400,10 +400,10 @@ "Enter Google PSE Engine Id": "Geben Sie die Google PSE-Engine-ID ein", "Enter Image Size (e.g. 512x512)": "Geben Sie die Bildgröße ein (z. B. 512x512)", "Enter Jina API Key": "Geben Sie den Jina-API-Schlüssel ein", - "Enter Kagi Search API Key": "Geben sie den Kagi Search API-Schlüssel ein", "Enter Jupyter Password": "Geben Sie das Jupyter-Passwort ein", "Enter Jupyter Token": "Geben Sie den Jupyter-Token ein", "Enter Jupyter URL": "Geben Sie die Jupyter-URL ein", + "Enter Kagi Search API Key": "Geben Sie den Kagi Search API-Schlüssel ein", "Enter Key Behavior": "Verhalten von 'Enter'", "Enter language codes": "Geben Sie die Sprachcodes ein", "Enter Model ID": "Geben Sie die Modell-ID ein", From 137f16a1604b23937efa23cba51cf519ba97061f Mon Sep 17 00:00:00 2001 From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com> Date: Sun, 23 Mar 2025 17:34:43 +0100 Subject: [PATCH 380/623] Rename chats --- src/lib/i18n/locales/de-DE/translation.json | 84 ++++++++++----------- 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/src/lib/i18n/locales/de-DE/translation.json b/src/lib/i18n/locales/de-DE/translation.json index a8d00019d15..a591802311d 100644 --- a/src/lib/i18n/locales/de-DE/translation.json +++ b/src/lib/i18n/locales/de-DE/translation.json @@ -7,11 +7,11 @@ "{{ models }}": "{{ Modelle }}", "{{COUNT}} hidden lines": "{{COUNT}} versteckte Zeilen", "{{COUNT}} Replies": "{{COUNT}} Antworten", - "{{user}}'s Chats": "{{user}}s Unterhaltungen", + "{{user}}'s Chats": "{{user}}s Chats", "{{webUIName}} Backend Required": "{{webUIName}}-Backend erforderlich", "*Prompt node ID(s) are required for image generation": "*Prompt-Node-ID(s) sind für die Bildgenerierung erforderlich", "A new version (v{{LATEST_VERSION}}) is now available.": "Eine neue Version (v{{LATEST_VERSION}}) ist jetzt verfügbar.", - "A task model is used when performing tasks such as 
generating titles for chats and web search queries": "Aufgabenmodelle können Unterhaltungstitel oder Websuchanfragen generieren.", + "A task model is used when performing tasks such as generating titles for chats and web search queries": "Aufgabenmodelle können Chat-Titel oder Websuchanfragen generieren.", "a user": "ein Benutzer", "About": "Über", "Accept autocomplete generation / Jump to prompt variable": "Automatische Vervollständigung akzeptieren / Zur Prompt-Variable springen", @@ -56,12 +56,12 @@ "All Documents": "Alle Dokumente", "All models deleted successfully": "Alle Modelle erfolgreich gelöscht", "Allow Chat Controls": "Chat-Steuerung erlauben", - "Allow Chat Delete": "Löschen von Unterhaltungen erlauben", - "Allow Chat Deletion": "Löschen von Unterhaltungen erlauben", - "Allow Chat Edit": "Bearbeiten von Unterhaltungen erlauben", + "Allow Chat Delete": "Löschen von Chats erlauben", + "Allow Chat Deletion": "Löschen von Chats erlauben", + "Allow Chat Edit": "Bearbeiten von Chats erlauben", "Allow File Upload": "Hochladen von Dateien erlauben", "Allow non-local voices": "Nicht-lokale Stimmen erlauben", - "Allow Temporary Chat": "Temporäre Unterhaltungen erlauben", + "Allow Temporary Chat": "Temporäre Chats erlauben", "Allow User Location": "Standort freigeben", "Allow Voice Interruption in Call": "Unterbrechung durch Stimme im Anruf zulassen", "Allowed Endpoints": "Erlaubte Endpunkte", @@ -87,13 +87,13 @@ "applies to all users with the \"user\" role": "gilt für alle Benutzer mit der Rolle \"Benutzer\"", "April": "April", "Archive": "Archivieren", - "Archive All Chats": "Alle Unterhaltungen archivieren", - "Archived Chats": "Archivierte Unterhaltungen", + "Archive All Chats": "Alle Chats archivieren", + "Archived Chats": "Archivierte Chats", "archived-chat-export": "archivierter-chat-export", "Are you sure you want to clear all memories? This action cannot be undone.": "Sind Sie sicher, dass Sie alle Erinnerungen löschen möchten? 
Diese Handlung kann nicht rückgängig gemacht werden.", "Are you sure you want to delete this channel?": "Sind Sie sicher, dass Sie diesen Kanal löschen möchten?", "Are you sure you want to delete this message?": "Sind Sie sicher, dass Sie diese Nachricht löschen möchten?", - "Are you sure you want to unarchive all archived chats?": "Sind Sie sicher, dass Sie alle archivierten Unterhaltungen wiederherstellen möchten?", + "Are you sure you want to unarchive all archived chats?": "Sind Sie sicher, dass Sie alle archivierten Chats wiederherstellen möchten?", "Are you sure?": "Sind Sie sicher?", "Arena Models": "Arena-Modelle", "Artifacts": "Artefakte", @@ -152,14 +152,14 @@ "Character limit for autocomplete generation input": "Zeichenlimit für die Eingabe der automatischen Vervollständigung", "Chart new frontiers": "Neue Wege beschreiten", "Chat": "Gespräch", - "Chat Background Image": "Hintergrundbild des Unterhaltungsfensters", + "Chat Background Image": "Hintergrundbild des Chat-Fensters", "Chat Bubble UI": "Chat Bubble UI", "Chat Controls": "Chat-Steuerung", "Chat direction": "Textrichtung", - "Chat Overview": "Unterhaltungsübersicht", - "Chat Permissions": "Unterhaltungsberechtigungen", - "Chat Tags Auto-Generation": "Automatische Generierung von Unterhaltungstags", - "Chats": "Unterhaltungen", + "Chat Overview": "Chat-Übersicht", + "Chat Permissions": "Chat-Berechtigungen", + "Chat Tags Auto-Generation": "Automatische Generierung von Chat-Tags", + "Chats": "Chats", "Check Again": "Erneut überprüfen", "Check for updates": "Nach Updates suchen", "Checking for updates...": "Sucht nach Updates...", @@ -276,11 +276,11 @@ "Default User Role": "Standardbenutzerrolle", "Delete": "Löschen", "Delete a model": "Ein Modell löschen", - "Delete All Chats": "Alle Unterhaltungen löschen", + "Delete All Chats": "Alle Chats löschen", "Delete All Models": "Alle Modelle löschen", - "Delete chat": "Unterhaltung löschen", - "Delete Chat": "Unterhaltung löschen", - "Delete chat?": 
"Unterhaltung löschen?", + "Delete chat": "Chat löschen", + "Delete Chat": "Chat löschen", + "Delete chat?": "Chat löschen?", "Delete folder?": "Ordner löschen?", "Delete function?": "Funktion löschen?", "Delete Message": "Nachricht löschen", @@ -338,7 +338,7 @@ "Download Database": "Datenbank exportieren", "Drag and drop a file to upload or select a file to view": "Ziehen Sie eine Datei zum Hochladen oder wählen Sie eine Datei zum Anzeigen aus", "Draw": "Zeichnen", - "Drop any files here to add to the conversation": "Ziehen Sie beliebige Dateien hierher, um sie der Unterhaltung hinzuzufügen", + "Drop any files here to add to the conversation": "Ziehen Sie beliebige Dateien hierher, um sie dem Chat hinzuzufügen", "e.g. '30s','10m'. Valid time units are 's', 'm', 'h'.": "z. B. '30s','10m'. Gültige Zeiteinheiten sind 's', 'm', 'h'.", "e.g. 60": "z. B. 60", "e.g. A filter to remove profanity from text": "z. B. Ein Filter, um Schimpfwörter aus Text zu entfernen", @@ -467,10 +467,10 @@ "Explain this section to me in more detail": "Erkläre mir diesen Abschnitt im Detail", "Explore the cosmos": "Erforschen Sie das Universum", "Export": "Exportieren", - "Export All Archived Chats": "Alle archivierten Unterhaltungen exportieren", - "Export All Chats (All Users)": "Alle Unterhaltungen exportieren (alle Benutzer)", - "Export chat (.json)": "Unterhaltung exportieren (.json)", - "Export Chats": "Unterhaltungen exportieren", + "Export All Archived Chats": "Alle archivierten Chats exportieren", + "Export All Chats (All Users)": "Alle Chats exportieren (alle Benutzer)", + "Export chat (.json)": "Chat exportieren (.json)", + "Export Chats": "Chats exportieren", "Export Config to JSON File": "Exportiere Konfiguration als JSON-Datei", "Export Functions": "Funktionen exportieren", "Export Models": "Modelle exportieren", @@ -554,7 +554,7 @@ "Group updated successfully": "Gruppe erfolgreich aktualisiert", "Groups": "Gruppen", "Haptic Feedback": "Haptisches Feedback", - "has no 
conversations.": "hat keine Unterhaltungen.", + "has no conversations.": "hat keine Chats.", "Hello, {{name}}": "Hallo, {{name}}", "Help": "Hilfe", "Help us create the best community leaderboard by sharing your feedback history!": "Helfen Sie uns, die beste Community-Bestenliste zu erstellen, indem Sie Ihren Feedback-Verlauf teilen!", @@ -579,7 +579,7 @@ "Image Prompt Generation Prompt": "Prompt für die Bild-Prompt-Generierung", "Image Settings": "Bildeinstellungen", "Images": "Bilder", - "Import Chats": "Unterhaltungen importieren", + "Import Chats": "Chats importieren", "Import Config from JSON File": "Konfiguration aus JSON-Datei importieren", "Import Functions": "Funktionen importieren", "Import Models": "Modelle importieren", @@ -675,7 +675,7 @@ "Memory updated successfully": "Erinnerung erfolgreich aktualisiert", "Merge Responses": "Antworten zusammenführen", "Message rating should be enabled to use this feature": "Antwortbewertung muss aktiviert sein, um diese Funktion zu verwenden", - "Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "Nachrichten, die Sie nach der Erstellung Ihres Links senden, werden nicht geteilt. Nutzer mit der URL können die freigegebene Unterhaltung einsehen.", + "Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "Nachrichten, die Sie nach der Erstellung Ihres Links senden, werden nicht geteilt. 
Nutzer mit der URL können den freigegebenen Chat einsehen.", "Min P": "Min P", "Minimum Score": "Mindestpunktzahl", "Mirostat": "Mirostat", @@ -708,7 +708,7 @@ "Name": "Name", "Name your knowledge base": "Benennen Sie Ihren Wissensspeicher", "Native": "Nativ", - "New Chat": "Neue Unterhaltung", + "New Chat": "Neuer Chat", "New Folder": "Neuer Ordner", "New Password": "Neues Passwort", "new-channel": "neuer-kanal", @@ -865,7 +865,7 @@ "Result": "Ergebnis", "Retrieval": "", "Retrieval Query Generation": "Abfragegenerierung", - "Rich Text Input for Chat": "Rich-Text-Eingabe für Unterhaltungen", + "Rich Text Input for Chat": "Rich-Text-Eingabe für Chats", "RK": "RK", "Role": "Rolle", "Rosé Pine": "Rosé Pine", @@ -879,12 +879,12 @@ "Save As Copy": "Als Kopie speichern", "Save Tag": "Tag speichern", "Saved": "Gespeichert", - "Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "Das direkte Speichern von Unterhaltungen im Browser-Speicher wird nicht mehr unterstützt. Bitte nehmen Sie einen Moment Zeit, um Ihre Unterhaltungen zu exportieren und zu löschen, indem Sie auf die Schaltfläche unten klicken. Keine Sorge, Sie können Ihre Unterhaltungen problemlos über das Backend wieder importieren.", + "Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "Das direkte Speichern von Chats im Browser-Speicher wird nicht mehr unterstützt. Bitte nehmen Sie einen Moment Zeit, um Ihre Chats zu exportieren und zu löschen, indem Sie auf die Schaltfläche unten klicken. 
Keine Sorge, Sie können Ihre Chats problemlos über das Backend wieder importieren.", "Scroll to bottom when switching between branches": "Beim Wechsel zwischen Branches nach unten scrollen", "Search": "Suchen", "Search a model": "Modell suchen", "Search Base": "Suchbasis", - "Search Chats": "Unterhaltungen durchsuchen...", + "Search Chats": "Chats durchsuchen...", "Search Collection": "Sammlung durchsuchen", "Search Filters": "Suchfilter", "search for tags": "nach Tags suchen", @@ -955,7 +955,7 @@ "Settings": "Einstellungen", "Settings saved successfully!": "Einstellungen erfolgreich gespeichert!", "Share": "Teilen", - "Share Chat": "Unterhaltung teilen", + "Share Chat": "Chat teilen", "Share to Open WebUI Community": "Mit OpenWebUI Community teilen", "Show": "Anzeigen", "Show \"What's New\" modal on login": "\"Was gibt's Neues\"-Modal beim Anmelden anzeigen", @@ -977,7 +977,7 @@ "Speech-to-Text Engine": "Sprache-zu-Text-Engine", "Stop": "Stop", "Stop Sequence": "Stop-Sequenz", - "Stream Chat Response": "Unterhaltungsantwort streamen", + "Stream Chat Response": "Chat-Antwort streamen", "STT Model": "STT-Modell", "STT Settings": "STT-Einstellungen", "Subtitle (e.g. about the Roman Empire)": "Untertitel (z. B. 
über das Römische Reich)", @@ -1001,7 +1001,7 @@ "Tell us more:": "Erzähl uns mehr", "Temperature": "Temperatur", "Template": "Vorlage", - "Temporary Chat": "Temporäre Unterhaltung", + "Temporary Chat": "Temporärer Chat", "Text Splitter": "Text-Splitter", "Text-to-Speech Engine": "Text-zu-Sprache-Engine", "Tfs Z": "Tfs Z", @@ -1015,14 +1015,14 @@ "The LDAP attribute that maps to the username that users use to sign in.": "Das LDAP-Attribut, das dem Benutzernamen zugeordnet ist, den Benutzer zum Anmelden verwenden.", "The leaderboard is currently in beta, and we may adjust the rating calculations as we refine the algorithm.": "Die Bestenliste befindet sich derzeit in der Beta-Phase, und es ist möglich, dass wir die Bewertungsberechnungen anpassen, während wir den Algorithmus verfeinern.", "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Die maximale Dateigröße in MB. Wenn die Dateigröße dieses Limit überschreitet, wird die Datei nicht hochgeladen.", - "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Die maximale Anzahl von Dateien, die gleichzeitig in der Unterhaltung verwendet werden können. Wenn die Anzahl der Dateien dieses Limit überschreitet, werden die Dateien nicht hochgeladen.", + "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Die maximale Anzahl von Dateien, die gleichzeitig im Chat verwendet werden können. Wenn die Anzahl der Dateien dieses Limit überschreitet, werden die Dateien nicht hochgeladen.", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Die Punktzahl sollte ein Wert zwischen 0,0 (0 %) und 1,0 (100 %) sein.", "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", "Theme": "Design", "Thinking...": "Denke nach...", "This action cannot be undone. 
Do you wish to continue?": "Diese Aktion kann nicht rückgängig gemacht werden. Möchten Sie fortfahren?", "This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "Dieser Kanal wurde am {{createdAt}} erstellt. Dies ist der Beginn des {{channelName}} Kanals.", - "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Dies stellt sicher, dass Ihre wertvollen Unterhaltungen sicher in Ihrer Backend-Datenbank gespeichert werden. Vielen Dank!", + "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Dies stellt sicher, dass Ihre wertvollen Chats sicher in Ihrer Backend-Datenbank gespeichert werden. Vielen Dank!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Dies ist eine experimentelle Funktion, sie funktioniert möglicherweise nicht wie erwartet und kann jederzeit geändert werden.", "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", "This option sets the maximum number of tokens the model can generate in its response. 
Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", @@ -1039,10 +1039,10 @@ "Tika": "Tika", "Tika Server URL required.": "Tika-Server-URL erforderlich.", "Tiktoken": "Tiktoken", - "Tip: Update multiple variable slots consecutively by pressing the tab key in the chat input after each replacement.": "Tipp: Aktualisieren Sie mehrere Variablenfelder nacheinander, indem Sie nach jedem Ersetzen die Tabulatortaste im Eingabefeld der Unterhaltung drücken.", + "Tip: Update multiple variable slots consecutively by pressing the tab key in the chat input after each replacement.": "Tipp: Aktualisieren Sie mehrere Variablenfelder nacheinander, indem Sie nach jedem Ersetzen die Tabulatortaste im Eingabefeld des Chats drücken.", "Title": "Titel", "Title (e.g. Tell me a fun fact)": "Titel (z. B. Erzähl mir einen lustigen Fakt)", - "Title Auto-Generation": "Unterhaltungstitel automatisch generieren", + "Title Auto-Generation": "Chat-Titel automatisch generieren", "Title cannot be an empty string.": "Titel darf nicht leer sein.", "Title Generation": "Titelgenerierung", "Title Generation Prompt": "Prompt für Titelgenerierung", @@ -1052,7 +1052,7 @@ "To access the WebUI, please reach out to the administrator. Admins can manage user statuses from the Admin Panel.": "Um auf das WebUI zugreifen zu können, wenden Sie sich bitte an einen Administrator. 
Administratoren können den Benutzerstatus über das Admin-Panel verwalten.", "To attach knowledge base here, add them to the \"Knowledge\" workspace first.": "Um Wissensdatenbanken hier anzuhängen, fügen Sie sie zunächst dem Arbeitsbereich \"Wissen\" hinzu.", "To learn more about available endpoints, visit our documentation.": "Um mehr über verfügbare Endpunkte zu erfahren, besuchen Sie unsere Dokumentation.", - "To protect your privacy, only ratings, model IDs, tags, and metadata are shared from your feedback—your chat logs remain private and are not included.": "Um Ihre Privatsphäre zu schützen, werden nur Bewertungen, Modell-IDs, Tags und Metadaten aus Ihrem Feedback geteilt – Ihre Unterhaltungen bleiben privat und werden nicht einbezogen.", + "To protect your privacy, only ratings, model IDs, tags, and metadata are shared from your feedback—your chat logs remain private and are not included.": "Um Ihre Privatsphäre zu schützen, werden nur Bewertungen, Modell-IDs, Tags und Metadaten aus Ihrem Feedback geteilt – Ihre Chats bleiben privat und werden nicht einbezogen.", "To select actions here, add them to the \"Functions\" workspace first.": "Um Aktionen auszuwählen, fügen Sie diese zunächst dem Arbeitsbereich „Funktionen“ hinzu.", "To select filters here, add them to the \"Functions\" workspace first.": "Um Filter auszuwählen, fügen Sie diese zunächst dem Arbeitsbereich „Funktionen“ hinzu.", "To select toolkits here, add them to the \"Tools\" workspace first.": "Um Toolkits auszuwählen, fügen Sie sie zunächst dem Arbeitsbereich „Werkzeuge“ hinzu.", @@ -1089,8 +1089,8 @@ "Uh-oh! There was an issue with the response.": "Oh nein! 
Es gab ein Problem mit der Antwort.", "UI": "Oberfläche", "Unarchive All": "Alle wiederherstellen", - "Unarchive All Archived Chats": "Alle archivierten Unterhaltungen wiederherstellen", - "Unarchive Chat": "Unterhaltung wiederherstellen", + "Unarchive All Archived Chats": "Alle archivierten Chats wiederherstellen", + "Unarchive Chat": "Chat wiederherstellen", "Unlock mysteries": "Geheimnisse entsperren", "Unpin": "Lösen", "Unravel secrets": "Geheimnisse lüften", @@ -1179,8 +1179,8 @@ "You cannot upload an empty file.": "Sie können keine leere Datei hochladen.", "You do not have permission to upload files": "Sie haben keine Berechtigung, Dateien hochzuladen", "You do not have permission to upload files.": "Sie haben keine Berechtigung zum Hochladen von Dateien.", - "You have no archived conversations.": "Du hast keine archivierten Unterhaltungen.", - "You have shared this chat": "Sie haben diese Unterhaltung geteilt", + "You have no archived conversations.": "Du hast keine archivierten Chats.", + "You have shared this chat": "Sie haben diesen Chat geteilt", "You're a helpful assistant.": "Du bist ein hilfreicher Assistent.", "You're now logged in.": "Sie sind jetzt eingeloggt.", "Your account status is currently pending activation.": "Ihr Kontostatus ist derzeit ausstehend und wartet auf Aktivierung.", From f2866ed85840a3703efded5eb2494c1785affa0f Mon Sep 17 00:00:00 2001 From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com> Date: Sun, 23 Mar 2025 17:11:37 +0100 Subject: [PATCH 381/623] Adjust naming --- src/lib/i18n/locales/de-DE/translation.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/lib/i18n/locales/de-DE/translation.json b/src/lib/i18n/locales/de-DE/translation.json index a591802311d..ee0417d8c20 100644 --- a/src/lib/i18n/locales/de-DE/translation.json +++ b/src/lib/i18n/locales/de-DE/translation.json @@ -47,8 +47,8 @@ "Adjusting these settings will apply changes universally to all users.": "Das Anpassen dieser 
Einstellungen wird Änderungen universell auf alle Benutzer anwenden.", "admin": "Administrator", "Admin": "Administrator", - "Admin Panel": "Administrationsbereich", - "Admin Settings": "Administrationsbereich", + "Admin Panel": "Administration", + "Admin Settings": "Administration", "Admins have access to all tools at all times; users need tools assigned per model in the workspace.": "Administratoren haben jederzeit Zugriff auf alle Werkzeuge. Benutzer können im Arbeitsbereich zugewiesen.", "Advanced Parameters": "Erweiterte Parameter", "Advanced Params": "Erweiterte Parameter", @@ -153,7 +153,7 @@ "Chart new frontiers": "Neue Wege beschreiten", "Chat": "Gespräch", "Chat Background Image": "Hintergrundbild des Chat-Fensters", - "Chat Bubble UI": "Chat Bubble UI", + "Chat Bubble UI": "Sprechblasen-Layout", "Chat Controls": "Chat-Steuerung", "Chat direction": "Textrichtung", "Chat Overview": "Chat-Übersicht", @@ -596,7 +596,7 @@ "Install from Github URL": "Installiere von der Github-URL", "Instant Auto-Send After Voice Transcription": "Spracherkennung direkt absenden", "Integration": "", - "Interface": "Benutzeroberfläche", + "Interface": "Oberfläche", "Invalid file format.": "Ungültiges Dateiformat.", "Invalid Tag": "Ungültiger Tag", "is typing...": "schreibt ...", From c714bd87390d12812ef1fea3d387bbfb70cda57d Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Sun, 23 Mar 2025 11:45:55 -0700 Subject: [PATCH 382/623] refac --- backend/open_webui/utils/middleware.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/backend/open_webui/utils/middleware.py b/backend/open_webui/utils/middleware.py index ccb45986545..d97baf92e8a 100644 --- a/backend/open_webui/utils/middleware.py +++ b/backend/open_webui/utils/middleware.py @@ -1079,8 +1079,6 @@ async def background_tasks_handler(): for filter_id in get_sorted_filter_ids(model) ] - print(f"{filter_functions=}") - # Streaming response if event_emitter and event_caller: task_id = str(uuid4()) # Create a unique task 
ID. From e5b7188379553b52436776af8ed85fa7b77fcc2f Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Sun, 23 Mar 2025 11:50:40 -0700 Subject: [PATCH 383/623] refac: ollama only param --- src/lib/components/chat/Settings/Advanced/AdvancedParams.svelte | 1 + 1 file changed, 1 insertion(+) diff --git a/src/lib/components/chat/Settings/Advanced/AdvancedParams.svelte b/src/lib/components/chat/Settings/Advanced/AdvancedParams.svelte index 59d230d1b7f..67b1f4dc107 100644 --- a/src/lib/components/chat/Settings/Advanced/AdvancedParams.svelte +++ b/src/lib/components/chat/Settings/Advanced/AdvancedParams.svelte @@ -961,6 +961,7 @@
{$i18n.t('Context Length')} + {$i18n.t('(Ollama)')}
+
+ +
+
+
{ + e.preventDefault(); + submitHandler(); + }} + > +
+
+
+
{$i18n.t('URL')}
+ +
+ +
+
+ +
+ + + +
+
+ +
+ {$i18n.t(`WebUI will make requests to "{{URL}}/openapi.json"`, { + URL: url + })} +
+ +
+
+
{$i18n.t('Key')}
+ +
+ +
+
+
+
+ +
+ {#if edit} + + {/if} + + +
+
+
+
+
+ diff --git a/src/lib/components/chat/Chat.svelte b/src/lib/components/chat/Chat.svelte index 2892d436cfb..fe733d616e9 100644 --- a/src/lib/components/chat/Chat.svelte +++ b/src/lib/components/chat/Chat.svelte @@ -119,6 +119,9 @@ let imageGenerationEnabled = false; let webSearchEnabled = false; let codeInterpreterEnabled = false; + + let toolServers = []; + let chat = null; let tags = []; @@ -191,6 +194,8 @@ setToolIds(); } + $: toolServers = ($settings?.toolServers ?? []).filter((server) => server?.config?.enable); + const setToolIds = async () => { if (!$tools) { tools.set(await getTools(localStorage.token)); @@ -2033,6 +2038,7 @@ bind:codeInterpreterEnabled bind:webSearchEnabled bind:atSelectedModel + {toolServers} transparentBackground={$settings?.backgroundImageUrl ?? false} {stopResponse} {createMessagePair} @@ -2086,6 +2092,7 @@ bind:webSearchEnabled bind:atSelectedModel transparentBackground={$settings?.backgroundImageUrl ?? false} + {toolServers} {stopResponse} {createMessagePair} on:upload={async (e) => { diff --git a/src/lib/components/chat/MessageInput.svelte b/src/lib/components/chat/MessageInput.svelte index 7db31010b63..a1f82ed4452 100644 --- a/src/lib/components/chat/MessageInput.svelte +++ b/src/lib/components/chat/MessageInput.svelte @@ -68,6 +68,8 @@ export let prompt = ''; export let files = []; + export let toolServers = []; + export let selectedToolIds = []; export let imageGenerationEnabled = false; @@ -1175,14 +1177,14 @@ @@ -1195,13 +1197,13 @@ on:click|preventDefault={() => (imageGenerationEnabled = !imageGenerationEnabled)} type="button" - class="px-1.5 @sm:px-2.5 py-1.5 flex gap-1.5 items-center text-sm rounded-full font-medium transition-colors duration-300 focus:outline-hidden max-w-full overflow-hidden {imageGenerationEnabled + class="px-1.5 @lg:px-2.5 py-1.5 flex gap-1.5 items-center text-sm rounded-full font-medium transition-colors duration-300 focus:outline-hidden max-w-full overflow-hidden {imageGenerationEnabled ? 
'bg-gray-100 dark:bg-gray-500/20 text-gray-600 dark:text-gray-400' : 'bg-transparent text-gray-600 dark:text-gray-300 border-gray-200 hover:bg-gray-100 dark:hover:bg-gray-800 '}" > @@ -1214,13 +1216,13 @@ on:click|preventDefault={() => (codeInterpreterEnabled = !codeInterpreterEnabled)} type="button" - class="px-1.5 @sm:px-2.5 py-1.5 flex gap-1.5 items-center text-sm rounded-full font-medium transition-colors duration-300 focus:outline-hidden max-w-full overflow-hidden {codeInterpreterEnabled + class="px-1.5 @lg:px-2.5 py-1.5 flex gap-1.5 items-center text-sm rounded-full font-medium transition-colors duration-300 focus:outline-hidden max-w-full overflow-hidden {codeInterpreterEnabled ? 'bg-gray-100 dark:bg-gray-500/20 text-gray-600 dark:text-gray-400' : 'bg-transparent text-gray-600 dark:text-gray-300 border-gray-200 hover:bg-gray-100 dark:hover:bg-gray-800 '}" > @@ -1231,6 +1233,43 @@
+ {#if toolServers.length > 0} + +
+ + + + + + + {toolServers.length} + +
+
+ {/if} + {#if !history?.currentId || history.messages[history.currentId]?.done == true} + +
+ +
+ {#each servers as server, idx} + { + updateHandler(); + }} + onDelete={() => { + servers = servers.filter((_, i) => i !== idx); + updateHandler(); + }} + /> + {/each} +
+
+ +
+
+ {$i18n.t('Connect to your own OpenAPI compatible external tool servers.')} +
+ {$i18n.t( + 'CORS must be properly configured by the provider to allow requests from Open WebUI.' + )} +
+
+
+
+ {:else} +
+
+ +
+
+ {/if} +
+ +
+ +
+ diff --git a/src/lib/components/chat/Settings/Tools/Connection.svelte b/src/lib/components/chat/Settings/Tools/Connection.svelte new file mode 100644 index 00000000000..b61bac87888 --- /dev/null +++ b/src/lib/components/chat/Settings/Tools/Connection.svelte @@ -0,0 +1,96 @@ + + + { + showDeleteConfirmDialog = true; + }} + onSubmit={(connection) => { + url = connection.url; + key = connection.key; + config = connection.config; + onSubmit(connection); + }} +/> + + { + onDelete(); + showConfigModal = false; + }} +/> + +
+ + {#if !(config?.enable ?? true)} +
+ {/if} +
+
+ +
+ + +
+
+ +
+ + + +
+
diff --git a/src/lib/components/chat/SettingsModal.svelte b/src/lib/components/chat/SettingsModal.svelte index 7d32a9718cd..1e341f38086 100644 --- a/src/lib/components/chat/SettingsModal.svelte +++ b/src/lib/components/chat/SettingsModal.svelte @@ -17,6 +17,7 @@ import Personalization from './Settings/Personalization.svelte'; import Search from '../icons/Search.svelte'; import Connections from './Settings/Connections.svelte'; + import Tools from './Settings/Tools.svelte'; const i18n = getContext('i18n'); @@ -127,6 +128,11 @@ title: 'Connections', keywords: [] }, + { + id: 'tools', + title: 'Tools', + keywords: [] + }, { id: 'personalization', title: 'Personalization', @@ -481,6 +487,34 @@
{$i18n.t('Connections')}
{/if} + {:else if tabId === 'tools'} + {#if $user.role === 'admin' || ($user.role === 'user' && $config?.features?.enable_direct_tools)} + + {/if} {:else if tabId === 'personalization'}
diff --git a/src/lib/components/AddServerModal.svelte b/src/lib/components/AddServerModal.svelte index fed9f0477b2..160701d27bd 100644 --- a/src/lib/components/AddServerModal.svelte +++ b/src/lib/components/AddServerModal.svelte @@ -132,8 +132,8 @@
- {$i18n.t(`WebUI will make requests to "{{URL}}/openapi.json"`, { - URL: url + {$i18n.t(`WebUI will make requests to "{{url}}/openapi.json"`, { + url: url })}
diff --git a/src/lib/components/admin/Settings/Documents.svelte b/src/lib/components/admin/Settings/Documents.svelte index 3edfa1b1d0d..9188a1536a2 100644 --- a/src/lib/components/admin/Settings/Documents.svelte +++ b/src/lib/components/admin/Settings/Documents.svelte @@ -782,7 +782,6 @@
{/if} - {#if querySettings.hybrid === true}
diff --git a/src/lib/components/chat/Settings/Tools/Connection.svelte b/src/lib/components/chat/Settings/Tools/Connection.svelte index b61bac87888..50416d6a5e1 100644 --- a/src/lib/components/chat/Settings/Tools/Connection.svelte +++ b/src/lib/components/chat/Settings/Tools/Connection.svelte @@ -52,8 +52,8 @@
diff --git a/src/lib/i18n/locales/ar-BH/translation.json b/src/lib/i18n/locales/ar-BH/translation.json index 39665f802e8..14d14b798fa 100644 --- a/src/lib/i18n/locales/ar-BH/translation.json +++ b/src/lib/i18n/locales/ar-BH/translation.json @@ -6,6 +6,7 @@ "(latest)": "(الأخير)", "(Ollama)": "", "{{ models }}": "{{ نماذج }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "", "{{user}}'s Chats": "دردشات {{user}}", @@ -215,6 +216,7 @@ "Confirm your action": "", "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "اتصالات", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "أدخل Top K", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "الرابط (e.g. http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "URL (e.g. 
http://localhost:11434)", "Enter your current password": "", @@ -634,8 +637,8 @@ "LDAP server updated": "", "Leaderboard": "", "Leave empty for unlimited": "", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "", "Leave empty to use the default prompt, or enter a custom prompt": "", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "", "Manage OpenAI API Connections": "", "Manage Pipelines": "إدارة خطوط الأنابيب", + "Manage Tool Servers": "", "March": "مارس", "Max Tokens (num_predict)": "ماكس توكنز (num_predict)", "Max Upload Count": "", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", "Top K": "Top K", + "Top K Reranker": "", "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "هل تواجه مشكلة في الوصول", @@ -1155,6 +1160,7 @@ "WebUI URL": "", "WebUI will make requests to \"{{url}}/api/chat\"": "", "WebUI will make requests to \"{{url}}/chat/completions\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "", "What are you working on?": "", "What’s New in": "ما هو الجديد", diff --git a/src/lib/i18n/locales/bg-BG/translation.json b/src/lib/i18n/locales/bg-BG/translation.json index d1a3ac354ef..1a3e095b327 100644 --- a/src/lib/i18n/locales/bg-BG/translation.json +++ b/src/lib/i18n/locales/bg-BG/translation.json @@ -6,6 +6,7 @@ "(latest)": "(последна)", "(Ollama)": "", "{{ models }}": "{{ models }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": 
"{{COUNT}} Отговори", "{{user}}'s Chats": "{{user}}'s чатове", @@ -215,6 +216,7 @@ "Confirm your action": "Потвърдете действието си", "Confirm your new password": "Потвърдете новата си парола", "Connect to your own OpenAI compatible API endpoints.": "Свържете се със собствени крайни точки на API, съвместими с OpenAI.", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "Връзки", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Свържете се с администратор за достъп до WebUI", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "Въведете Top K", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "Въведете URL (напр. http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "Въведете URL (напр. http://localhost:11434)", "Enter your current password": "Въведете текущата си парола", @@ -634,8 +637,8 @@ "LDAP server updated": "LDAP сървърът е актуализиран", "Leaderboard": "Класация", "Leave empty for unlimited": "Оставете празно за неограничено", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "Оставете празно, за да включите всички модели от крайната точка \"{{URL}}/api/tags\"", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "Оставете празно, за да включите всички модели от крайната точка \"{{URL}}/models\"", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "Оставете празно, за да включите всички модели или изберете конкретни модели", "Leave empty to use the default prompt, or enter a custom prompt": "Оставете празно, за да използвате промпта по подразбиране, или въведете персонализиран промпт", "Leave model field 
empty to use the default model.": "Оставете полето за модел празно, за да използвате модела по подразбиране.", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "Управление на Ollama API връзки", "Manage OpenAI API Connections": "Управление на OpenAI API връзки", "Manage Pipelines": "Управление на пайплайни", + "Manage Tool Servers": "", "March": "Март", "Max Tokens (num_predict)": "Макс токени (num_predict)", "Max Upload Count": "Максимален брой качвания", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "Инструментите имат система за извикване на функции, която позволява произволно изпълнение на код", "Tools have a function calling system that allows arbitrary code execution.": "Инструментите имат система за извикване на функции, която позволява произволно изпълнение на код.", "Top K": "Топ K", + "Top K Reranker": "", "Top P": "Топ P", "Transformers": "Трансформатори", "Trouble accessing Ollama?": "Проблеми с достъпа до Ollama?", @@ -1155,6 +1160,7 @@ "WebUI URL": "WebUI URL", "WebUI will make requests to \"{{url}}/api/chat\"": "WebUI ще прави заявки към \"{{url}}/api/chat\"", "WebUI will make requests to \"{{url}}/chat/completions\"": "WebUI ще прави заявки към \"{{url}}/chat/completions\"", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "Какво се опитвате да постигнете?", "What are you working on?": "Върху какво работите?", "What’s New in": "", diff --git a/src/lib/i18n/locales/bn-BD/translation.json b/src/lib/i18n/locales/bn-BD/translation.json index 04339323d52..3c6da453823 100644 --- a/src/lib/i18n/locales/bn-BD/translation.json +++ b/src/lib/i18n/locales/bn-BD/translation.json @@ -6,6 +6,7 @@ "(latest)": "(সর্বশেষ)", "(Ollama)": "", "{{ models }}": "{{ মডেল}}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "", "{{user}}'s Chats": "{{user}}র চ্যাটস", @@ -215,6 +216,7 @@ "Confirm your action": "", "Confirm your 
new password": "", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "কানেকশনগুলো", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "Top K লিখুন", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "ইউআরএল দিন (যেমন http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "ইউআরএল দিন (যেমন http://localhost:11434)", "Enter your current password": "", @@ -634,8 +637,8 @@ "LDAP server updated": "", "Leaderboard": "", "Leave empty for unlimited": "", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "", "Leave empty to use the default prompt, or enter a custom prompt": "", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "", "Manage OpenAI API Connections": "", "Manage Pipelines": "পাইপলাইন পরিচালনা করুন", + "Manage Tool Servers": "", "March": "মার্চ", "Max Tokens (num_predict)": "সর্বোচ্চ টোকেন (num_predict)", "Max Upload Count": "", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", "Top K": "Top K", + "Top K Reranker": "", "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "Ollama এক্সেস করতে সমস্যা হচ্ছে?", @@ -1155,6 +1160,7 @@ "WebUI URL": "", "WebUI will make requests to 
\"{{url}}/api/chat\"": "", "WebUI will make requests to \"{{url}}/chat/completions\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "", "What are you working on?": "", "What’s New in": "এতে নতুন কী", diff --git a/src/lib/i18n/locales/ca-ES/translation.json b/src/lib/i18n/locales/ca-ES/translation.json index ea63ff1cc39..4a399f4abee 100644 --- a/src/lib/i18n/locales/ca-ES/translation.json +++ b/src/lib/i18n/locales/ca-ES/translation.json @@ -6,6 +6,7 @@ "(latest)": "(últim)", "(Ollama)": "", "{{ models }}": "{{ models }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "{{COUNT}} línies ocultes", "{{COUNT}} Replies": "{{COUNT}} respostes", "{{user}}'s Chats": "Els xats de {{user}}", @@ -215,6 +216,7 @@ "Confirm your action": "Confirma la teva acció", "Confirm your new password": "Confirma la teva nova contrasenya", "Connect to your own OpenAI compatible API endpoints.": "Connecta als teus propis punts de connexió de l'API compatible amb OpenAI", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "Connexions", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "Restringeix l'esforç de raonament dels models de raonament. Només aplicable a models de raonament de proveïdors específics que donen suport a l'esforç de raonament.", "Contact Admin for WebUI Access": "Posat en contacte amb l'administrador per accedir a WebUI", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "Entra el temps màxim en segons", "Enter to Send": "Enter per enviar", "Enter Top K": "Introdueix Top K", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "Introdueix l'URL (p. ex. http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "Introdueix l'URL (p. ex. 
http://localhost:11434)", "Enter your current password": "Introdueix la teva contrasenya actual", @@ -634,8 +637,8 @@ "LDAP server updated": "Servidor LDAP actualitzat", "Leaderboard": "Tauler de classificació", "Leave empty for unlimited": "Deixar-ho buit per il·limitat", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "Deixar-ho buit per incloure tots els models del punt de connexió \"{{URL}}/api/tags\"", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "Deixar-ho buit per incloure tots els models del punt de connexió \"{{URL}}/models\"", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "Deixa-ho en blanc per incloure tots els models o selecciona models específics", "Leave empty to use the default prompt, or enter a custom prompt": "Deixa-ho en blanc per utilitzar la indicació predeterminada o introdueix una indicació personalitzada", "Leave model field empty to use the default model.": "Deixa el camp de model buit per utilitzar el model per defecte.", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "Gestionar les connexions a l'API d'Ollama", "Manage OpenAI API Connections": "Gestionar les connexions a l'API d'OpenAI", "Manage Pipelines": "Gestionar les Pipelines", + "Manage Tool Servers": "", "March": "Març", "Max Tokens (num_predict)": "Nombre màxim de Tokens (num_predict)", "Max Upload Count": "Nombre màxim de càrregues", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "Les eines disposen d'un sistema de crida a funcions que permet execució de codi arbitrari", "Tools have a function calling system that allows arbitrary code execution.": "Les eines disposen d'un sistema de crida a funcions que permet execució de codi arbitrari.", "Top K": "Top K", + "Top K Reranker": "", "Top P": "Top P", 
"Transformers": "Transformadors", "Trouble accessing Ollama?": "Problemes en accedir a Ollama?", @@ -1155,6 +1160,7 @@ "WebUI URL": "URL de WebUI", "WebUI will make requests to \"{{url}}/api/chat\"": "WebUI farà peticions a \"{{url}}/api/chat\"", "WebUI will make requests to \"{{url}}/chat/completions\"": "WebUI farà peticions a \"{{url}}/chat/completions\"", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "Què intentes aconseguir?", "What are you working on?": "En què estàs treballant?", "What’s New in": "Què hi ha de nou a", diff --git a/src/lib/i18n/locales/ceb-PH/translation.json b/src/lib/i18n/locales/ceb-PH/translation.json index ed6d68a3e59..cdeeea210a5 100644 --- a/src/lib/i18n/locales/ceb-PH/translation.json +++ b/src/lib/i18n/locales/ceb-PH/translation.json @@ -6,6 +6,7 @@ "(latest)": "", "(Ollama)": "", "{{ models }}": "", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "", "{{user}}'s Chats": "", @@ -215,6 +216,7 @@ "Confirm your action": "", "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "Mga koneksyon", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "Pagsulod sa Top K", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "Pagsulod sa URL (e.g. http://127.0.0.1:7860/)", "Enter URL (e.g. 
http://localhost:11434)": "", "Enter your current password": "", @@ -634,8 +637,8 @@ "LDAP server updated": "", "Leaderboard": "", "Leave empty for unlimited": "", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "", "Leave empty to use the default prompt, or enter a custom prompt": "", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "", "Manage OpenAI API Connections": "", "Manage Pipelines": "", + "Manage Tool Servers": "", "March": "", "Max Tokens (num_predict)": "", "Max Upload Count": "", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", "Top K": "Top K", + "Top K Reranker": "", "Top P": "Ibabaw nga P", "Transformers": "", "Trouble accessing Ollama?": "Adunay mga problema sa pag-access sa Ollama?", @@ -1155,6 +1160,7 @@ "WebUI URL": "", "WebUI will make requests to \"{{url}}/api/chat\"": "", "WebUI will make requests to \"{{url}}/chat/completions\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "", "What are you working on?": "", "What’s New in": "Unsay bag-o sa", diff --git a/src/lib/i18n/locales/cs-CZ/translation.json b/src/lib/i18n/locales/cs-CZ/translation.json index 525e274e584..fcbeefa6a94 100644 --- a/src/lib/i18n/locales/cs-CZ/translation.json +++ b/src/lib/i18n/locales/cs-CZ/translation.json @@ -6,6 +6,7 @@ "(latest)": "Nejnovější", "(Ollama)": "", "{{ models }}": "{{ models }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "", "{{user}}'s 
Chats": "{{user}}'s konverzace", @@ -215,6 +216,7 @@ "Confirm your action": "Potvrďte svoji akci", "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "Připojení", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Kontaktujte administrátora pro přístup k webovému rozhraní.", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "Zadejte horní K", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "Zadejte URL (např. http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "Zadejte URL (např. http://localhost:11434)", "Enter your current password": "", @@ -634,8 +637,8 @@ "LDAP server updated": "", "Leaderboard": "Žebříček", "Leave empty for unlimited": "Nechte prázdné pro neomezeně", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "Nechte prázdné pro zahrnutí všech modelů nebo vyberte konkrétní modely.", "Leave empty to use the default prompt, or enter a custom prompt": "Nechte prázdné pro použití výchozího podnětu, nebo zadejte vlastní podnět.", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "", "Manage OpenAI API Connections": "", "Manage Pipelines": "Správa pipelines", + "Manage Tool Servers": "", "March": "Březen", "Max Tokens (num_predict)": "Maximální počet tokenů (num_predict)", "Max Upload Count": "Maximální počet nahrání", @@ -1078,6 +1082,7 @@ "Tools 
have a function calling system that allows arbitrary code execution": "Nástroje mají systém volání funkcí, který umožňuje libovolné spouštění kódu.", "Tools have a function calling system that allows arbitrary code execution.": "Nástroje mají systém volání funkcí, který umožňuje spuštění libovolného kódu.", "Top K": "Top K", + "Top K Reranker": "", "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "Máte potíže s přístupem k Ollama?", @@ -1155,6 +1160,7 @@ "WebUI URL": "", "WebUI will make requests to \"{{url}}/api/chat\"": "", "WebUI will make requests to \"{{url}}/chat/completions\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "", "What are you working on?": "", "What’s New in": "Co je nového v", diff --git a/src/lib/i18n/locales/da-DK/translation.json b/src/lib/i18n/locales/da-DK/translation.json index 104f5ae5be0..57bf5449d0a 100644 --- a/src/lib/i18n/locales/da-DK/translation.json +++ b/src/lib/i18n/locales/da-DK/translation.json @@ -6,6 +6,7 @@ "(latest)": "(seneste)", "(Ollama)": "", "{{ models }}": "{{ modeller }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "", "{{user}}'s Chats": "{{user}}s chats", @@ -215,6 +216,7 @@ "Confirm your action": "Bekræft din handling", "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "Forbindelser", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Kontakt din administrator for adgang til WebUI", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "Indtast Top K", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "Indtast URL (f.eks. http://127.0.0.1:7860/)", "Enter URL (e.g. 
http://localhost:11434)": "Indtast URL (f.eks. http://localhost:11434)", "Enter your current password": "", @@ -634,8 +637,8 @@ "LDAP server updated": "", "Leaderboard": "", "Leave empty for unlimited": "Lad stå tomt for ubegrænset", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "", "Leave empty to use the default prompt, or enter a custom prompt": "Lad stå tomt for at bruge standardprompten, eller indtast en brugerdefineret prompt", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "", "Manage OpenAI API Connections": "", "Manage Pipelines": "Administrer pipelines", + "Manage Tool Servers": "", "March": "Marts", "Max Tokens (num_predict)": "Maks. tokens (num_predict)", "Max Upload Count": "Maks. 
uploadantal", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "Værktøjer har et funktionkaldssystem, der tillader vilkårlig kodeudførelse", "Tools have a function calling system that allows arbitrary code execution.": "Værktøjer har et funktionkaldssystem, der tillader vilkårlig kodeudførelse.", "Top K": "Top K", + "Top K Reranker": "", "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "Problemer med at få adgang til Ollama?", @@ -1155,6 +1160,7 @@ "WebUI URL": "", "WebUI will make requests to \"{{url}}/api/chat\"": "", "WebUI will make requests to \"{{url}}/chat/completions\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "", "What are you working on?": "", "What’s New in": "Nyheder i", diff --git a/src/lib/i18n/locales/de-DE/translation.json b/src/lib/i18n/locales/de-DE/translation.json index 172bc6b8f5e..126a879d11a 100644 --- a/src/lib/i18n/locales/de-DE/translation.json +++ b/src/lib/i18n/locales/de-DE/translation.json @@ -6,6 +6,7 @@ "(latest)": "(neueste)", "(Ollama)": "", "{{ models }}": "{{ Modelle }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "{{COUNT}} versteckte Zeilen", "{{COUNT}} Replies": "{{COUNT}} Antworten", "{{user}}'s Chats": "{{user}}s Chats", @@ -215,6 +216,7 @@ "Confirm your action": "Bestätigen Sie Ihre Aktion.", "Confirm your new password": "Neues Passwort bestätigen", "Connect to your own OpenAI compatible API endpoints.": "Verbinden Sie sich zu Ihren OpenAI-kompatiblen Endpunkten.", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "Verbindungen", "Constrains effort on reasoning for reasoning models. 
Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Kontaktieren Sie den Administrator für den Zugriff auf die Weboberfläche", @@ -635,8 +637,8 @@ "LDAP server updated": "LDAP-Server aktualisiert", "Leaderboard": "Bestenliste", "Leave empty for unlimited": "Leer lassen für unbegrenzt", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "Leer lassen, um alle Modelle vom \"{{URL}}/api/tags\"-Endpunkt einzuschließen", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "Leer lassen, um alle Modelle vom \"{{URL}}/models\"-Endpunkt einzuschließen", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "Leer lassen, um alle Modelle einzuschließen oder spezifische Modelle auszuwählen", "Leave empty to use the default prompt, or enter a custom prompt": "Leer lassen, um den Standardprompt zu verwenden, oder geben Sie einen benutzerdefinierten Prompt ein", "Leave model field empty to use the default model.": "Leer lassen, um das Standardmodell zu verwenden.", @@ -663,6 +665,7 @@ "Manage Ollama API Connections": "Ollama-API-Verbindungen verwalten", "Manage OpenAI API Connections": "OpenAI-API-Verbindungen verwalten", "Manage Pipelines": "Pipelines verwalten", + "Manage Tool Servers": "", "March": "März", "Max Tokens (num_predict)": "Maximale Tokenanzahl (num_predict)", "Max Upload Count": "Maximale Anzahl der Uploads", @@ -1079,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "Werkzeuge verfügen über ein Funktionssystem, das die Ausführung beliebigen Codes ermöglicht", "Tools have a function calling system that allows arbitrary code execution.": "Werkzeuge verfügen über ein Funktionssystem, das die Ausführung beliebigen Codes ermöglicht.", "Top K": "Top
K", + "Top K Reranker": "", "Top P": "Top P", "Transformers": "Transformers", "Trouble accessing Ollama?": "Probleme beim Zugriff auf Ollama?", @@ -1156,6 +1160,7 @@ "WebUI URL": "WebUI-URL", "WebUI will make requests to \"{{url}}/api/chat\"": "WebUI wird Anfragen an \"{{url}}/api/chat\" senden", "WebUI will make requests to \"{{url}}/chat/completions\"": "WebUI wird Anfragen an \"{{url}}/chat/completions\" senden", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "Was versuchen Sie zu erreichen?", "What are you working on?": "Woran arbeiten Sie?", "What’s New in": "Neuigkeiten von", diff --git a/src/lib/i18n/locales/dg-DG/translation.json b/src/lib/i18n/locales/dg-DG/translation.json index b100be158b2..473956c11c0 100644 --- a/src/lib/i18n/locales/dg-DG/translation.json +++ b/src/lib/i18n/locales/dg-DG/translation.json @@ -6,6 +6,7 @@ "(latest)": "(much latest)", "(Ollama)": "", "{{ models }}": "", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "", "{{user}}'s Chats": "", @@ -215,6 +216,7 @@ "Confirm your action": "", "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "Connections", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "Enter Top Wow", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "Enter URL (e.g. http://127.0.0.1:7860/)", "Enter URL (e.g. 
http://localhost:11434)": "", "Enter your current password": "", @@ -634,8 +637,8 @@ "LDAP server updated": "", "Leaderboard": "", "Leave empty for unlimited": "", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "", "Leave empty to use the default prompt, or enter a custom prompt": "", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "", "Manage OpenAI API Connections": "", "Manage Pipelines": "", + "Manage Tool Servers": "", "March": "", "Max Tokens (num_predict)": "", "Max Upload Count": "", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", "Top K": "Top K very top", + "Top K Reranker": "", "Top P": "Top P very top", "Transformers": "", "Trouble accessing Ollama?": "Trouble accessing Ollama? 
Much trouble?", @@ -1155,6 +1160,7 @@ "WebUI URL": "", "WebUI will make requests to \"{{url}}/api/chat\"": "", "WebUI will make requests to \"{{url}}/chat/completions\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "", "What are you working on?": "", "What’s New in": "What’s New in much new", diff --git a/src/lib/i18n/locales/el-GR/translation.json b/src/lib/i18n/locales/el-GR/translation.json index fc5e4da4c01..8306f9d7406 100644 --- a/src/lib/i18n/locales/el-GR/translation.json +++ b/src/lib/i18n/locales/el-GR/translation.json @@ -6,6 +6,7 @@ "(latest)": "(τελευταίο)", "(Ollama)": "", "{{ models }}": "{{ models }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "", "{{user}}'s Chats": "Συνομιλίες του {{user}}", @@ -215,6 +216,7 @@ "Confirm your action": "Επιβεβαιώστε την ενέργειά σας", "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "Συνδέσεις", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Επικοινωνήστε με τον Διαχειριστή για Πρόσβαση στο WebUI", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "Εισάγετε το Top K", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "Εισάγετε το URL (π.χ. http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "Εισάγετε το URL (π.χ. 
http://localhost:11434)", "Enter your current password": "", @@ -634,8 +637,8 @@ "LDAP server updated": "Ο διακομιστής LDAP ενημερώθηκε", "Leaderboard": "Κατάταξη", "Leave empty for unlimited": "Αφήστε κενό για απεριόριστο", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "Αφήστε κενό για να συμπεριλάβετε όλα τα μοντέλα από το endpoint \"{{URL}}/api/tags\"", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "Αφήστε κενό για να συμπεριλάβετε όλα τα μοντέλα από το endpoint \"{{URL}}/models\"", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "Αφήστε κενό για να χρησιμοποιήσετε όλα τα μοντέλα ή επιλέξτε συγκεκριμένα μοντέλα", "Leave empty to use the default prompt, or enter a custom prompt": "Αφήστε κενό για να χρησιμοποιήσετε την προεπιλεγμένη προτροπή, ή εισάγετε μια προσαρμοσμένη προτροπή", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "Διαχείριση Συνδέσεων API Ollama", "Manage OpenAI API Connections": "Διαχείριση Συνδέσεων API OpenAI", "Manage Pipelines": "Διαχείριση Καναλιών", + "Manage Tool Servers": "", "March": "Μάρτιος", "Max Tokens (num_predict)": "Μέγιστος Αριθμός Tokens (num_predict)", "Max Upload Count": "Μέγιστος Αριθμός Ανεβάσματος", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "Τα εργαλεία διαθέτουν ένα σύστημα κλήσης λειτουργιών που επιτρέπει την αυθαίρετη εκτέλεση κώδικα", "Tools have a function calling system that allows arbitrary code execution.": "Τα εργαλεία διαθέτουν ένα σύστημα κλήσης λειτουργιών που επιτρέπει την αυθαίρετη εκτέλεση κώδικα.", "Top K": "Top K", + "Top K Reranker": "", "Top P": "Top P", "Transformers": "Transformers", "Trouble accessing Ollama?": "Προβλήματα πρόσβασης στο Ollama?", @@ -1155,6 +1160,7 
@@ "WebUI URL": "", "WebUI will make requests to \"{{url}}/api/chat\"": "Το WebUI θα κάνει αιτήματα στο \"{{url}}/api/chat\"", "WebUI will make requests to \"{{url}}/chat/completions\"": "Το WebUI θα κάνει αιτήματα στο \"{{url}}/chat/completions\"", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "Τι προσπαθείτε να πετύχετε?", "What are you working on?": "Τι εργάζεστε;", "What’s New in": "Τι νέο υπάρχει στο", diff --git a/src/lib/i18n/locales/en-GB/translation.json b/src/lib/i18n/locales/en-GB/translation.json index fdde499076b..f8390be5d39 100644 --- a/src/lib/i18n/locales/en-GB/translation.json +++ b/src/lib/i18n/locales/en-GB/translation.json @@ -6,6 +6,7 @@ "(latest)": "", "(Ollama)": "", "{{ models }}": "", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "", "{{user}}'s Chats": "", @@ -215,6 +216,7 @@ "Confirm your action": "", "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "", "Constrains effort on reasoning for reasoning models. 
Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "", @@ -635,8 +637,8 @@ "LDAP server updated": "", "Leaderboard": "", "Leave empty for unlimited": "", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "", "Leave empty to use the default prompt, or enter a custom prompt": "", "Leave model field empty to use the default model.": "", @@ -663,6 +665,7 @@ "Manage Ollama API Connections": "", "Manage OpenAI API Connections": "", "Manage Pipelines": "", + "Manage Tool Servers": "", "March": "", "Max Tokens (num_predict)": "", "Max Upload Count": "", @@ -1079,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", "Top K": "", + "Top K Reranker": "", "Top P": "", "Transformers": "", "Trouble accessing Ollama?": "", @@ -1156,6 +1160,7 @@ "WebUI URL": "", "WebUI will make requests to \"{{url}}/api/chat\"": "", "WebUI will make requests to \"{{url}}/chat/completions\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "", "What are you working on?": "", "What’s New in": "", diff --git a/src/lib/i18n/locales/en-US/translation.json b/src/lib/i18n/locales/en-US/translation.json index fdde499076b..f8390be5d39 100644 --- a/src/lib/i18n/locales/en-US/translation.json +++ b/src/lib/i18n/locales/en-US/translation.json @@ -6,6 +6,7 @@ "(latest)": "", "(Ollama)": "", "{{ models }}": "", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "", "{{user}}'s Chats": "", @@ -215,6 
+216,7 @@ "Confirm your action": "", "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "", @@ -635,8 +637,8 @@ "LDAP server updated": "", "Leaderboard": "", "Leave empty for unlimited": "", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "", "Leave empty to use the default prompt, or enter a custom prompt": "", "Leave model field empty to use the default model.": "", @@ -663,6 +665,7 @@ "Manage Ollama API Connections": "", "Manage OpenAI API Connections": "", "Manage Pipelines": "", + "Manage Tool Servers": "", "March": "", "Max Tokens (num_predict)": "", "Max Upload Count": "", @@ -1079,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", "Top K": "", + "Top K Reranker": "", "Top P": "", "Transformers": "", "Trouble accessing Ollama?": "", @@ -1156,6 +1160,7 @@ "WebUI URL": "", "WebUI will make requests to \"{{url}}/api/chat\"": "", "WebUI will make requests to \"{{url}}/chat/completions\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "", "What are you working on?": "", "What’s New in": "", diff --git a/src/lib/i18n/locales/es-ES/translation.json b/src/lib/i18n/locales/es-ES/translation.json index a461657057b..c823cfb4324 100644 --- 
a/src/lib/i18n/locales/es-ES/translation.json +++ b/src/lib/i18n/locales/es-ES/translation.json @@ -6,6 +6,7 @@ "(latest)": "(latest)", "(Ollama)": "", "{{ models }}": "{{ models }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "{{COUNT}} líneas ocultas", "{{COUNT}} Replies": "{{COUNT}} Respuestas", "{{user}}'s Chats": "Chats de {{user}}", @@ -215,6 +216,7 @@ "Confirm your action": "Confirma tu acción", "Confirm your new password": "Confirmar tu nueva contraseña", "Connect to your own OpenAI compatible API endpoints.": "Conecta a tus propios endpoints de API compatibles con OpenAI.", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "Conexiones", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "Esfuerzo de razonamiento para modelos de razonamiento. Solo aplicable a modelos de razonamiento de proveedores específicos que soportan el esfuerzo de razonamiento.", "Contact Admin for WebUI Access": "Contacta el administrador para obtener acceso al WebUI", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "Introduzca el tiempo de espera en segundos", "Enter to Send": "Enter para Enviar", "Enter Top K": "Ingrese el Top K", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "Ingrese la URL (p.ej., http://127.0.0.1:7860/)", "Enter URL (e.g. 
http://localhost:11434)": "Ingrese la URL (p.ej., http://localhost:11434)", "Enter your current password": "Ingrese su contraseña actual", @@ -634,8 +637,8 @@ "LDAP server updated": "Servidor LDAP actualizado", "Leaderboard": "Tablero de líderes", "Leave empty for unlimited": "Deje vacío para ilimitado", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "Deje vacío para incluir todos los modelos desde el endpoint \"{{URL}}/api/tags\"", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "Deje vacío para incluir todos los modelos desde el endpoint \"{{URL}}/models\"", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "Deje vacío para incluir todos los modelos o seleccione modelos específicos", "Leave empty to use the default prompt, or enter a custom prompt": "Deje vacío para usar el prompt predeterminado, o ingrese un prompt personalizado", "Leave model field empty to use the default model.": "Deje el campo del modelo vacío para usar el modelo predeterminado.", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "Gestionar conexiones API de Ollama", "Manage OpenAI API Connections": "Gestionar conexiones API de OpenAI", "Manage Pipelines": "Administrar Pipelines", + "Manage Tool Servers": "", "March": "Marzo", "Max Tokens (num_predict)": "Máximo de fichas (num_predict)", "Max Upload Count": "Cantidad máxima de cargas", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "Las herramientas tienen un sistema de llamadas de funciones que permite la ejecución de código arbitrario", "Tools have a function calling system that allows arbitrary code execution.": "Las herramientas tienen un sistema de llamada de funciones que permite la ejecución de código arbitrario.", "Top K": "Top K", + "Top K Reranker": "", "Top P": 
"Top P", "Transformers": "Transformadores", "Trouble accessing Ollama?": "¿Problemas para acceder a Ollama?", @@ -1155,6 +1160,7 @@ "WebUI URL": "URL del WebUI", "WebUI will make requests to \"{{url}}/api/chat\"": "WebUI hará solicitudes a \"{{url}}/api/chat\"", "WebUI will make requests to \"{{url}}/chat/completions\"": "WebUI hará solicitudes a \"{{url}}/chat/completions\"WebUI hará solicitudes a \"{{url}}/chat/completions\"", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "¿Qué estás tratando de lograr?", "What are you working on?": "¿En qué estás trabajando?", "What’s New in": "Novedades en", diff --git a/src/lib/i18n/locales/et-EE/translation.json b/src/lib/i18n/locales/et-EE/translation.json index 12e7120ffbd..ab1395a6888 100644 --- a/src/lib/i18n/locales/et-EE/translation.json +++ b/src/lib/i18n/locales/et-EE/translation.json @@ -6,6 +6,7 @@ "(latest)": "(uusim)", "(Ollama)": "", "{{ models }}": "{{ mudelid }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "{{COUNT}} peidetud rida", "{{COUNT}} Replies": "{{COUNT}} vastust", "{{user}}'s Chats": "{{user}} vestlused", @@ -215,6 +216,7 @@ "Confirm your action": "Kinnita oma toiming", "Confirm your new password": "Kinnita oma uus parool", "Connect to your own OpenAI compatible API endpoints.": "Ühendu oma OpenAI-ga ühilduvate API lõpp-punktidega.", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "Ühendused", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "Piirab arutluse pingutust arutlusvõimelistele mudelitele. 
Kohaldatav ainult konkreetsete pakkujate arutlusmudelitele, mis toetavad arutluspingutust.", "Contact Admin for WebUI Access": "Võtke WebUI juurdepääsu saamiseks ühendust administraatoriga", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "Sisestage aegumine sekundites", "Enter to Send": "Enter saatmiseks", "Enter Top K": "Sisestage Top K", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "Sisestage URL (nt http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "Sisestage URL (nt http://localhost:11434)", "Enter your current password": "Sisestage oma praegune parool", @@ -634,8 +637,8 @@ "LDAP server updated": "LDAP server uuendatud", "Leaderboard": "Edetabel", "Leave empty for unlimited": "Jäta tühjaks piiranguta kasutamiseks", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "Jäta tühjaks, et kaasata kõik mudelid \"{{URL}}/api/tags\" lõpp-punktist", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "Jäta tühjaks, et kaasata kõik mudelid \"{{URL}}/models\" lõpp-punktist", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "Jäta tühjaks, et kaasata kõik mudelid või vali konkreetsed mudelid", "Leave empty to use the default prompt, or enter a custom prompt": "Jäta tühjaks, et kasutada vaikimisi vihjet, või sisesta kohandatud vihje", "Leave model field empty to use the default model.": "Jäta mudeli väli tühjaks, et kasutada vaikimisi mudelit.", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "Halda Ollama API ühendusi", "Manage OpenAI API Connections": "Halda OpenAI API ühendusi", "Manage Pipelines": "Halda torustikke", + "Manage Tool Servers": "", "March": "Märts", "Max Tokens (num_predict)": "Max tokeneid (num_predict)", "Max Upload Count": "Maksimaalne üleslaadimiste arv", @@ -1078,6 +1082,7 @@ "Tools have a 
function calling system that allows arbitrary code execution": "Tööriistadel on funktsioonide kutsumise süsteem, mis võimaldab suvalise koodi täitmist", "Tools have a function calling system that allows arbitrary code execution.": "Tööriistadel on funktsioonide kutsumise süsteem, mis võimaldab suvalise koodi täitmist.", "Top K": "Top K", + "Top K Reranker": "", "Top P": "Top P", "Transformers": "Transformers", "Trouble accessing Ollama?": "Probleeme Ollama juurdepääsuga?", @@ -1155,6 +1160,7 @@ "WebUI URL": "WebUI URL", "WebUI will make requests to \"{{url}}/api/chat\"": "WebUI teeb päringuid aadressile \"{{url}}/api/chat\"", "WebUI will make requests to \"{{url}}/chat/completions\"": "WebUI teeb päringuid aadressile \"{{url}}/chat/completions\"", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "Mida te püüate saavutada?", "What are you working on?": "Millega te tegelete?", "What’s New in": "Mis on uut", diff --git a/src/lib/i18n/locales/eu-ES/translation.json b/src/lib/i18n/locales/eu-ES/translation.json index fc724f95baa..ad43b8361ec 100644 --- a/src/lib/i18n/locales/eu-ES/translation.json +++ b/src/lib/i18n/locales/eu-ES/translation.json @@ -6,6 +6,7 @@ "(latest)": "(azkena)", "(Ollama)": "", "{{ models }}": "{{ models }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "", "{{user}}'s Chats": "{{user}}-ren Txatak", @@ -215,6 +216,7 @@ "Confirm your action": "Berretsi zure ekintza", "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "Konexioak", "Constrains effort on reasoning for reasoning models. 
Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Jarri harremanetan Administratzailearekin WebUI Sarbiderako", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "Sartu Top K", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "Sartu URLa (adib. http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "Sartu URLa (adib. http://localhost:11434)", "Enter your current password": "", @@ -634,8 +637,8 @@ "LDAP server updated": "LDAP zerbitzaria eguneratu da", "Leaderboard": "Sailkapena", "Leave empty for unlimited": "Utzi hutsik mugarik ez jartzeko", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "Utzi hutsik \"{{URL}}/api/tags\" endpointuko eredu guztiak sartzeko", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "Utzi hutsik \"{{URL}}/models\" endpointuko eredu guztiak sartzeko", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "Utzi hutsik eredu guztiak sartzeko edo hautatu eredu zehatzak", "Leave empty to use the default prompt, or enter a custom prompt": "Utzi hutsik prompt lehenetsia erabiltzeko, edo sartu prompt pertsonalizatu bat", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "Kudeatu Ollama API Konexioak", "Manage OpenAI API Connections": "Kudeatu OpenAI API Konexioak", "Manage Pipelines": "Kudeatu Pipeline-ak", + "Manage Tool Servers": "", "March": "Martxoa", "Max Tokens (num_predict)": "Token maximoak (num_predict)", "Max Upload Count": "Karga kopuru maximoa", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "Tresnek kode arbitrarioa exekutatzeko aukera ematen duen funtzio 
deitzeko sistema dute", "Tools have a function calling system that allows arbitrary code execution.": "Tresnek kode arbitrarioa exekutatzeko aukera ematen duen funtzio deitzeko sistema dute.", "Top K": "Goiko K", + "Top K Reranker": "", "Top P": "Goiko P", "Transformers": "Transformatzaileak", "Trouble accessing Ollama?": "Arazoak Ollama atzitzeko?", @@ -1155,6 +1160,7 @@ "WebUI URL": "", "WebUI will make requests to \"{{url}}/api/chat\"": "WebUI-k eskaerak egingo ditu \"{{url}}/api/chat\"-era", "WebUI will make requests to \"{{url}}/chat/completions\"": "WebUI-k eskaerak egingo ditu \"{{url}}/chat/completions\"-era", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "Zer lortu nahi duzu?", "What are you working on?": "Zertan ari zara lanean?", "What’s New in": "Zer berri honetan:", diff --git a/src/lib/i18n/locales/fa-IR/translation.json b/src/lib/i18n/locales/fa-IR/translation.json index a09dd0c0241..addd5d639c9 100644 --- a/src/lib/i18n/locales/fa-IR/translation.json +++ b/src/lib/i18n/locales/fa-IR/translation.json @@ -6,6 +6,7 @@ "(latest)": "(آخرین)", "(Ollama)": "", "{{ models }}": "{{ models }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "", "{{user}}'s Chats": "{{user}} گفتگوهای", @@ -215,6 +216,7 @@ "Confirm your action": "", "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "ارتباطات", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "برای دسترسی به WebUI با مدیر تماس بگیرید", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "مقدار Top K را وارد کنید", + "Enter Top K Reranker": "", "Enter URL (e.g. 
http://127.0.0.1:7860/)": "مقدار URL را وارد کنید (مثال http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "مقدار URL را وارد کنید (مثال http://localhost:11434)", "Enter your current password": "", @@ -634,8 +637,8 @@ "LDAP server updated": "", "Leaderboard": "", "Leave empty for unlimited": "", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "", "Leave empty to use the default prompt, or enter a custom prompt": "", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "", "Manage OpenAI API Connections": "", "Manage Pipelines": "مدیریت خطوط لوله", + "Manage Tool Servers": "", "March": "مارچ", "Max Tokens (num_predict)": "توکنهای بیشینه (num_predict)", "Max Upload Count": "", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", "Top K": "Top K", + "Top K Reranker": "", "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "در دسترسی به اولاما مشکل دارید؟", @@ -1155,6 +1160,7 @@ "WebUI URL": "", "WebUI will make requests to \"{{url}}/api/chat\"": "", "WebUI will make requests to \"{{url}}/chat/completions\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "", "What are you working on?": "", "What’s New in": "موارد جدید در", diff --git a/src/lib/i18n/locales/fi-FI/translation.json b/src/lib/i18n/locales/fi-FI/translation.json index 6905bb15261..3139ca6e12f 100644 --- a/src/lib/i18n/locales/fi-FI/translation.json +++ b/src/lib/i18n/locales/fi-FI/translation.json @@ -6,6 +6,7 @@ 
"(latest)": "(uusin)", "(Ollama)": "", "{{ models }}": "{{ mallit }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "{{COUNT}} piilotettua riviä", "{{COUNT}} Replies": "{{COUNT}} vastausta", "{{user}}'s Chats": "{{user}}:n keskustelut", @@ -215,6 +216,7 @@ "Confirm your action": "Vahvista toimintasi", "Confirm your new password": "Vahvista uusi salasanasi", "Connect to your own OpenAI compatible API endpoints.": "Yhdistä oma OpenAI yhteensopiva API päätepiste.", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "Yhteydet", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Ota yhteyttä ylläpitäjään WebUI-käyttöä varten", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "Aseta aikakatkaisu sekunneissa", "Enter to Send": "Enter lähettääksesi", "Enter Top K": "Kirjoita Top K", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "Kirjoita verkko-osoite (esim. http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "Kirjoita verkko-osoite (esim. 
http://localhost:11434)", "Enter your current password": "Kirjoita nykyinen salasanasi", @@ -634,8 +637,8 @@ "LDAP server updated": "LDAP-palvelin päivitetty", "Leaderboard": "Tulosluettelo", "Leave empty for unlimited": "Jätä tyhjäksi rajattomaksi", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "Jätä tyhjäksi, jos haluat sisällyttää kaikki mallit \"{{URL}}/api/tags\" -päätepistestä", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "Jätä tyhjäksi, jos haluat sisällyttää kaikki mallit \"{{URL}}/models\" -päätepistestä", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "Jätä tyhjäksi, jos haluat sisällyttää kaikki mallit tai valitse tietyt mallit", "Leave empty to use the default prompt, or enter a custom prompt": "Jätä tyhjäksi käyttääksesi oletuskehotetta tai kirjoita mukautettu kehote", "Leave model field empty to use the default model.": "Jätä malli kenttä tyhjäksi käyttääksesi oletus mallia.", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "Hallitse Ollama API -yhteyksiä", "Manage OpenAI API Connections": "Hallitse OpenAI API -yhteyksiä", "Manage Pipelines": "Hallitse putkia", + "Manage Tool Servers": "", "March": "maaliskuu", "Max Tokens (num_predict)": "Tokenien enimmäismäärä (num_predict)", "Max Upload Count": "Latausten enimmäismäärä", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "Työkaluilla on toimintokutsuihin perustuva järjestelmä, joka sallii mielivaltaisen koodin suorittamisen", "Tools have a function calling system that allows arbitrary code execution.": "Työkalut sallivat mielivaltaisen koodin suorittamisen toimintokutsuilla.", "Top K": "Top K", + "Top K Reranker": "", "Top P": "Top P", "Transformers": "Muunnokset", "Trouble accessing Ollama?": "Ongelmia Ollama-yhteydessä?", 
@@ -1155,6 +1160,7 @@ "WebUI URL": "WebUI-osoite", "WebUI will make requests to \"{{url}}/api/chat\"": "WebUI lähettää pyyntöjä osoitteeseen \"{{url}}/api/chat\"", "WebUI will make requests to \"{{url}}/chat/completions\"": "WebUI lähettää pyyntöjä osoitteeseen \"{{url}}/chat/completions\"", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "Mitä yrität saavuttaa?", "What are you working on?": "Mitä olet työskentelemässä?", "What’s New in": "Mitä uutta", diff --git a/src/lib/i18n/locales/fr-CA/translation.json b/src/lib/i18n/locales/fr-CA/translation.json index fc2766ca5d1..2afe197cc3a 100644 --- a/src/lib/i18n/locales/fr-CA/translation.json +++ b/src/lib/i18n/locales/fr-CA/translation.json @@ -6,6 +6,7 @@ "(latest)": "(dernier)", "(Ollama)": "", "{{ models }}": "{{ modèles }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "", "{{user}}'s Chats": "Discussions de {{user}}", @@ -215,6 +216,7 @@ "Confirm your action": "Confirmez votre action", "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "Connexions", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Contacter l'administrateur pour l'accès à l'interface Web", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "Entrez les Top K", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "Entrez l'URL (par ex. {http://127.0.0.1:7860/})", "Enter URL (e.g. http://localhost:11434)": "Entrez l'URL (par ex. 
http://localhost:11434)", "Enter your current password": "", @@ -634,8 +637,8 @@ "LDAP server updated": "", "Leaderboard": "", "Leave empty for unlimited": "", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "", "Leave empty to use the default prompt, or enter a custom prompt": "", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "", "Manage OpenAI API Connections": "", "Manage Pipelines": "Gérer les pipelines", + "Manage Tool Servers": "", "March": "Mars", "Max Tokens (num_predict)": "Tokens maximaux (num_predict)", "Max Upload Count": "", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", "Top K": "Top K", + "Top K Reranker": "", "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "Rencontrez-vous des difficultés pour accéder à Ollama ?", @@ -1155,6 +1160,7 @@ "WebUI URL": "", "WebUI will make requests to \"{{url}}/api/chat\"": "", "WebUI will make requests to \"{{url}}/chat/completions\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "", "What are you working on?": "", "What’s New in": "Quoi de neuf", diff --git a/src/lib/i18n/locales/fr-FR/translation.json b/src/lib/i18n/locales/fr-FR/translation.json index eb478509733..fb77f9a1cac 100644 --- a/src/lib/i18n/locales/fr-FR/translation.json +++ b/src/lib/i18n/locales/fr-FR/translation.json @@ -6,6 +6,7 @@ "(latest)": "(dernière version)", "(Ollama)": "", "{{ models }}": "{{ models }}", + "{{COUNT}} Available Tool Servers": "", 
"{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "{{COUNT}} réponses", "{{user}}'s Chats": "Conversations de {{user}}", @@ -215,6 +216,7 @@ "Confirm your action": "Confirmer votre action", "Confirm your new password": "Confirmer votre nouveau mot de passe", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "Connexions", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Contacter l'administrateur pour obtenir l'accès à WebUI", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "Entrez les Top K", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "Entrez l'URL (par ex. {http://127.0.0.1:7860/})", "Enter URL (e.g. http://localhost:11434)": "Entrez l'URL (par ex. http://localhost:11434)", "Enter your current password": "Entrez votre mot de passe actuel", @@ -634,8 +637,8 @@ "LDAP server updated": "Serveur LDAP mis à jour", "Leaderboard": "Classement", "Leave empty for unlimited": "Laissez vide pour illimité", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "Laissez vide pour inclure tous les modèles depuis le point de terminaison \"{{URL}}/api/tags\"", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "Laissez vide pour inclure tous les modèles depuis le point de terminaison \"{{URL}}/models\"", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "Laissez vide pour inclure tous les modèles ou sélectionnez des modèles spécifiques", "Leave empty to use the default prompt, or enter a custom prompt": "Laissez vide pour utiliser le prompt par défaut, ou entrez un prompt 
personnalisé", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "Gérer les connexions API Ollama", "Manage OpenAI API Connections": "Gérer les connexions API OpenAI", "Manage Pipelines": "Gérer les pipelines", + "Manage Tool Servers": "", "March": "Mars", "Max Tokens (num_predict)": "Nb max de tokens (num_predict)", "Max Upload Count": "Nombre maximal de téléversements", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "Les outils ont un système d'appel de fonction qui permet l'exécution de code arbitraire", "Tools have a function calling system that allows arbitrary code execution.": "Les outils ont un système d'appel de fonction qui permet l'exécution de code arbitraire.", "Top K": "Top K", + "Top K Reranker": "", "Top P": "Top P", "Transformers": "Transformers", "Trouble accessing Ollama?": "Problèmes d'accès à Ollama ?", @@ -1155,6 +1160,7 @@ "WebUI URL": "URL de WebUI", "WebUI will make requests to \"{{url}}/api/chat\"": "WebUI fera des requêtes à \"{{url}}/api/chat\"", "WebUI will make requests to \"{{url}}/chat/completions\"": "WebUI fera des requêtes à \"{{url}}/chat/completions\"", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "Que cherchez-vous à accomplir ?", "What are you working on?": "Sur quoi travaillez-vous ?", "What’s New in": "Quoi de neuf dans", diff --git a/src/lib/i18n/locales/he-IL/translation.json b/src/lib/i18n/locales/he-IL/translation.json index 950a1446253..e4b17c72954 100644 --- a/src/lib/i18n/locales/he-IL/translation.json +++ b/src/lib/i18n/locales/he-IL/translation.json @@ -6,6 +6,7 @@ "(latest)": "(האחרון)", "(Ollama)": "", "{{ models }}": "{{ דגמים }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "", "{{user}}'s Chats": "צ'אטים של {{user}}", @@ -215,6 +216,7 @@ "Confirm your action": "", "Confirm your new password": "", 
"Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "חיבורים", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "הזן Top K", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "הזן כתובת URL (למשל http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "הזן כתובת URL (למשל http://localhost:11434)", "Enter your current password": "", @@ -634,8 +637,8 @@ "LDAP server updated": "", "Leaderboard": "", "Leave empty for unlimited": "", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "", "Leave empty to use the default prompt, or enter a custom prompt": "", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "", "Manage OpenAI API Connections": "", "Manage Pipelines": "ניהול צינורות", + "Manage Tool Servers": "", "March": "מרץ", "Max Tokens (num_predict)": "מקסימום אסימונים (num_predict)", "Max Upload Count": "", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", "Top K": "Top K", + "Top K Reranker": "", "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "קשה לגשת לOllama?", @@ -1155,6 +1160,7 @@ "WebUI URL": "", "WebUI will make requests to \"{{url}}/api/chat\"": "", "WebUI will make requests to 
\"{{url}}/chat/completions\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "", "What are you working on?": "", "What’s New in": "מה חדש ב", diff --git a/src/lib/i18n/locales/hi-IN/translation.json b/src/lib/i18n/locales/hi-IN/translation.json index e6d0418e4d2..42339333f76 100644 --- a/src/lib/i18n/locales/hi-IN/translation.json +++ b/src/lib/i18n/locales/hi-IN/translation.json @@ -6,6 +6,7 @@ "(latest)": "(latest)", "(Ollama)": "", "{{ models }}": "{{ मॉडल }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "", "{{user}}'s Chats": "{{user}} की चैट", @@ -215,6 +216,7 @@ "Confirm your action": "", "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "सम्बन्ध", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "शीर्ष K दर्ज करें", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "यूआरएल दर्ज करें (उदा. http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "यूआरएल दर्ज करें (उदा. 
http://localhost:11434)", "Enter your current password": "", @@ -634,8 +637,8 @@ "LDAP server updated": "", "Leaderboard": "", "Leave empty for unlimited": "", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "", "Leave empty to use the default prompt, or enter a custom prompt": "", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "", "Manage OpenAI API Connections": "", "Manage Pipelines": "पाइपलाइनों का प्रबंधन करें", + "Manage Tool Servers": "", "March": "मार्च", "Max Tokens (num_predict)": "अधिकतम टोकन (num_predict)", "Max Upload Count": "", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", "Top K": "शीर्ष K", + "Top K Reranker": "", "Top P": "शीर्ष P", "Transformers": "", "Trouble accessing Ollama?": "Ollama तक पहुँचने में परेशानी हो रही है?", @@ -1155,6 +1160,7 @@ "WebUI URL": "", "WebUI will make requests to \"{{url}}/api/chat\"": "", "WebUI will make requests to \"{{url}}/chat/completions\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "", "What are you working on?": "", "What’s New in": "इसमें नया क्या है", diff --git a/src/lib/i18n/locales/hr-HR/translation.json b/src/lib/i18n/locales/hr-HR/translation.json index 79305033b4e..2e52065aad0 100644 --- a/src/lib/i18n/locales/hr-HR/translation.json +++ b/src/lib/i18n/locales/hr-HR/translation.json @@ -6,6 +6,7 @@ "(latest)": "(najnovije)", "(Ollama)": "", "{{ models }}": "{{ modeli }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden 
lines": "", "{{COUNT}} Replies": "", "{{user}}'s Chats": "Razgovori korisnika {{user}}", @@ -215,6 +216,7 @@ "Confirm your action": "", "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "Povezivanja", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Kontaktirajte admina za WebUI pristup", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "Unesite Top K", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "Unesite URL (npr. http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "Unesite URL (npr. http://localhost:11434)", "Enter your current password": "", @@ -634,8 +637,8 @@ "LDAP server updated": "", "Leaderboard": "", "Leave empty for unlimited": "", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "", "Leave empty to use the default prompt, or enter a custom prompt": "", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "", "Manage OpenAI API Connections": "", "Manage Pipelines": "Upravljanje cjevovodima", + "Manage Tool Servers": "", "March": "Ožujak", "Max Tokens (num_predict)": "Maksimalan broj tokena (num_predict)", "Max Upload Count": "", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", "Top K": "Top K", + "Top K 
Reranker": "", "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "Problemi s pristupom Ollama?", @@ -1155,6 +1160,7 @@ "WebUI URL": "", "WebUI will make requests to \"{{url}}/api/chat\"": "", "WebUI will make requests to \"{{url}}/chat/completions\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "", "What are you working on?": "", "What’s New in": "Što je novo u", diff --git a/src/lib/i18n/locales/hu-HU/translation.json b/src/lib/i18n/locales/hu-HU/translation.json index b8b20afb424..ac8db57f99f 100644 --- a/src/lib/i18n/locales/hu-HU/translation.json +++ b/src/lib/i18n/locales/hu-HU/translation.json @@ -6,6 +6,7 @@ "(latest)": "(legújabb)", "(Ollama)": "", "{{ models }}": "{{ modellek }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "", "{{user}}'s Chats": "{{user}} beszélgetései", @@ -215,6 +216,7 @@ "Confirm your action": "Erősítsd meg a műveletet", "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "Kapcsolatok", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Lépj kapcsolatba az adminnal a WebUI hozzáférésért", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "Add meg a Top K értéket", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "Add meg az URL-t (pl. http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "Add meg az URL-t (pl. 
http://localhost:11434)", "Enter your current password": "", @@ -634,8 +637,8 @@ "LDAP server updated": "", "Leaderboard": "Ranglista", "Leave empty for unlimited": "Hagyja üresen a korlátlan használathoz", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "Hagyja üresen az összes modell használatához, vagy válasszon ki konkrét modelleket", "Leave empty to use the default prompt, or enter a custom prompt": "Hagyja üresen az alapértelmezett prompt használatához, vagy adjon meg egyéni promptot", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "", "Manage OpenAI API Connections": "", "Manage Pipelines": "Folyamatok kezelése", + "Manage Tool Servers": "", "March": "Március", "Max Tokens (num_predict)": "Maximum tokenek (num_predict)", "Max Upload Count": "Maximum feltöltések száma", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "Az eszközök olyan függvényhívó rendszerrel rendelkeznek, amely lehetővé teszi tetszőleges kód végrehajtását", "Tools have a function calling system that allows arbitrary code execution.": "Az eszközök olyan függvényhívó rendszerrel rendelkeznek, amely lehetővé teszi tetszőleges kód végrehajtását.", "Top K": "Top K", + "Top K Reranker": "", "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "Problémája van az Ollama elérésével?", @@ -1155,6 +1160,7 @@ "WebUI URL": "", "WebUI will make requests to \"{{url}}/api/chat\"": "", "WebUI will make requests to \"{{url}}/chat/completions\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "", "What are you 
working on?": "", "What’s New in": "", diff --git a/src/lib/i18n/locales/id-ID/translation.json b/src/lib/i18n/locales/id-ID/translation.json index e7d5616e2bc..f65c33e0f11 100644 --- a/src/lib/i18n/locales/id-ID/translation.json +++ b/src/lib/i18n/locales/id-ID/translation.json @@ -6,6 +6,7 @@ "(latest)": "(terbaru)", "(Ollama)": "", "{{ models }}": "{{ models }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "", "{{user}}'s Chats": "Obrolan {{user}}", @@ -215,6 +216,7 @@ "Confirm your action": "Konfirmasi tindakan Anda", "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "Koneksi", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Hubungi Admin untuk Akses WebUI", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "Masukkan Top K", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "Masukkan URL (mis. http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "Masukkan URL (mis. 
http://localhost:11434)", "Enter your current password": "", @@ -634,8 +637,8 @@ "LDAP server updated": "", "Leaderboard": "", "Leave empty for unlimited": "", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "", "Leave empty to use the default prompt, or enter a custom prompt": "", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "", "Manage OpenAI API Connections": "", "Manage Pipelines": "Mengelola Saluran Pipa", + "Manage Tool Servers": "", "March": "Maret", "Max Tokens (num_predict)": "Token Maksimal (num_prediksi)", "Max Upload Count": "", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", "Top K": "K atas", + "Top K Reranker": "", "Top P": "P Atas", "Transformers": "", "Trouble accessing Ollama?": "Kesulitan mengakses Ollama?", @@ -1155,6 +1160,7 @@ "WebUI URL": "", "WebUI will make requests to \"{{url}}/api/chat\"": "", "WebUI will make requests to \"{{url}}/chat/completions\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "", "What are you working on?": "", "What’s New in": "Apa yang Baru di", diff --git a/src/lib/i18n/locales/ie-GA/translation.json b/src/lib/i18n/locales/ie-GA/translation.json index 86b72abfb5d..8ccf351005d 100644 --- a/src/lib/i18n/locales/ie-GA/translation.json +++ b/src/lib/i18n/locales/ie-GA/translation.json @@ -6,6 +6,7 @@ "(latest)": "(is déanaí)", "(Ollama)": "", "{{ models }}": "{{ models }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", 
"{{COUNT}} Replies": "{{COUNT}} Freagra", "{{user}}'s Chats": "Comhráite {{user}}", @@ -215,6 +216,7 @@ "Confirm your action": "Deimhnigh do ghníomh", "Confirm your new password": "Deimhnigh do phasfhocal nua", "Connect to your own OpenAI compatible API endpoints.": "Ceangail le do chríochphointí API atá comhoiriúnach le OpenAI.", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "Naisc", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Déan teagmháil le Riarachán le haghaidh Rochtana WebUI", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "Cuir isteach an t-am istigh i soicindí", "Enter to Send": "", "Enter Top K": "Cuir isteach Barr K", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "Iontráil URL (m.sh. http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "Iontráil URL (m.sh. 
http://localhost:11434)", "Enter your current password": "Cuir isteach do phasfhocal reatha", @@ -634,8 +637,8 @@ "LDAP server updated": "Nuashonraíodh freastalaí LDAP", "Leaderboard": "An Clár Ceannairí", "Leave empty for unlimited": "Fág folamh le haghaidh neamhtheoranta", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "Fág folamh chun gach múnla ó chríochphointe \"{{URL}}/api/tags\" a chur san áireamh", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "Fág folamh chun gach múnla ón gcríochphointe \"{{URL}}/models\" a chur san áireamh", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "Fág folamh chun gach múnla a chur san áireamh nó roghnaigh múnlaí sonracha", "Leave empty to use the default prompt, or enter a custom prompt": "Fág folamh chun an leid réamhshocraithe a úsáid, nó cuir isteach leid saincheaptha", "Leave model field empty to use the default model.": "Fág réimse an mhúnla folamh chun an tsamhail réamhshocraithe a úsáid.", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "Bainistigh Naisc API Ollama", "Manage OpenAI API Connections": "Bainistigh Naisc API OpenAI", "Manage Pipelines": "Bainistigh píblín", + "Manage Tool Servers": "", "March": "Márta", "Max Tokens (num_predict)": "Comharthaí Uasta (num_predicate)", "Max Upload Count": "Líon Uaslódála Max", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "Tá córas glaonna feidhme ag uirlisí a cheadaíonn forghníomhú cód treallach", "Tools have a function calling system that allows arbitrary code execution.": "Tá córas glaonna feidhme ag uirlisí a cheadaíonn forghníomhú cód treallach.", "Top K": "Barr K", + "Top K Reranker": "", "Top P": "Barr P", "Transformers": "Claochladáin", "Trouble accessing Ollama?": "Deacracht teacht ar Ollama?", 
@@ -1155,6 +1160,7 @@ "WebUI URL": "URL WebUI", "WebUI will make requests to \"{{url}}/api/chat\"": "Déanfaidh WebUI iarratais ar \"{{url}}/api/chat\"", "WebUI will make requests to \"{{url}}/chat/completions\"": "Déanfaidh WebUI iarratais ar \"{{url}}/chat/completions\"", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "Cad atá tú ag iarraidh a bhaint amach?", "What are you working on?": "Cad air a bhfuil tú ag obair?", "What’s New in": "Cad atá Nua i", diff --git a/src/lib/i18n/locales/it-IT/translation.json b/src/lib/i18n/locales/it-IT/translation.json index c71c40ff93d..9e4f851328b 100644 --- a/src/lib/i18n/locales/it-IT/translation.json +++ b/src/lib/i18n/locales/it-IT/translation.json @@ -6,6 +6,7 @@ "(latest)": "(ultima)", "(Ollama)": "", "{{ models }}": "{{ modelli }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "", "{{user}}'s Chats": "{{user}} Chat", @@ -215,6 +216,7 @@ "Confirm your action": "", "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "Connessioni", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "Inserisci Top K", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "Inserisci URL (ad esempio http://127.0.0.1:7860/)", "Enter URL (e.g. 
http://localhost:11434)": "Inserisci URL (ad esempio http://localhost:11434)", "Enter your current password": "", @@ -634,8 +637,8 @@ "LDAP server updated": "", "Leaderboard": "", "Leave empty for unlimited": "", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "", "Leave empty to use the default prompt, or enter a custom prompt": "", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "", "Manage OpenAI API Connections": "", "Manage Pipelines": "Gestire le pipeline", + "Manage Tool Servers": "", "March": "Marzo", "Max Tokens (num_predict)": "Numero massimo di gettoni (num_predict)", "Max Upload Count": "", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", "Top K": "Top K", + "Top K Reranker": "", "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "Problemi di accesso a Ollama?", @@ -1155,6 +1160,7 @@ "WebUI URL": "", "WebUI will make requests to \"{{url}}/api/chat\"": "", "WebUI will make requests to \"{{url}}/chat/completions\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "", "What are you working on?": "", "What’s New in": "Novità in", diff --git a/src/lib/i18n/locales/ja-JP/translation.json b/src/lib/i18n/locales/ja-JP/translation.json index 9616ce3e70d..7a99aaee60c 100644 --- a/src/lib/i18n/locales/ja-JP/translation.json +++ b/src/lib/i18n/locales/ja-JP/translation.json @@ -6,6 +6,7 @@ "(latest)": "(最新)", "(Ollama)": "", "{{ models }}": "{{ モデル }}", + "{{COUNT}} Available Tool 
Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "", "{{user}}'s Chats": "{{user}} のチャット", @@ -215,6 +216,7 @@ "Confirm your action": "あなたのアクションの確認", "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "接続", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "WEBUIへの接続について管理者に問い合わせ下さい。", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "トップ K を入力してください", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "URL を入力してください (例: http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "URL を入力してください (例: http://localhost:11434)", "Enter your current password": "", @@ -634,8 +637,8 @@ "LDAP server updated": "", "Leaderboard": "", "Leave empty for unlimited": "空欄なら無制限", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "", "Leave empty to use the default prompt, or enter a custom prompt": "カスタムプロンプトを入力。空欄ならデフォルトプロンプト", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "", "Manage OpenAI API Connections": "", "Manage Pipelines": "パイプラインの管理", + "Manage Tool Servers": "", "March": "3月", "Max Tokens (num_predict)": "最大トークン数 (num_predict)", "Max Upload Count": "最大アップロード数", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", "Top 
K": "トップ K", + "Top K Reranker": "", "Top P": "トップ P", "Transformers": "", "Trouble accessing Ollama?": "Ollama へのアクセスに問題がありますか?", @@ -1155,6 +1160,7 @@ "WebUI URL": "", "WebUI will make requests to \"{{url}}/api/chat\"": "", "WebUI will make requests to \"{{url}}/chat/completions\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "", "What are you working on?": "", "What’s New in": "新機能", diff --git a/src/lib/i18n/locales/ka-GE/translation.json b/src/lib/i18n/locales/ka-GE/translation.json index 7fff7fef5d9..674127653f0 100644 --- a/src/lib/i18n/locales/ka-GE/translation.json +++ b/src/lib/i18n/locales/ka-GE/translation.json @@ -6,6 +6,7 @@ "(latest)": "(უახლესი)", "(Ollama)": "", "{{ models }}": "{{ მოდელები }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "{{COUNT}} პასუხი", "{{user}}'s Chats": "{{user}}-ის ჩათები", @@ -215,6 +216,7 @@ "Confirm your action": "", "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "კავშირები", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "შეიყვანეთ Top K", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "შეიყვანეთ ბმული (მაგ: http://127.0.0.1:7860/)", "Enter URL (e.g. 
http://localhost:11434)": "შეიყვანეთ ბმული (მაგ: http://localhost:11434)", "Enter your current password": "შეიყვანეთ თქვენი მიმდინარე პაროლი", @@ -634,8 +637,8 @@ "LDAP server updated": "", "Leaderboard": "ლიდერების დაფა", "Leave empty for unlimited": "", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "", "Leave empty to use the default prompt, or enter a custom prompt": "", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "", "Manage OpenAI API Connections": "", "Manage Pipelines": "მილსადენების მართვა", + "Manage Tool Servers": "", "March": "მარტი", "Max Tokens (num_predict)": "მაქს. ტოკეტები (num_predict)", "Max Upload Count": "", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", "Top K": "ტოპ K", + "Top K Reranker": "", "Top P": "ტოპ P", "Transformers": "", "Trouble accessing Ollama?": "Ollama-ს ვერ უკავშირდები?", @@ -1155,6 +1160,7 @@ "WebUI URL": "", "WebUI will make requests to \"{{url}}/api/chat\"": "", "WebUI will make requests to \"{{url}}/chat/completions\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "", "What are you working on?": "", "What’s New in": "რა არის ახალი", diff --git a/src/lib/i18n/locales/ko-KR/translation.json b/src/lib/i18n/locales/ko-KR/translation.json index a6f59507d61..db0349ad1f0 100644 --- a/src/lib/i18n/locales/ko-KR/translation.json +++ b/src/lib/i18n/locales/ko-KR/translation.json @@ -6,6 +6,7 @@ "(latest)": "(최근)", "(Ollama)": "", "{{ models }}": "{{ models 
}}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "", "{{user}}'s Chats": "{{user}}의 채팅", @@ -215,6 +216,7 @@ "Confirm your action": "액션 확인", "Confirm your new password": "새로운 비밀번호를 한 번 더 입력해 주세요", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "연결", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "WebUI 접속을 위해서는 관리자에게 연락에 연락하십시오", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "Top K 입력", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "URL 입력(예: http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "URL 입력(예: http://localhost:11434)", "Enter your current password": "현재 비밀번호를 입력해 주세요", @@ -634,8 +637,8 @@ "LDAP server updated": "", "Leaderboard": "리더보드", "Leave empty for unlimited": "무제한을 위해 빈칸으로 남겨두세요", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "특정 모델을 선택하거나 모든 모델을 포함하고 싶으면 빈칸으로 남겨두세요", "Leave empty to use the default prompt, or enter a custom prompt": "기본 프롬프트를 사용하기 위해 빈칸으로 남겨두거나, 커스텀 프롬프트를 입력하세요", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "Ollama API 연결 관리", "Manage OpenAI API Connections": "OpenAI API 연결 관리", "Manage Pipelines": "파이프라인 관리", + "Manage Tool Servers": "", "March": "3월", "Max Tokens (num_predict)": "최대 토큰(num_predict)", "Max Upload Count": "업로드 최대 수", @@ -1078,6 +1082,7 @@ "Tools have a function 
calling system that allows arbitrary code execution": "도구에 임의 코드 실행을 허용하는 함수가 포함되어 있습니다", "Tools have a function calling system that allows arbitrary code execution.": "도구에 임의 코드 실행을 허용하는 함수가 포함되어 있습니다.", "Top K": "Top K", + "Top K Reranker": "", "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "올라마(Ollama)에 접근하는 데 문제가 있나요?", @@ -1155,6 +1160,7 @@ "WebUI URL": "", "WebUI will make requests to \"{{url}}/api/chat\"": "WebUI가 \"{{url}}/api/chat\"로 요청을 보냅니다", "WebUI will make requests to \"{{url}}/chat/completions\"": "WebUI가 \"{{url}}/chat/completions\"로 요청을 보냅니다", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "무엇을 성취하고 싶으신가요?", "What are you working on?": "어떤 작업을 하고 계신가요?", "What’s New in": "새로운 기능:", diff --git a/src/lib/i18n/locales/lt-LT/translation.json b/src/lib/i18n/locales/lt-LT/translation.json index 8137435e50f..38de2a663e8 100644 --- a/src/lib/i18n/locales/lt-LT/translation.json +++ b/src/lib/i18n/locales/lt-LT/translation.json @@ -6,6 +6,7 @@ "(latest)": "(naujausias)", "(Ollama)": "", "{{ models }}": "{{ models }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "", "{{user}}'s Chats": "{{user}} susirašinėjimai", @@ -215,6 +216,7 @@ "Confirm your action": "Patvirtinkite veiksmą", "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "Ryšiai", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Susisiekite su administratoriumi dėl prieigos", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "Įveskite Top K", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "Įveskite nuorodą (pvz. http://127.0.0.1:7860/)", "Enter URL (e.g. 
http://localhost:11434)": "Įveskite nuorododą (pvz. http://localhost:11434", "Enter your current password": "", @@ -634,8 +637,8 @@ "LDAP server updated": "", "Leaderboard": "", "Leave empty for unlimited": "", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "", "Leave empty to use the default prompt, or enter a custom prompt": "", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "", "Manage OpenAI API Connections": "", "Manage Pipelines": "Tvarkyti procesus", + "Manage Tool Servers": "", "March": "Kovas", "Max Tokens (num_predict)": "Maksimalus žetonų kiekis (num_predict)", "Max Upload Count": "", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "Įrankiai gali naudoti funkcijas ir leisti vykdyti kodą", "Tools have a function calling system that allows arbitrary code execution.": "Įrankiai gali naudoti funkcijas ir leisti vykdyti kodą", "Top K": "Top K", + "Top K Reranker": "", "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "Problemos prieinant prie Ollama?", @@ -1155,6 +1160,7 @@ "WebUI URL": "", "WebUI will make requests to \"{{url}}/api/chat\"": "", "WebUI will make requests to \"{{url}}/chat/completions\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "", "What are you working on?": "", "What’s New in": "Kas naujo", diff --git a/src/lib/i18n/locales/ms-MY/translation.json b/src/lib/i18n/locales/ms-MY/translation.json index d9fe80dc819..780d3645fbd 100644 --- a/src/lib/i18n/locales/ms-MY/translation.json +++ b/src/lib/i18n/locales/ms-MY/translation.json @@ -6,6 
+6,7 @@ "(latest)": "(terkini)", "(Ollama)": "", "{{ models }}": "{{ models }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "", "{{user}}'s Chats": "Perbualan {{user}}", @@ -215,6 +216,7 @@ "Confirm your action": "Sahkan tindakan anda", "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "Sambungan", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Hubungi admin untuk akses WebUI", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "Masukkan 'Top K'", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "Masukkan URL (cth http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "Masukkan URL (cth http://localhost:11434)", "Enter your current password": "", @@ -634,8 +637,8 @@ "LDAP server updated": "", "Leaderboard": "", "Leave empty for unlimited": "", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "", "Leave empty to use the default prompt, or enter a custom prompt": "", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "", "Manage OpenAI API Connections": "", "Manage Pipelines": "Urus 'Pipelines'", + "Manage Tool Servers": "", "March": "Mac", "Max Tokens (num_predict)": "Token Maksimum ( num_predict )", "Max Upload Count": "", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary 
code execution": "Alatan mempunyai sistem panggilan fungsi yang membolehkan pelaksanaan kod sewenang-wenangnya", "Tools have a function calling system that allows arbitrary code execution.": "Alatan mempunyai sistem panggilan fungsi yang membolehkan pelaksanaan kod sewenang-wenangnya.", "Top K": "'Top K'", + "Top K Reranker": "", "Top P": "'Top P'", "Transformers": "", "Trouble accessing Ollama?": "Masalah mengakses Ollama?", @@ -1155,6 +1160,7 @@ "WebUI URL": "", "WebUI will make requests to \"{{url}}/api/chat\"": "", "WebUI will make requests to \"{{url}}/chat/completions\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "", "What are you working on?": "", "What’s New in": "Apakah yang terbaru dalam", diff --git a/src/lib/i18n/locales/nb-NO/translation.json b/src/lib/i18n/locales/nb-NO/translation.json index 16dcbd80ae4..cbce9b9f979 100644 --- a/src/lib/i18n/locales/nb-NO/translation.json +++ b/src/lib/i18n/locales/nb-NO/translation.json @@ -6,6 +6,7 @@ "(latest)": "(siste)", "(Ollama)": "", "{{ models }}": "{{ modeller }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "{{COUNT}} svar", "{{user}}'s Chats": "{{user}} sine samtaler", @@ -215,6 +216,7 @@ "Confirm your action": "Bekreft handlingen", "Confirm your new password": "Bekreft det nye passordet ditt", "Connect to your own OpenAI compatible API endpoints.": "Koble til egne OpenAI-kompatible API-endepunkter", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "Tilkoblinger", "Constrains effort on reasoning for reasoning models. 
Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Kontakt administrator for å få tilgang til WebUI", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "Angi tidsavbrudd i sekunder", "Enter to Send": "", "Enter Top K": "Angi Top K", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "Angi URL (f.eks. http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "Angi URL (f.eks. http://localhost:11434)", "Enter your current password": "Angi det gjeldende passordet ditt", @@ -634,8 +637,8 @@ "LDAP server updated": "LDAP-server oppdatert", "Leaderboard": "Ledertavle", "Leave empty for unlimited": "La stå tomt for ubegrenset", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "La stå tomt for å inkludere alle modeller fra endepunktet \"{{URL}}/api/tags\"", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "La stå tomt for å inkludere alle modeller fra endepunktet \"{{URL}}/api/models\"", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "La stå tomt for å inkludere alle modeller", "Leave empty to use the default prompt, or enter a custom prompt": "La stå tomt for å bruke standard ledetekst, eller angi en tilpasset ledetekst", "Leave model field empty to use the default model.": "La modellfeltet stå tomt for å bruke standard modell.", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "Behandle API-tilkoblinger for Ollama", "Manage OpenAI API Connections": "Behandle API-tilkoblinger for OpenAPI", "Manage Pipelines": "Behandle pipelines", + "Manage Tool Servers": "", "March": "mars", "Max Tokens (num_predict)": "Maks antall tokener (num_predict)", "Max Upload Count": "Maks antall opplastinger", @@ -1078,6 +1082,7 @@ "Tools have a function calling system 
that allows arbitrary code execution": "Verktøy inneholder et funksjonskallsystem som tillater vilkårlig kodekjøring", "Tools have a function calling system that allows arbitrary code execution.": "Verktøy inneholder et funksjonskallsystem som tillater vilkårlig kodekjøring.", "Top K": "Top K", + "Top K Reranker": "", "Top P": "Top P", "Transformers": "Transformatorer", "Trouble accessing Ollama?": "Problemer med å koble til Ollama?", @@ -1155,6 +1160,7 @@ "WebUI URL": "URL for WebUI", "WebUI will make requests to \"{{url}}/api/chat\"": "WebUI vil rette forespørsler til \"{{url}}/api/chat\"", "WebUI will make requests to \"{{url}}/chat/completions\"": "WebUI vil rette forespørsler til \"{{url}}/chat/completions\"", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "Hva prøver du å oppnå?", "What are you working on?": "Hva jobber du på nå?", "What’s New in": "Hva er nytt i", diff --git a/src/lib/i18n/locales/nl-NL/translation.json b/src/lib/i18n/locales/nl-NL/translation.json index 654a545a0ff..0368cd0a228 100644 --- a/src/lib/i18n/locales/nl-NL/translation.json +++ b/src/lib/i18n/locales/nl-NL/translation.json @@ -6,6 +6,7 @@ "(latest)": "(nieuwste)", "(Ollama)": "", "{{ models }}": "{{ modellen }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "", "{{user}}'s Chats": "{{user}}'s chats", @@ -215,6 +216,7 @@ "Confirm your action": "Bevestig uw actie", "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "Verbindingen", "Constrains effort on reasoning for reasoning models. 
Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Neem contact op met de beheerder voor WebUI-toegang", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "Voeg Top K toe", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "Voer URL in (Bijv. http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "Voer URL in (Bijv. http://localhost:11434)", "Enter your current password": "", @@ -634,8 +637,8 @@ "LDAP server updated": "LDAP-server bijgewerkt", "Leaderboard": "Klassement", "Leave empty for unlimited": "Laat leeg voor ongelimiteerd", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "Laat leeg om alle modellen van het \"{{URL}}/api/tags\" endpoint toe te voegen", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "Laat leeg om alle modellen van het \"{{URL}}/models\" endpoint toe te voegen", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "Laat leeg om alle modellen mee te nemen, of selecteer specifieke modellen", "Leave empty to use the default prompt, or enter a custom prompt": "Laat leeg om de standaard prompt te gebruiken, of selecteer een aangepaste prompt", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "Beheer Ollama API-verbindingen", "Manage OpenAI API Connections": "Beheer OpenAI API-verbindingen", "Manage Pipelines": "Pijplijnen beheren", + "Manage Tool Servers": "", "March": "Maart", "Max Tokens (num_predict)": "Max Tokens (num_predict)", "Max Upload Count": "Maximale Uploadhoeveelheid", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "Gereedschappen hebben een systeem voor het 
aanroepen van functies waarmee willekeurige code kan worden uitgevoerd", "Tools have a function calling system that allows arbitrary code execution.": "Gereedschappen hebben een systeem voor het aanroepen van functies waarmee willekeurige code kan worden uitgevoerd", "Top K": "Top K", + "Top K Reranker": "", "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "Problemen met toegang tot Ollama?", @@ -1155,6 +1160,7 @@ "WebUI URL": "", "WebUI will make requests to \"{{url}}/api/chat\"": "WebUI zal verzoeken doen aan \"{{url}}/api/chat\"", "WebUI will make requests to \"{{url}}/chat/completions\"": "WebUI zal verzoeken doen aan \"{{url}}/chat/completions\"", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "Wat probeer je te bereiken?", "What are you working on?": "Waar werk je aan?", "What’s New in": "Wat is nieuw in", diff --git a/src/lib/i18n/locales/pa-IN/translation.json b/src/lib/i18n/locales/pa-IN/translation.json index 846f3575d65..9af5ce4a5a0 100644 --- a/src/lib/i18n/locales/pa-IN/translation.json +++ b/src/lib/i18n/locales/pa-IN/translation.json @@ -6,6 +6,7 @@ "(latest)": "(ਤਾਜ਼ਾ)", "(Ollama)": "", "{{ models }}": "{{ ਮਾਡਲ }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "", "{{user}}'s Chats": "{{user}} ਦੀਆਂ ਗੱਲਾਂ", @@ -215,6 +216,7 @@ "Confirm your action": "", "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "ਕਨੈਕਸ਼ਨ", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "ਸਿਖਰ K ਦਰਜ ਕਰੋ", + "Enter Top K Reranker": "", "Enter URL (e.g. 
http://127.0.0.1:7860/)": "URL ਦਰਜ ਕਰੋ (ਉਦਾਹਰਣ ਲਈ http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "URL ਦਰਜ ਕਰੋ (ਉਦਾਹਰਣ ਲਈ http://localhost:11434)", "Enter your current password": "", @@ -634,8 +637,8 @@ "LDAP server updated": "", "Leaderboard": "", "Leave empty for unlimited": "", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "", "Leave empty to use the default prompt, or enter a custom prompt": "", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "", "Manage OpenAI API Connections": "", "Manage Pipelines": "ਪਾਈਪਲਾਈਨਾਂ ਦਾ ਪ੍ਰਬੰਧਨ ਕਰੋ", + "Manage Tool Servers": "", "March": "ਮਾਰਚ", "Max Tokens (num_predict)": "ਮੈਕਸ ਟੋਕਨ (num_predict)", "Max Upload Count": "", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", "Top K": "ਸਿਖਰ K", + "Top K Reranker": "", "Top P": "ਸਿਖਰ P", "Transformers": "", "Trouble accessing Ollama?": "ਓਲਾਮਾ ਤੱਕ ਪਹੁੰਚਣ ਵਿੱਚ ਮੁਸ਼ਕਲ?", @@ -1155,6 +1160,7 @@ "WebUI URL": "", "WebUI will make requests to \"{{url}}/api/chat\"": "", "WebUI will make requests to \"{{url}}/chat/completions\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "", "What are you working on?": "", "What’s New in": "ਨਵਾਂ ਕੀ ਹੈ", diff --git a/src/lib/i18n/locales/pl-PL/translation.json b/src/lib/i18n/locales/pl-PL/translation.json index 9c58ae2aa7b..37f767c0669 100644 --- a/src/lib/i18n/locales/pl-PL/translation.json +++ b/src/lib/i18n/locales/pl-PL/translation.json @@ -6,6 +6,7 @@ "(latest)": 
"(najnowszy)", "(Ollama)": "", "{{ models }}": "{{ modele }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "{{COUNT}} odpowiedzi", "{{user}}'s Chats": "Czaty użytkownika {{user}}", @@ -215,6 +216,7 @@ "Confirm your action": "Potwierdź swoją akcję", "Confirm your new password": "Potwierdź nowe hasło", "Connect to your own OpenAI compatible API endpoints.": "Połącz się ze swoimi własnymi punktami końcowymi API kompatybilnego z OpenAI.", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "Połączenia", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Skontaktuj się z administratorem, aby uzyskać dostęp do WebUI.", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "Wprowadź {Top K}", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "Podaj adres URL (np. http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "Wprowadź adres URL (np. 
http://localhost:11434)", "Enter your current password": "Wprowadź swoje aktualne hasło", @@ -634,8 +637,8 @@ "LDAP server updated": "Serwer LDAP został zaktualizowany", "Leaderboard": "Tablica wyników", "Leave empty for unlimited": "Pozostaw puste dla nieograniczonego", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "Pozostaw puste, aby uwzględnić wszystkie modele z końca punktu \"{{URL}}/api/tags\"", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "Pozostaw puste, aby uwzględnić wszystkie modele z endpointu \"{{URL}}/models\"", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "Pozostaw puste, aby uwzględnić wszystkie modele lub wybierz konkretne modele", "Leave empty to use the default prompt, or enter a custom prompt": "Pozostaw puste, aby użyć domyślnego promptu, lub wprowadź niestandardowy prompt", "Leave model field empty to use the default model.": "Pozostaw pole modelu puste, aby użyć domyślnego modelu.", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "Zarządzaj połączeniami z API Ollama", "Manage OpenAI API Connections": "Zarządzaj połączeniami z API OpenAI", "Manage Pipelines": "Zarządzanie przepływem", + "Manage Tool Servers": "", "March": "Marzec", "Max Tokens (num_predict)": "Maksymalna liczba tokenów (num_predict)", "Max Upload Count": "Maksymalna liczba przesyłanych plików", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "Narzędzia mają funkcję wywoływania systemu, która umożliwia wykonywanie dowolnego kodu", "Tools have a function calling system that allows arbitrary code execution.": "Narzędzia mają funkcję wywoływania systemu, która umożliwia wykonanie dowolnego kodu.", "Top K": "Najlepsze K", + "Top K Reranker": "", "Top P": "Najlepsze P", "Transformers": "Transformery", 
"Trouble accessing Ollama?": "Czy masz problemy z dostępem do Ollama?", @@ -1155,6 +1160,7 @@ "WebUI URL": "Adres URL interfejsu internetowego", "WebUI will make requests to \"{{url}}/api/chat\"": "WebUI będzie wysyłać żądania do \"{{url}}/api/chat\".", "WebUI will make requests to \"{{url}}/chat/completions\"": "WebUI będzie wysyłać żądania do \"{{url}}/chat/completions\".", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "Do czego dążysz?", "What are you working on?": "Nad czym pracujesz?", "What’s New in": "Co nowego w", diff --git a/src/lib/i18n/locales/pt-BR/translation.json b/src/lib/i18n/locales/pt-BR/translation.json index 072b3af3903..b32316f6b84 100644 --- a/src/lib/i18n/locales/pt-BR/translation.json +++ b/src/lib/i18n/locales/pt-BR/translation.json @@ -6,6 +6,7 @@ "(latest)": "(último)", "(Ollama)": "", "{{ models }}": "{{ models }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "", "{{user}}'s Chats": "Chats de {{user}}", @@ -215,6 +216,7 @@ "Confirm your action": "Confirme sua ação", "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "Conexões", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Contate o Admin para Acesso ao WebUI", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "Digite o Top K", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "Digite a URL (por exemplo, http://127.0.0.1:7860/)", "Enter URL (e.g. 
http://localhost:11434)": "Digite a URL (por exemplo, http://localhost:11434)", "Enter your current password": "", @@ -634,8 +637,8 @@ "LDAP server updated": "Servidor LDAP atualizado", "Leaderboard": "Tabela de classificação", "Leave empty for unlimited": "Deixe vazio para ilimitado", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "Deixe vazio para incluir todos os modelos do endpoint \"{{URL}}/api/tags\"", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "Deixe vazio para incluir todos os modelos do endpoint \"{{URL}}/models\"", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "Deixe vazio para incluir todos os modelos ou selecione modelos especificos", "Leave empty to use the default prompt, or enter a custom prompt": "Deixe vazio para usar o prompt padrão, ou insira um prompt personalizado", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "Gerenciar Conexões Ollama API", "Manage OpenAI API Connections": "Gerenciar Conexões OpenAI API", "Manage Pipelines": "Gerenciar Pipelines", + "Manage Tool Servers": "", "March": "Março", "Max Tokens (num_predict)": "Máximo de Tokens (num_predict)", "Max Upload Count": "Quantidade máxima de anexos", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "Ferramentas possuem um sistema de chamada de funções que permite a execução de código arbitrário", "Tools have a function calling system that allows arbitrary code execution.": "Ferramentas possuem um sistema de chamada de funções que permite a execução de código arbitrário.", "Top K": "Top K", + "Top K Reranker": "", "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "Problemas para acessar o Ollama?", @@ -1155,6 +1160,7 @@ "WebUI URL": "", 
"WebUI will make requests to \"{{url}}/api/chat\"": "A WebUI fará requisições para \"{{url}}/api/chat\".", "WebUI will make requests to \"{{url}}/chat/completions\"": "A WebUI fará requisições para \"{{url}}/chat/completions\".", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "O que está tentando alcançar?", "What are you working on?": "No que está trabalhando?", "What’s New in": "O que há de novo em", diff --git a/src/lib/i18n/locales/pt-PT/translation.json b/src/lib/i18n/locales/pt-PT/translation.json index e7e9baa7c72..462fdf9b501 100644 --- a/src/lib/i18n/locales/pt-PT/translation.json +++ b/src/lib/i18n/locales/pt-PT/translation.json @@ -6,6 +6,7 @@ "(latest)": "(mais recente)", "(Ollama)": "", "{{ models }}": "{{ modelos }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "", "{{user}}'s Chats": "{{user}}'s Chats", @@ -215,6 +216,7 @@ "Confirm your action": "", "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "Conexões", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Contatar Admin para acesso ao WebUI", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "Escreva o Top K", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "Escreva o URL (por exemplo, http://127.0.0.1:7860/)", "Enter URL (e.g. 
http://localhost:11434)": "Escreva o URL (por exemplo, http://localhost:11434)", "Enter your current password": "", @@ -634,8 +637,8 @@ "LDAP server updated": "", "Leaderboard": "", "Leave empty for unlimited": "", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "", "Leave empty to use the default prompt, or enter a custom prompt": "", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "", "Manage OpenAI API Connections": "", "Manage Pipelines": "Gerir pipelines", + "Manage Tool Servers": "", "March": "Março", "Max Tokens (num_predict)": "Máx Tokens (num_predict)", "Max Upload Count": "", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", "Top K": "Top K", + "Top K Reranker": "", "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "Problemas a aceder ao Ollama?", @@ -1155,6 +1160,7 @@ "WebUI URL": "", "WebUI will make requests to \"{{url}}/api/chat\"": "", "WebUI will make requests to \"{{url}}/chat/completions\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "", "What are you working on?": "", "What’s New in": "O que há de novo em", diff --git a/src/lib/i18n/locales/ro-RO/translation.json b/src/lib/i18n/locales/ro-RO/translation.json index c17bc92c8cd..885ae15713e 100644 --- a/src/lib/i18n/locales/ro-RO/translation.json +++ b/src/lib/i18n/locales/ro-RO/translation.json @@ -6,6 +6,7 @@ "(latest)": "(ultimul)", "(Ollama)": "", "{{ models }}": "{{ modele }}", + "{{COUNT}} Available Tool 
Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "", "{{user}}'s Chats": "Conversațiile lui {{user}}", @@ -215,6 +216,7 @@ "Confirm your action": "Confirmă acțiunea ta", "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "Conexiuni", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Contactează administratorul pentru acces WebUI", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "Introduceți Top K", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "Introduceți URL-ul (de ex. http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "Introduceți URL-ul (de ex. http://localhost:11434)", "Enter your current password": "", @@ -634,8 +637,8 @@ "LDAP server updated": "", "Leaderboard": "Tabel de clasament", "Leave empty for unlimited": "Lăsați gol pentru nelimitat", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "Lăsați gol pentru a include toate modelele sau selectați modele specifice", "Leave empty to use the default prompt, or enter a custom prompt": "Lăsați gol pentru a utiliza promptul implicit sau introduceți un prompt personalizat", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "", "Manage OpenAI API Connections": "", "Manage Pipelines": "Gestionează Conductele", + "Manage Tool Servers": "", "March": "Martie", "Max Tokens 
(num_predict)": "Număr Maxim de Tokeni (num_predict)", "Max Upload Count": "Număr maxim de încărcări", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "Instrumentele au un sistem de apelare a funcțiilor care permite executarea arbitrară a codului", "Tools have a function calling system that allows arbitrary code execution.": "Instrumentele au un sistem de apelare a funcțiilor care permite executarea arbitrară a codului.", "Top K": "Top K", + "Top K Reranker": "", "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "Probleme la accesarea Ollama?", @@ -1155,6 +1160,7 @@ "WebUI URL": "", "WebUI will make requests to \"{{url}}/api/chat\"": "", "WebUI will make requests to \"{{url}}/chat/completions\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "", "What are you working on?": "", "What’s New in": "Ce e Nou în", diff --git a/src/lib/i18n/locales/ru-RU/translation.json b/src/lib/i18n/locales/ru-RU/translation.json index e3d23823424..c64d6f7420e 100644 --- a/src/lib/i18n/locales/ru-RU/translation.json +++ b/src/lib/i18n/locales/ru-RU/translation.json @@ -6,6 +6,7 @@ "(latest)": "(последняя)", "(Ollama)": "", "{{ models }}": "{{ модели }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "{{COUNT}} скрытых строк", "{{COUNT}} Replies": "{{COUNT}} Ответов", "{{user}}'s Chats": "Чаты {{user}}'а", @@ -215,6 +216,7 @@ "Confirm your action": "Подтвердите свое действие", "Confirm your new password": "Подтвердите свой новый пароль", "Connect to your own OpenAI compatible API endpoints.": "Подключитесь к своим собственным энд-поинтам API, совместимым с OpenAI.", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "Соединение", "Constrains effort on reasoning for reasoning models. 
Only applicable to reasoning models from specific providers that support reasoning effort.": "Ограничивает усилия по обоснованию для моделей обоснования. Применимо только к моделям обоснования от конкретных поставщиков, которые поддерживают усилия по обоснованию.", "Contact Admin for WebUI Access": "Обратитесь к администратору для получения доступа к WebUI", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "Введите время ожидания в секундах", "Enter to Send": "Enter для отправки", "Enter Top K": "Введите Top K", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "Введите URL-адрес (например, http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "Введите URL-адрес (например, http://localhost:11434)", "Enter your current password": "Введите ваш текущий пароль", @@ -634,8 +637,8 @@ "LDAP server updated": "LDAP сервер обновлен", "Leaderboard": "Таблица Лидеров", "Leave empty for unlimited": "Оставьте пустым для неограниченного", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "Оставьте пустым, чтобы включить все модели из энд-поинта \"{{URL}}/api/tags\"", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "Оставьте пустым, чтобы включить все модели из энд-поинта \"{{URL}}/models\"", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "Оставьте поле пустым, чтобы включить все модели или выбрать конкретные модели", "Leave empty to use the default prompt, or enter a custom prompt": "Оставьте пустым, чтобы использовать промпт по умолчанию, или введите пользовательский промпт", "Leave model field empty to use the default model.": "Оставьте поле model пустым, чтобы использовать модель по умолчанию.", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "Управление соединениями API Ollama", "Manage OpenAI API Connections": 
"Управление соединениями API OpenAI", "Manage Pipelines": "Управление конвейерами", + "Manage Tool Servers": "", "March": "Март", "Max Tokens (num_predict)": "Максимальное количество токенов (num_predict)", "Max Upload Count": "Максимальное количество загрузок", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "Инструменты имеют систему вызова функций, которая позволяет выполнять произвольный код", "Tools have a function calling system that allows arbitrary code execution.": "Инструменты имеют систему вызова функций, которая позволяет выполнять произвольный код.", "Top K": "Top K", + "Top K Reranker": "", "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "Проблемы с доступом к Ollama?", @@ -1155,6 +1160,7 @@ "WebUI URL": "", "WebUI will make requests to \"{{url}}/api/chat\"": "WebUI будет отправлять запросы к \"{{url}}/api/chat\"", "WebUI will make requests to \"{{url}}/chat/completions\"": "WebUI будет отправлять запросы к \"{{url}}/chat/completions\"", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "Чего вы пытаетесь достичь?", "What are you working on?": "Над чем вы работаете?", "What’s New in": "Что нового в", diff --git a/src/lib/i18n/locales/sk-SK/translation.json b/src/lib/i18n/locales/sk-SK/translation.json index 2d3323b6a69..223b9f06be6 100644 --- a/src/lib/i18n/locales/sk-SK/translation.json +++ b/src/lib/i18n/locales/sk-SK/translation.json @@ -6,6 +6,7 @@ "(latest)": "Najnovšie", "(Ollama)": "", "{{ models }}": "{{ models }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "", "{{user}}'s Chats": "{{user}}'s konverzácie", @@ -215,6 +216,7 @@ "Confirm your action": "Potvrďte svoju akciu", "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "Pripojenia", "Constrains 
effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Kontaktujte administrátora pre prístup k webovému rozhraniu.", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "Zadajte horné K", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "Zadajte URL (napr. http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "Zadajte URL (napr. http://localhost:11434)", "Enter your current password": "", @@ -634,8 +637,8 @@ "LDAP server updated": "", "Leaderboard": "Rebríček", "Leave empty for unlimited": "Nechajte prázdne pre neobmedzene", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "Nechajte prázdne pre zahrnutie všetkých modelov alebo vyberte konkrétne modely.", "Leave empty to use the default prompt, or enter a custom prompt": "Nechajte prázdne pre použitie predvoleného podnetu, alebo zadajte vlastný podnet.", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "", "Manage OpenAI API Connections": "", "Manage Pipelines": "Správa pipelines", + "Manage Tool Servers": "", "March": "Marec", "Max Tokens (num_predict)": "Maximálny počet tokenov (num_predict)", "Max Upload Count": "Maximálny počet nahraní", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "Nástroje majú systém volania funkcií, ktorý umožňuje ľubovoľné spúšťanie kódu.", "Tools have a function calling system that allows arbitrary code execution.": "Nástroje majú systém volania funkcií, ktorý umožňuje 
spúšťanie ľubovoľného kódu.", "Top K": "Top K", + "Top K Reranker": "", "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "Máte problémy s prístupom k Ollama?", @@ -1155,6 +1160,7 @@ "WebUI URL": "", "WebUI will make requests to \"{{url}}/api/chat\"": "", "WebUI will make requests to \"{{url}}/chat/completions\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "", "What are you working on?": "", "What’s New in": "Čo je nové v", diff --git a/src/lib/i18n/locales/sr-RS/translation.json b/src/lib/i18n/locales/sr-RS/translation.json index 17614d3eadf..ffcfba20340 100644 --- a/src/lib/i18n/locales/sr-RS/translation.json +++ b/src/lib/i18n/locales/sr-RS/translation.json @@ -6,6 +6,7 @@ "(latest)": "(најновије)", "(Ollama)": "", "{{ models }}": "{{ модели }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "{{COUNT}} одговора", "{{user}}'s Chats": "Ћаскања корисника {{user}}", @@ -215,6 +216,7 @@ "Confirm your action": "Потврди радњу", "Confirm your new password": "Потврди нову лозинку", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "Везе", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Пишите админима за приступ на WebUI", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "Унесите Топ К", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "Унесите адресу (нпр. http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "Унесите адресу (нпр. 
http://localhost:11434)", "Enter your current password": "", @@ -634,8 +637,8 @@ "LDAP server updated": "ЛДАП сервер измењен", "Leaderboard": "Ранг листа", "Leave empty for unlimited": "", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "", "Leave empty to use the default prompt, or enter a custom prompt": "", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "Управљај Ollama АПИ везама", "Manage OpenAI API Connections": "Управљај OpenAI АПИ везама", "Manage Pipelines": "Управљање цевоводима", + "Manage Tool Servers": "", "March": "Март", "Max Tokens (num_predict)": "Маx Токенс (нум_предицт)", "Max Upload Count": "", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", "Top K": "Топ К", + "Top K Reranker": "", "Top P": "Топ П", "Transformers": "", "Trouble accessing Ollama?": "Проблеми са приступом Ollama-и?", @@ -1155,6 +1160,7 @@ "WebUI URL": "WebUI адреса", "WebUI will make requests to \"{{url}}/api/chat\"": "", "WebUI will make requests to \"{{url}}/chat/completions\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "", "What are you working on?": "", "What’s New in": "Шта је ново у", diff --git a/src/lib/i18n/locales/sv-SE/translation.json b/src/lib/i18n/locales/sv-SE/translation.json index c8141ee1a1b..eb341402623 100644 --- a/src/lib/i18n/locales/sv-SE/translation.json +++ b/src/lib/i18n/locales/sv-SE/translation.json @@ -6,6 +6,7 @@ "(latest)": "(senaste)", "(Ollama)": "", "{{ models }}": "{{ 
modeller }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "{{COUNT}} Svar", "{{user}}'s Chats": "{{user}}s Chats", @@ -215,6 +216,7 @@ "Confirm your action": "Bekräfta åtgärd", "Confirm your new password": "Bekräfta ditt nya lösenord", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "Anslutningar", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Kontakta administratören för att få åtkomst till WebUI", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "Ange Top K", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "Ange URL (t.ex. http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "Ange URL (t.ex. http://localhost:11434)", "Enter your current password": "", @@ -634,8 +637,8 @@ "LDAP server updated": "", "Leaderboard": "", "Leave empty for unlimited": "", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "", "Leave empty to use the default prompt, or enter a custom prompt": "", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "", "Manage OpenAI API Connections": "", "Manage Pipelines": "Hantera rörledningar", + "Manage Tool Servers": "", "March": "mars", "Max Tokens (num_predict)": "Maximalt antal tokens (num_predict)", "Max Upload Count": "", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code 
execution": "Verktyg har ett funktionsanropssystem som tillåter godtycklig kodkörning", "Tools have a function calling system that allows arbitrary code execution.": "Verktyg har ett funktionsanropssystem som tillåter godtycklig kodkörning", "Top K": "Topp K", + "Top K Reranker": "", "Top P": "Topp P", "Transformers": "", "Trouble accessing Ollama?": "Problem med att komma åt Ollama?", @@ -1155,6 +1160,7 @@ "WebUI URL": "", "WebUI will make requests to \"{{url}}/api/chat\"": "", "WebUI will make requests to \"{{url}}/chat/completions\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "Vad försöker du uppnå?", "What are you working on?": "Var arbetar du med?", "What’s New in": "Vad är nytt i", diff --git a/src/lib/i18n/locales/th-TH/translation.json b/src/lib/i18n/locales/th-TH/translation.json index f28ffa9788a..36a19d4bf89 100644 --- a/src/lib/i18n/locales/th-TH/translation.json +++ b/src/lib/i18n/locales/th-TH/translation.json @@ -6,6 +6,7 @@ "(latest)": "(ล่าสุด)", "(Ollama)": "", "{{ models }}": "{{ models }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "", "{{user}}'s Chats": "การสนทนาของ {{user}}", @@ -215,6 +216,7 @@ "Confirm your action": "ยืนยันการดำเนินการของคุณ", "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "การเชื่อมต่อ", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "ติดต่อผู้ดูแลระบบสำหรับการเข้าถึง WebUI", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "ใส่ Top K", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "ใส่ URL (เช่น http://127.0.0.1:7860/)", "Enter URL (e.g. 
http://localhost:11434)": "ใส่ URL (เช่น http://localhost:11434)", "Enter your current password": "", @@ -634,8 +637,8 @@ "LDAP server updated": "", "Leaderboard": "", "Leave empty for unlimited": "", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "", "Leave empty to use the default prompt, or enter a custom prompt": "", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "", "Manage OpenAI API Connections": "", "Manage Pipelines": "จัดการไปป์ไลน์", + "Manage Tool Servers": "", "March": "มีนาคม", "Max Tokens (num_predict)": "โทเค็นสูงสุด (num_predict)", "Max Upload Count": "", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "เครื่องมือมีระบบการเรียกใช้ฟังก์ชันที่สามารถดำเนินการโค้ดใดๆ ได้", "Tools have a function calling system that allows arbitrary code execution.": "เครื่องมือมีระบบการเรียกใช้ฟังก์ชันที่สามารถดำเนินการโค้ดใดๆ ได้", "Top K": "Top K", + "Top K Reranker": "", "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "มีปัญหาในการเข้าถึง Ollama?", @@ -1155,6 +1160,7 @@ "WebUI URL": "", "WebUI will make requests to \"{{url}}/api/chat\"": "", "WebUI will make requests to \"{{url}}/chat/completions\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "", "What are you working on?": "", "What’s New in": "มีอะไรใหม่ใน", diff --git a/src/lib/i18n/locales/tk-TW/translation.json b/src/lib/i18n/locales/tk-TW/translation.json index 19581081fbf..f8390be5d39 100644 --- a/src/lib/i18n/locales/tk-TW/translation.json +++ b/src/lib/i18n/locales/tk-TW/translation.json @@ -6,6 +6,7 
@@ "(latest)": "", "(Ollama)": "", "{{ models }}": "", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "", "{{user}}'s Chats": "", @@ -215,6 +216,7 @@ "Confirm your action": "", "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "", "Enter URL (e.g. http://localhost:11434)": "", "Enter your current password": "", @@ -634,8 +637,8 @@ "LDAP server updated": "", "Leaderboard": "", "Leave empty for unlimited": "", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "", "Leave empty to use the default prompt, or enter a custom prompt": "", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "", "Manage OpenAI API Connections": "", "Manage Pipelines": "", + "Manage Tool Servers": "", "March": "", "Max Tokens (num_predict)": "", "Max Upload Count": "", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", "Top K": "", + "Top K Reranker": "", "Top P": "", "Transformers": "", "Trouble accessing Ollama?": "", @@ -1155,6 +1160,7 @@ "WebUI URL": "", "WebUI 
will make requests to \"{{url}}/api/chat\"": "", "WebUI will make requests to \"{{url}}/chat/completions\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "", "What are you working on?": "", "What’s New in": "", diff --git a/src/lib/i18n/locales/tr-TR/translation.json b/src/lib/i18n/locales/tr-TR/translation.json index eee1f2c1792..2e19a7f66b3 100644 --- a/src/lib/i18n/locales/tr-TR/translation.json +++ b/src/lib/i18n/locales/tr-TR/translation.json @@ -6,6 +6,7 @@ "(latest)": "(en son)", "(Ollama)": "", "{{ models }}": "{{ models }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "{{COUNT}} Yanıt", "{{user}}'s Chats": "{{user}}'ın Sohbetleri", @@ -215,6 +216,7 @@ "Confirm your action": "İşleminizi onaylayın", "Confirm your new password": "Yeni parolanızı onaylayın", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "Bağlantılar", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "WebUI Erişimi için Yöneticiyle İletişime Geçin", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "Top K'yı girin", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "URL'yi Girin (örn. http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "URL'yi Girin (e.g. 
http://localhost:11434)", "Enter your current password": "Mevcut parolanızı girin", @@ -634,8 +637,8 @@ "LDAP server updated": "LDAP sunucusu güncellendi", "Leaderboard": "Liderlik Tablosu", "Leave empty for unlimited": "Sınırsız için boş bırakınız", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "Tüm modelleri dahil etmek için boş bırakın veya belirli modelleri seçin", "Leave empty to use the default prompt, or enter a custom prompt": "Varsayılan promptu kullanmak için boş bırakın veya özel bir prompt girin", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "Ollama API Bağlantılarını Yönet", "Manage OpenAI API Connections": "OpenAI API Bağlantılarını Yönet", "Manage Pipelines": "Pipelineları Yönet", + "Manage Tool Servers": "", "March": "Mart", "Max Tokens (num_predict)": "Maksimum Token (num_predict)", "Max Upload Count": "Maksimum Yükleme Sayısı", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "Araçlar, keyfi kod yürütme izni veren bir fonksiyon çağırma sistemine sahiptir", "Tools have a function calling system that allows arbitrary code execution.": "Araçlar, keyfi kod yürütme izni veren bir fonksiyon çağırma sistemine sahiptir.", "Top K": "Top K", + "Top K Reranker": "", "Top P": "Top P", "Transformers": "Dönüştürücüler", "Trouble accessing Ollama?": "Ollama'ya erişmede sorun mu yaşıyorsunuz?", @@ -1155,6 +1160,7 @@ "WebUI URL": "WebUI URL'si", "WebUI will make requests to \"{{url}}/api/chat\"": "WebUI, \"{{url}}/api/chat\" adresine istek yapacak", "WebUI will make requests to \"{{url}}/chat/completions\"": "WebUI, 
\"{{url}}/chat/completions\" adresine istek yapacak", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "Ne yapmaya çalışıyorsunuz?", "What are you working on?": "Üzerinde çalıştığınız nedir?", "What’s New in": "Yenilikler:", diff --git a/src/lib/i18n/locales/uk-UA/translation.json b/src/lib/i18n/locales/uk-UA/translation.json index 981e9b11e40..52839d775ab 100644 --- a/src/lib/i18n/locales/uk-UA/translation.json +++ b/src/lib/i18n/locales/uk-UA/translation.json @@ -6,6 +6,7 @@ "(latest)": "(остання)", "(Ollama)": "(Ollama)", "{{ models }}": "{{ models }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "{{COUNT}} прихованих рядків", "{{COUNT}} Replies": "{{COUNT}} Відповіді", "{{user}}'s Chats": "Чати {{user}}а", @@ -215,6 +216,7 @@ "Confirm your action": "Підтвердіть свою дію", "Confirm your new password": "Підтвердіть свій новий пароль", "Connect to your own OpenAI compatible API endpoints.": "Підключіться до своїх власних API-ендпоінтів, сумісних з OpenAI.", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "З'єднання", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "Обмежує зусилля на міркування для моделей міркування. Діє лише для моделей міркування від конкретних постачальників, які підтримують зусилля міркування.", "Contact Admin for WebUI Access": "Зверніться до адміна для отримання доступу до WebUI", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "Введіть тайм-аут у секундах", "Enter to Send": "Введіть для відправки", "Enter Top K": "Введіть Top K", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "Введіть URL-адресу (напр., http://127.0.0.1:7860/)", "Enter URL (e.g. 
http://localhost:11434)": "Введіть URL-адресу (напр., http://localhost:11434)", "Enter your current password": "Введіть ваш поточний пароль", @@ -634,8 +637,8 @@ "LDAP server updated": "Сервер LDAP оновлено", "Leaderboard": "Таблиця лідерів", "Leave empty for unlimited": "Залиште порожнім для необмеженого розміру", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "Залиште порожнім, щоб включити усі моделі з кінцевої точки \"{{URL}}/api/tags\"", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "Залиште порожнім, щоб включити усі моделі з кінцевої точки \"{{URL}}/models\"", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "Залиште порожнім, щоб включити усі моделі, або виберіть конкретні моделі.", "Leave empty to use the default prompt, or enter a custom prompt": "Залиште порожнім для використання стандартного запиту, або введіть власний запит", "Leave model field empty to use the default model.": "Залиште поле моделі порожнім, щоб використовувати модель за замовчуванням.", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "Керувати з'єднаннями Ollama API", "Manage OpenAI API Connections": "Керувати з'єднаннями OpenAI API", "Manage Pipelines": "Керування конвеєрами", + "Manage Tool Servers": "", "March": "Березень", "Max Tokens (num_predict)": "Макс токенів (num_predict)", "Max Upload Count": "Макс. 
кількість завантажень", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "Інструменти мають систему виклику функцій, яка дозволяє виконання довільного коду", "Tools have a function calling system that allows arbitrary code execution.": "Інструменти мають систему виклику функцій, яка дозволяє виконання довільного коду.", "Top K": "Top K", + "Top K Reranker": "", "Top P": "Top P", "Transformers": "Трансформери", "Trouble accessing Ollama?": "Проблеми з доступом до Ollama?", @@ -1155,6 +1160,7 @@ "WebUI URL": "WebUI URL", "WebUI will make requests to \"{{url}}/api/chat\"": "WebUI надсилатиме запити до \"{{url}}/api/chat\"", "WebUI will make requests to \"{{url}}/chat/completions\"": "WebUI надсилатиме запити до \"{{url}}/chat/completions\"", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "Чого ви прагнете досягти?", "What are you working on?": "Над чим ти працюєш?", "What’s New in": "Що нового в", diff --git a/src/lib/i18n/locales/ur-PK/translation.json b/src/lib/i18n/locales/ur-PK/translation.json index 0db67a50818..225904d790c 100644 --- a/src/lib/i18n/locales/ur-PK/translation.json +++ b/src/lib/i18n/locales/ur-PK/translation.json @@ -6,6 +6,7 @@ "(latest)": "(تازہ ترین)", "(Ollama)": "", "{{ models }}": "{{ ماڈلز }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "", "{{user}}'s Chats": "{{ صارف }} کی بات چیت", @@ -215,6 +216,7 @@ "Confirm your action": "اپنی کارروائی کی تصدیق کریں", "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "کنکشنز", "Constrains effort on reasoning for reasoning models. 
Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "ویب یو آئی رسائی کے لیے ایڈمن سے رابطہ کریں", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "اوپر کے K درج کریں", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "یو آر ایل درج کریں (جیسے کہ http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "یو آر ایل درج کریں (مثلاً http://localhost:11434)", "Enter your current password": "", @@ -634,8 +637,8 @@ "LDAP server updated": "", "Leaderboard": "لیڈر بورڈ", "Leave empty for unlimited": "لامحدود کے لیے خالی چھوڑیں", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "تمام ماڈلز کو شامل کرنے کے لئے خالی چھوڑ دیں یا مخصوص ماڈلز منتخب کریں", "Leave empty to use the default prompt, or enter a custom prompt": "خالی چھوڑیں تاکہ ڈیفالٹ پرامپٹ استعمال ہو، یا ایک حسب ضرورت پرامپٹ درج کریں", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "", "Manage OpenAI API Connections": "", "Manage Pipelines": "پائپ لائنز کا نظم کریں", + "Manage Tool Servers": "", "March": "مارچ", "Max Tokens (num_predict)": "زیادہ سے زیادہ ٹوکنز (num_predict)", "Max Upload Count": "زیادہ سے زیادہ اپلوڈ تعداد", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "ٹولز کے پاس ایک فنکشن کالنگ سسٹم ہے جو اختیاری کوڈ کے نفاذ کی اجازت دیتا ہے", "Tools have a function calling system that allows arbitrary code execution.": "ٹولز کے پاس ایک فنکشن کالنگ سسٹم ہے جو اختیاری کوڈ کی عمل درآمد کی اجازت دیتا ہے", "Top K": "اوپر کے K", + "Top 
K Reranker": "", "Top P": "ٹاپ پی", "Transformers": "", "Trouble accessing Ollama?": "Ollama تک رسائی میں مشکل؟", @@ -1155,6 +1160,7 @@ "WebUI URL": "", "WebUI will make requests to \"{{url}}/api/chat\"": "", "WebUI will make requests to \"{{url}}/chat/completions\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "", "What are you working on?": "", "What’s New in": "میں نیا کیا ہے", diff --git a/src/lib/i18n/locales/vi-VN/translation.json b/src/lib/i18n/locales/vi-VN/translation.json index 6a31f9d4e3a..64f8f41a26e 100644 --- a/src/lib/i18n/locales/vi-VN/translation.json +++ b/src/lib/i18n/locales/vi-VN/translation.json @@ -6,6 +6,7 @@ "(latest)": "(mới nhất)", "(Ollama)": "", "{{ models }}": "{{ mô hình }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "", "{{user}}'s Chats": "{{user}}'s Chats", @@ -215,6 +216,7 @@ "Confirm your action": "Xác nhận hành động của bạn", "Confirm your new password": "", "Connect to your own OpenAI compatible API endpoints.": "", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "Kết nối", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Liên hệ với Quản trị viên để được cấp quyền truy cập", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "", "Enter to Send": "", "Enter Top K": "Nhập Top K", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "Nhập URL (vd: http://127.0.0.1:7860/)", "Enter URL (e.g. 
http://localhost:11434)": "Nhập URL (vd: http://localhost:11434)", "Enter your current password": "", @@ -634,8 +637,8 @@ "LDAP server updated": "", "Leaderboard": "", "Leave empty for unlimited": "", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "", "Leave empty to use the default prompt, or enter a custom prompt": "", "Leave model field empty to use the default model.": "", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "", "Manage OpenAI API Connections": "", "Manage Pipelines": "Quản lý Pipelines", + "Manage Tool Servers": "", "March": "Tháng 3", "Max Tokens (num_predict)": "Tokens tối đa (num_predict)", "Max Upload Count": "", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "Các Tools có hệ thống gọi function cho phép thực thi mã tùy ý", "Tools have a function calling system that allows arbitrary code execution.": "Các Tools có hệ thống gọi function cho phép thực thi mã tùy ý.", "Top K": "Top K", + "Top K Reranker": "", "Top P": "Top P", "Transformers": "", "Trouble accessing Ollama?": "Gặp vấn đề khi truy cập Ollama?", @@ -1155,6 +1160,7 @@ "WebUI URL": "", "WebUI will make requests to \"{{url}}/api/chat\"": "", "WebUI will make requests to \"{{url}}/chat/completions\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "", "What are you working on?": "", "What’s New in": "Thông tin mới về", diff --git a/src/lib/i18n/locales/zh-CN/translation.json b/src/lib/i18n/locales/zh-CN/translation.json index d778b93bd08..ec6a7b75174 100644 --- a/src/lib/i18n/locales/zh-CN/translation.json +++ b/src/lib/i18n/locales/zh-CN/translation.json @@ 
-6,6 +6,7 @@ "(latest)": "(最新版)", "(Ollama)": "(Ollama)", "{{ models }}": "{{ models }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "{{COUNT}} 行被隐藏", "{{COUNT}} Replies": "{{COUNT}} 回复", "{{user}}'s Chats": "{{user}} 的对话记录", @@ -215,6 +216,7 @@ "Confirm your action": "确定吗?", "Confirm your new password": "确认新密码", "Connect to your own OpenAI compatible API endpoints.": "连接到你自己的与 OpenAI 兼容的 API 接口端点。", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "外部连接", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "约束推理模型的推理努力程度。仅适用于支持推理努力控制的特定提供商的推理模型。", "Contact Admin for WebUI Access": "请联系管理员以获取访问权限", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "输入以秒为单位的超时时间", "Enter to Send": "Enter 键发送", "Enter Top K": "输入 Top K", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "输入地址 (例如:http://127.0.0.1:7860/)", "Enter URL (e.g. 
http://localhost:11434)": "输入地址 (例如:http://localhost:11434)", "Enter your current password": "输入当前密码", @@ -634,8 +637,8 @@ "LDAP server updated": "LDAP 服务器已更新", "Leaderboard": "排行榜", "Leave empty for unlimited": "留空表示无限制", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "留空表示包含所有来自 \"{{URL}}/api/tags\" 的模型", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "留空表示包含所有来自 \"{{URL}}/models\" 的模型", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "留空表示包含所有模型或请选择模型", "Leave empty to use the default prompt, or enter a custom prompt": "留空以使用默认提示词,或输入自定义提示词。", "Leave model field empty to use the default model.": "将模型字段留空以使用默认模型。", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "管理Ollama API连接", "Manage OpenAI API Connections": "管理OpenAI API连接", "Manage Pipelines": "管理 Pipeline", + "Manage Tool Servers": "", "March": "三月", "Max Tokens (num_predict)": "最大Token数量 (num_predict)", "Max Upload Count": "最大上传数量", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "注意:工具有权执行任意代码", "Tools have a function calling system that allows arbitrary code execution.": "注意:工具有权执行任意代码。", "Top K": "Top K", + "Top K Reranker": "", "Top P": "Top P", "Transformers": "Transformers", "Trouble accessing Ollama?": "访问 Ollama 时遇到问题?", @@ -1155,6 +1160,7 @@ "WebUI URL": "WebUI URL", "WebUI will make requests to \"{{url}}/api/chat\"": "WebUI 将向 \"{{url}}/api/chat\" 发出请求", "WebUI will make requests to \"{{url}}/chat/completions\"": "WebUI 将向 \"{{url}}/chat/completions\" 发出请求", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "你想要达到什么目标?", "What are you working on?": "你在忙于什么?", "What’s New in": "最近更新内容于", diff --git a/src/lib/i18n/locales/zh-TW/translation.json 
b/src/lib/i18n/locales/zh-TW/translation.json index daa1da132f1..1c6baaaa03e 100644 --- a/src/lib/i18n/locales/zh-TW/translation.json +++ b/src/lib/i18n/locales/zh-TW/translation.json @@ -6,6 +6,7 @@ "(latest)": "(最新版)", "(Ollama)": "(Ollama)", "{{ models }}": "{{ models }}", + "{{COUNT}} Available Tool Servers": "", "{{COUNT}} hidden lines": "已隱藏 {{COUNT}} 行", "{{COUNT}} Replies": "{{COUNT}} 回覆", "{{user}}'s Chats": "{{user}} 的對話", @@ -215,6 +216,7 @@ "Confirm your action": "確認您的操作", "Confirm your new password": "確認您的新密碼", "Connect to your own OpenAI compatible API endpoints.": "連線到您自己的 OpenAI 相容 API 端點。", + "Connect to your own OpenAPI compatible external tool servers.": "", "Connections": "連線", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "限制推理模型的推理程度。僅適用於特定供應商支援推理程度的推理模型。", "Contact Admin for WebUI Access": "請聯絡管理員以取得 WebUI 存取權限", @@ -437,6 +439,7 @@ "Enter timeout in seconds": "請以秒為單位輸入超時時間", "Enter to Send": "使用 Enter 傳送", "Enter Top K": "輸入 Top K 值", + "Enter Top K Reranker": "", "Enter URL (e.g. http://127.0.0.1:7860/)": "輸入 URL(例如:http://127.0.0.1:7860/)", "Enter URL (e.g. 
http://localhost:11434)": "輸入 URL(例如:http://localhost:11434)", "Enter your current password": "輸入您的目前密碼", @@ -634,8 +637,8 @@ "LDAP server updated": "LDAP 伺服器已更新", "Leaderboard": "排行榜", "Leave empty for unlimited": "留空表示無限制", - "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "留空以包含來自 \"{{URL}}/api/tags\" 端點的所有模型", - "Leave empty to include all models from \"{{URL}}/models\" endpoint": "留空以包含來自 \"{{URL}}/models\" 端點的所有模型", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "", "Leave empty to include all models or select specific models": "留空以包含所有模型或選擇特定模型", "Leave empty to use the default prompt, or enter a custom prompt": "留空以使用預設提示詞,或輸入自訂提示詞", "Leave model field empty to use the default model.": "留空模型欄位以使用預設模型。", @@ -662,6 +665,7 @@ "Manage Ollama API Connections": "管理 Ollama API 連線", "Manage OpenAI API Connections": "管理 OpenAI API 連線", "Manage Pipelines": "管理管線", + "Manage Tool Servers": "", "March": "3 月", "Max Tokens (num_predict)": "最大 token 數(num_predict)", "Max Upload Count": "最大上傳數量", @@ -1078,6 +1082,7 @@ "Tools have a function calling system that allows arbitrary code execution": "工具具有允許執行任意程式碼的函式呼叫系統", "Tools have a function calling system that allows arbitrary code execution.": "工具具有允許執行任意程式碼的函式呼叫系統。", "Top K": "Top K", + "Top K Reranker": "", "Top P": "Top P", "Transformers": "Transformers", "Trouble accessing Ollama?": "存取 Ollama 時遇到問題?", @@ -1155,6 +1160,7 @@ "WebUI URL": "WebUI URL", "WebUI will make requests to \"{{url}}/api/chat\"": "WebUI 將向 \"{{url}}/api/chat\" 傳送請求", "WebUI will make requests to \"{{url}}/chat/completions\"": "WebUI 將向 \"{{url}}/chat/completions\" 傳送請求", + "WebUI will make requests to \"{{url}}/openapi.json\"": "", "What are you trying to achieve?": "您正在試圖完成什麼?", "What are you working on?": "您現在的工作是什麼?", "What’s New in": "新功能", From 69dee19568dfd037a46f8f3b176a827a466577c4 Mon Sep 17 00:00:00 2001 From: 
Timothy Jaeryang Baek Date: Thu, 27 Mar 2025 01:42:33 -0700 Subject: [PATCH 414/623] refac --- src/lib/components/AddConnectionModal.svelte | 3 +++ src/lib/components/AddServerModal.svelte | 3 +++ 2 files changed, 6 insertions(+) diff --git a/src/lib/components/AddConnectionModal.svelte b/src/lib/components/AddConnectionModal.svelte index 7ffa8f556b5..864d850a6ac 100644 --- a/src/lib/components/AddConnectionModal.svelte +++ b/src/lib/components/AddConnectionModal.svelte @@ -85,6 +85,9 @@ return; } + // remove trailing slash from url + url = url.replace(/\/$/, ''); + const connection = { url, key, diff --git a/src/lib/components/AddServerModal.svelte b/src/lib/components/AddServerModal.svelte index 160701d27bd..f9970e4e84d 100644 --- a/src/lib/components/AddServerModal.svelte +++ b/src/lib/components/AddServerModal.svelte @@ -33,6 +33,9 @@ const submitHandler = async () => { loading = true; + // remove trailing slash from url + url = url.replace(/\/$/, ''); + const connection = { url, key, From 0615c11a53ca67def3a45bd2c6ca3256479bb14f Mon Sep 17 00:00:00 2001 From: Jan Kessler Date: Thu, 27 Mar 2025 10:22:49 +0100 Subject: [PATCH 415/623] fix sentinel connection being attempted for non-sentinel redis --- backend/open_webui/utils/redis.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/backend/open_webui/utils/redis.py b/backend/open_webui/utils/redis.py index 3ad1f78bedf..fa90a26db1b 100644 --- a/backend/open_webui/utils/redis.py +++ b/backend/open_webui/utils/redis.py @@ -35,9 +35,11 @@ def get_redis_connection(redis_url, redis_sentinels, decode_responses=True): return redis.Redis.from_url(redis_url, decode_responses=decode_responses) def get_sentinels_from_env(sentinel_hosts_env, sentinel_port_env): - sentinel_hosts=sentinel_hosts_env.split(',') - sentinel_port=int(sentinel_port_env) - return [(host, sentinel_port) for host in sentinel_hosts] + if sentinel_hosts_env: + sentinel_hosts=sentinel_hosts_env.split(',') + 
sentinel_port=int(sentinel_port_env) + return [(host, sentinel_port) for host in sentinel_hosts] + return [] class AsyncRedisSentinelManager(socketio.AsyncRedisManager): def __init__(self, sentinel_hosts, sentinel_port=26379, redis_port=6379, service="mymaster", db=0, From d1bc2cfa2f5a4cdc366be7d1ee53db61eeb2ef12 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Thu, 27 Mar 2025 02:27:56 -0700 Subject: [PATCH 416/623] refac: external tools server support --- backend/open_webui/main.py | 1 + backend/open_webui/utils/middleware.py | 35 +++-- src/lib/apis/index.ts | 133 ++++++++++++++++++ src/lib/components/chat/Chat.svelte | 12 +- src/lib/components/chat/Settings/Tools.svelte | 6 +- src/lib/stores/index.ts | 2 + src/lib/utils/index.ts | 56 ++++++++ src/routes/(app)/+layout.svelte | 8 +- src/routes/+layout.svelte | 43 ++++-- 9 files changed, 265 insertions(+), 31 deletions(-) diff --git a/backend/open_webui/main.py b/backend/open_webui/main.py index 3fbeb6c8454..7fc4e398355 100644 --- a/backend/open_webui/main.py +++ b/backend/open_webui/main.py @@ -1052,6 +1052,7 @@ async def chat_completion( "message_id": form_data.pop("id", None), "session_id": form_data.pop("session_id", None), "tool_ids": form_data.get("tool_ids", None), + "tool_servers": form_data.pop("tool_servers", None), "files": form_data.get("files", None), "features": form_data.get("features", None), "variables": form_data.get("variables", None), diff --git a/backend/open_webui/utils/middleware.py b/backend/open_webui/utils/middleware.py index f6d81214ec7..77b01bdfc2d 100644 --- a/backend/open_webui/utils/middleware.py +++ b/backend/open_webui/utils/middleware.py @@ -213,8 +213,9 @@ async def tool_call_handler(tool_call): "type": "execute:tool", "data": { "id": str(uuid4()), - "tool": tool, + "name": tool_function_name, "params": tool_function_params, + "tool": tool, "server": tool.get("server", {}), "session_id": metadata.get("session_id", None), }, @@ -224,17 +225,30 @@ async def 
tool_call_handler(tool_call): except Exception as e: tool_output = str(e) + if isinstance(tool_output, dict): + tool_output = json.dumps(tool_output, indent=4) + if isinstance(tool_output, str): - if tools[tool_function_name]["citation"]: + tool_id = tools[tool_function_name].get("toolkit_id", "") + if tools[tool_function_name].get("citation", False): + sources.append( { "source": { - "name": f"TOOL:{tools[tool_function_name]['toolkit_id']}/{tool_function_name}" + "name": ( + f"TOOL:" + f"{tool_id}/{tool_function_name}" + if tool_id + else f"{tool_function_name}" + ), }, "document": [tool_output], "metadata": [ { - "source": f"TOOL:{tools[tool_function_name]['toolkit_id']}/{tool_function_name}" + "source": ( + f"TOOL:" + f"{tool_id}/{tool_function_name}" + if tool_id + else f"{tool_function_name}" + ) } ], } @@ -246,13 +260,17 @@ async def tool_call_handler(tool_call): "document": [tool_output], "metadata": [ { - "source": f"TOOL:{tools[tool_function_name]['toolkit_id']}/{tool_function_name}" + "source": ( + f"TOOL:" + f"{tool_id}/{tool_function_name}" + if tool_id + else f"{tool_function_name}" + ) } ], } ) - if tools[tool_function_name]["file_handler"]: + if tools[tool_function_name].get("file_handler", False): skip_files = True # check if "tool_calls" in result @@ -788,7 +806,7 @@ async def process_chat_payload(request, form_data, user, metadata, model): # Server side tools tool_ids = metadata.get("tool_ids", None) # Client side tools - tool_servers = form_data.get("tool_servers", None) + tool_servers = metadata.get("tool_servers", None) log.debug(f"{tool_ids=}") log.debug(f"{tool_servers=}") @@ -1824,8 +1842,9 @@ async def stream_body_handler(response): "type": "execute:tool", "data": { "id": str(uuid4()), - "tool": tool, + "name": tool_name, "params": tool_function_params, + "tool": tool, "server": tool.get("server", {}), "session_id": metadata.get( "session_id", None diff --git a/src/lib/apis/index.ts b/src/lib/apis/index.ts index 674f24267f4..2e6e19e6a70 
100644 --- a/src/lib/apis/index.ts +++ b/src/lib/apis/index.ts @@ -1,4 +1,5 @@ import { WEBUI_API_BASE_URL, WEBUI_BASE_URL } from '$lib/constants'; +import { convertOpenApiToToolPayload } from '$lib/utils'; import { getOpenAIModelsDirect } from './openai'; export const getModels = async ( @@ -256,6 +257,138 @@ export const stopTask = async (token: string, id: string) => { return res; }; +export const getToolServerData = async (token: string, url: string) => { + let error = null; + + const res = await fetch(`${url}/openapi.json`, { + method: 'GET', + headers: { + Accept: 'application/json', + 'Content-Type': 'application/json', + ...(token && { authorization: `Bearer ${token}` }) + } + }) + .then(async (res) => { + if (!res.ok) throw await res.json(); + return res.json(); + }) + .catch((err) => { + console.log(err); + if ('detail' in err) { + error = err.detail; + } else { + error = err; + } + return null; + }); + + if (error) { + throw error; + } + + const data = { + openapi: res, + info: res.info, + specs: convertOpenApiToToolPayload(res) + }; + + console.log(data); + return data; +}; + +export const getToolServersData = async (servers: object[]) => { + return await Promise.all( + servers + .filter(async (server) => server?.config?.enable) + .map(async (server) => { + const data = await getToolServerData(server?.key, server?.url).catch((err) => { + console.error(err); + return null; + }); + + if (data) { + const { openapi, info, specs } = data; + return { + url: server?.url, + openapi: openapi, + info: info, + specs: specs + }; + } + }) + ); +}; + +export const executeToolServer = async ( + token: string, + url: string, + name: string, + params: object, + serverData: { openapi: any; info: any; specs: any } +) => { + let error = null; + + try { + // Find the matching operationId in the OpenAPI specification + const matchingRoute = Object.entries(serverData.openapi.paths).find(([path, methods]) => { + return Object.entries(methods).some( + ([method, operation]: any) 
=> operation.operationId === name + ); + }); + + if (!matchingRoute) { + throw new Error(`No matching route found for operationId: ${name}`); + } + + const [route, methods] = matchingRoute; + const methodEntry = Object.entries(methods).find( + ([method, operation]: any) => operation.operationId === name + ); + + if (!methodEntry) { + throw new Error(`No matching method found for operationId: ${name}`); + } + + const [httpMethod, operation]: [string, any] = methodEntry; + + // Replace path parameters in the URL + let finalUrl = `${url}${route}`; + if (operation.parameters) { + Object.entries(params).forEach(([key, value]) => { + finalUrl = finalUrl.replace(`{${key}}`, encodeURIComponent(value as string)); + }); + } + + // Headers and request options + const headers = { + ...(token && { authorization: `Bearer ${token}` }), + 'Content-Type': 'application/json' + }; + + let requestOptions: RequestInit = { + method: httpMethod.toUpperCase(), + headers + }; + + // Handle request body for POST, PUT, PATCH + if (['post', 'put', 'patch'].includes(httpMethod.toLowerCase()) && operation.requestBody) { + requestOptions.body = JSON.stringify(params); + } + + // Execute the request + const res = await fetch(finalUrl, requestOptions); + if (!res.ok) { + throw new Error(`HTTP error! 
Status: ${res.status}`); + } + + return await res.json(); + } catch (err: any) { + error = err.message; + console.error('API Request Error:', error); + return { error }; + } +}; + export const getTaskConfig = async (token: string = '') => { let error = null; diff --git a/src/lib/components/chat/Chat.svelte b/src/lib/components/chat/Chat.svelte index fe733d616e9..a6337ef8e1b 100644 --- a/src/lib/components/chat/Chat.svelte +++ b/src/lib/components/chat/Chat.svelte @@ -35,7 +35,8 @@ showOverview, chatTitle, showArtifacts, - tools + tools, + toolServers } from '$lib/stores'; import { convertMessagesToHistory, @@ -120,8 +121,6 @@ let webSearchEnabled = false; let codeInterpreterEnabled = false; - let toolServers = []; - let chat = null; let tags = []; @@ -194,8 +193,6 @@ setToolIds(); } - $: toolServers = ($settings?.toolServers ?? []).filter((server) => server?.config?.enable); - const setToolIds = async () => { if (!$tools) { tools.set(await getTools(localStorage.token)); @@ -1570,6 +1567,7 @@ files: (files?.length ?? 0) > 0 ? files : undefined, tool_ids: selectedToolIds.length > 0 ? selectedToolIds : undefined, + tool_servers: $toolServers, features: { image_generation: @@ -2038,7 +2036,7 @@ bind:codeInterpreterEnabled bind:webSearchEnabled bind:atSelectedModel - {toolServers} + toolServers={$toolServers} transparentBackground={$settings?.backgroundImageUrl ?? false} {stopResponse} {createMessagePair} @@ -2092,7 +2090,7 @@ bind:webSearchEnabled bind:atSelectedModel transparentBackground={$settings?.backgroundImageUrl ?? false} - {toolServers} + toolServers={$toolServers} {stopResponse} {createMessagePair} on:upload={async (e) => { diff --git a/src/lib/components/chat/Settings/Tools.svelte b/src/lib/components/chat/Settings/Tools.svelte index 740e4712f70..a900b5a469e 100644 --- a/src/lib/components/chat/Settings/Tools.svelte +++ b/src/lib/components/chat/Settings/Tools.svelte @@ -1,12 +1,12 @@
@@ -176,19 +192,22 @@ {#if open && !hide}
{#if attributes?.type === 'tool_calls'} + {@const args = JSON.parse(decode(attributes?.arguments))} + {@const result = JSON.parse(decode(attributes?.result ?? ''))} + {#if attributes?.done === 'true'} \`\`\`json -> ${JSON.stringify(JSON.parse(JSON.parse(decode(attributes?.arguments))), null, 2)} -> ${JSON.stringify(JSON.parse(JSON.parse(decode(attributes?.result))), null, 2)} +> ${formatJSONString(args)} +> ${formatJSONString(result)} > \`\`\``} /> {:else} \`\`\`json -> ${JSON.stringify(JSON.parse(JSON.parse(decode(attributes?.arguments))), null, 2)} +> ${formatJSONString(args)} > \`\`\``} /> {/if} From 08ff1d8d8883fdb43142b6c86b1b01ca675bd9be Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Sat, 29 Mar 2025 15:03:10 -0700 Subject: [PATCH 444/623] refac --- src/lib/components/common/Collapsible.svelte | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/lib/components/common/Collapsible.svelte b/src/lib/components/common/Collapsible.svelte index 9e7fc36bb42..dfed04acf2d 100644 --- a/src/lib/components/common/Collapsible.svelte +++ b/src/lib/components/common/Collapsible.svelte @@ -55,7 +55,7 @@ function formatJSONString(obj) { try { - const parsed = JSON.parse(obj); + const parsed = JSON.parse(JSON.parse(obj)); // If parsed is an object/array, then it's valid JSON if (typeof parsed === 'object') { return JSON.stringify(parsed, null, 2); @@ -192,8 +192,8 @@ {#if open && !hide}
{#if attributes?.type === 'tool_calls'} - {@const args = JSON.parse(decode(attributes?.arguments))} - {@const result = JSON.parse(decode(attributes?.result ?? ''))} + {@const args = decode(attributes?.arguments)} + {@const result = decode(attributes?.result ?? '')} {#if attributes?.done === 'true'} Date: Sat, 29 Mar 2025 15:23:02 -0700 Subject: [PATCH 445/623] refac --- backend/open_webui/routers/files.py | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/backend/open_webui/routers/files.py b/backend/open_webui/routers/files.py index e234bf098cc..8a5232917fd 100644 --- a/backend/open_webui/routers/files.py +++ b/backend/open_webui/routers/files.py @@ -5,7 +5,16 @@ from typing import Optional from urllib.parse import quote -from fastapi import APIRouter, Depends, File, HTTPException, Request, UploadFile, status, Query +from fastapi import ( + APIRouter, + Depends, + File, + HTTPException, + Request, + UploadFile, + status, + Query, +) from fastapi.responses import FileResponse, StreamingResponse from open_webui.constants import ERROR_MESSAGES from open_webui.env import SRC_LOG_LEVELS @@ -38,7 +47,7 @@ def upload_file( file: UploadFile = File(...), user=Depends(get_verified_user), file_metadata: dict = {}, - ingest_file: bool = Query(True) + process: bool = Query(True), ): log.info(f"file.content_type: {file.content_type}") try: @@ -67,7 +76,7 @@ def upload_file( } ), ) - if ingest_file: + if process: try: if file.content_type in [ "audio/mpeg", @@ -228,7 +237,9 @@ async def update_file_data_content_by_id( @router.get("/{id}/content") -async def get_file_content_by_id(id: str, user=Depends(get_verified_user), as_attachment: bool = Query(False)): +async def get_file_content_by_id( + id: str, user=Depends(get_verified_user), attachment: bool = Query(False) +): file = Files.get_file_by_id(id) if file and (file.user_id == user.id or user.role == "admin"): try: @@ -246,12 +257,14 @@ async def get_file_content_by_id(id: str, 
user=Depends(get_verified_user), as_at encoded_filename = quote(filename) headers = {} - if as_attachment: + if attachment: headers["Content-Disposition"] = ( f"attachment; filename*=UTF-8''{encoded_filename}" ) else: - if content_type == "application/pdf" or filename.lower().endswith(".pdf"): + if content_type == "application/pdf" or filename.lower().endswith( + ".pdf" + ): headers["Content-Disposition"] = ( f"inline; filename*=UTF-8''{encoded_filename}" ) From feaa01dc77aada29bc71ccd3741260dc08afefe1 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Sat, 29 Mar 2025 16:52:42 -0700 Subject: [PATCH 446/623] fix: ollama requests --- backend/open_webui/routers/ollama.py | 29 ++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/backend/open_webui/routers/ollama.py b/backend/open_webui/routers/ollama.py index ea093be49ab..fcb263d1e0a 100644 --- a/backend/open_webui/routers/ollama.py +++ b/backend/open_webui/routers/ollama.py @@ -465,18 +465,27 @@ async def get_ollama_versions(request: Request, url_idx: Optional[int] = None): if request.app.state.config.ENABLE_OLLAMA_API: if url_idx is None: # returns lowest version - request_tasks = [ - send_get_request( - f"{url}/api/version", + request_tasks = [] + + for idx, url in enumerate(request.app.state.config.OLLAMA_BASE_URLS): + api_config = request.app.state.config.OLLAMA_API_CONFIGS.get( + str(idx), request.app.state.config.OLLAMA_API_CONFIGS.get( - str(idx), - request.app.state.config.OLLAMA_API_CONFIGS.get( - url, {} - ), # Legacy support - ).get("key", None), + url, {} + ), # Legacy support ) - for idx, url in enumerate(request.app.state.config.OLLAMA_BASE_URLS) - ] + + enable = api_config.get("enable", True) + key = api_config.get("key", None) + + if enable: + request_tasks.append( + send_get_request( + f"{url}/api/version", + key, + ) + ) + responses = await asyncio.gather(*request_tasks) responses = list(filter(lambda x: x is not None, responses)) From 
c700126c1712db9f7cba0dd617ffbae73fbb2198 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Sat, 29 Mar 2025 16:52:49 -0700 Subject: [PATCH 447/623] refac: html rendering --- .../chat/Messages/Markdown/MarkdownInlineTokens.svelte | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib/components/chat/Messages/Markdown/MarkdownInlineTokens.svelte b/src/lib/components/chat/Messages/Markdown/MarkdownInlineTokens.svelte index dc24b6dee6d..7693e4cb438 100644 --- a/src/lib/components/chat/Messages/Markdown/MarkdownInlineTokens.svelte +++ b/src/lib/components/chat/Messages/Markdown/MarkdownInlineTokens.svelte @@ -31,7 +31,7 @@ {:else if token.text.includes(` {:else} - {token.text} + {@html html} {/if} {:else if token.type === 'link'} {#if token.tokens} From 3be626bef3d9b1dfdb8ac5b5c393ab44fbe50887 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Sat, 29 Mar 2025 17:48:57 -0700 Subject: [PATCH 448/623] feat: goto message --- src/lib/components/chat/Messages.svelte | 42 +++++++ .../components/chat/Messages/Message.svelte | 3 + .../Messages/MultiResponseMessages.svelte | 30 +++++ .../chat/Messages/ResponseMessage.svelte | 54 ++++++++- .../chat/Messages/UserMessage.svelte | 105 ++++++++++++++++-- 5 files changed, 218 insertions(+), 16 deletions(-) diff --git a/src/lib/components/chat/Messages.svelte b/src/lib/components/chat/Messages.svelte index dc3766cf244..b55a27b4003 100644 --- a/src/lib/components/chat/Messages.svelte +++ b/src/lib/components/chat/Messages.svelte @@ -107,6 +107,47 @@ } }; + const gotoMessage = async (message, idx) => { + // Determine the correct sibling list (either parent's children or root messages) + let siblings; + if (message.parentId !== null) { + siblings = history.messages[message.parentId].childrenIds; + } else { + siblings = Object.values(history.messages) + .filter((msg) => msg.parentId === null) + .map((msg) => msg.id); + } + + // Clamp index to a valid range + idx = Math.max(0, Math.min(idx, 
siblings.length - 1)); + + let messageId = siblings[idx]; + + // If we're navigating to a different message + if (message.id !== messageId) { + // Drill down to the deepest child of that branch + let messageChildrenIds = history.messages[messageId].childrenIds; + while (messageChildrenIds.length !== 0) { + messageId = messageChildrenIds.at(-1); + messageChildrenIds = history.messages[messageId].childrenIds; + } + + history.currentId = messageId; + } + + await tick(); + + // Optional auto-scroll + if ($settings?.scrollOnBranchChange ?? true) { + const element = document.getElementById('messages-container'); + autoScroll = element.scrollHeight - element.scrollTop <= element.clientHeight + 50; + + setTimeout(() => { + scrollToBottom(); + }, 100); + } + }; + const showPreviousMessage = async (message) => { if (message.parentId !== null) { let messageId = @@ -408,6 +449,7 @@ messageId={message.id} idx={messageIdx} {user} + {gotoMessage} {showPreviousMessage} {showNextMessage} {updateChat} diff --git a/src/lib/components/chat/Messages/Message.svelte b/src/lib/components/chat/Messages/Message.svelte index 9a2b7155cd4..9a30abfe5d7 100644 --- a/src/lib/components/chat/Messages/Message.svelte +++ b/src/lib/components/chat/Messages/Message.svelte @@ -20,6 +20,7 @@ export let user; + export let gotoMessage; export let showPreviousMessage; export let showNextMessage; export let updateChat; @@ -57,6 +58,7 @@ : (Object.values(history.messages) .filter((message) => message.parentId === null) .map((message) => message.id) ?? [])} + {gotoMessage} {showPreviousMessage} {showNextMessage} {editMessage} @@ -70,6 +72,7 @@ {messageId} isLastMessage={messageId === history.currentId} siblings={history.messages[history.messages[messageId].parentId]?.childrenIds ?? 
[]} + {gotoMessage} {showPreviousMessage} {showNextMessage} {updateChat} diff --git a/src/lib/components/chat/Messages/MultiResponseMessages.svelte b/src/lib/components/chat/Messages/MultiResponseMessages.svelte index 1a8ceda79b2..c46be0e83a0 100644 --- a/src/lib/components/chat/Messages/MultiResponseMessages.svelte +++ b/src/lib/components/chat/Messages/MultiResponseMessages.svelte @@ -58,6 +58,35 @@ } } + const gotoMessage = async (modelIdx, messageIdx) => { + // Clamp messageIdx to ensure it's within valid range + groupedMessageIdsIdx[modelIdx] = Math.max( + 0, + Math.min(messageIdx, groupedMessageIds[modelIdx].messageIds.length - 1) + ); + + // Get the messageId at the specified index + let messageId = groupedMessageIds[modelIdx].messageIds[groupedMessageIdsIdx[modelIdx]]; + console.log(messageId); + + // Traverse the branch to find the deepest child message + let messageChildrenIds = history.messages[messageId].childrenIds; + while (messageChildrenIds.length !== 0) { + messageId = messageChildrenIds.at(-1); + messageChildrenIds = history.messages[messageId].childrenIds; + } + + // Update the current message ID in history + history.currentId = messageId; + + // Await UI updates + await tick(); + await updateChat(); + + // Trigger scrolling after navigation + triggerScroll(); + }; + const showPreviousMessage = async (modelIdx) => { groupedMessageIdsIdx[modelIdx] = Math.max(0, groupedMessageIdsIdx[modelIdx] - 1); @@ -224,6 +253,7 @@ messageId={_messageId} isLastMessage={true} siblings={groupedMessageIds[modelIdx].messageIds} + gotoMessage={(message, messageIdx) => gotoMessage(modelIdx, messageIdx)} showPreviousMessage={() => showPreviousMessage(modelIdx)} showNextMessage={() => showNextMessage(modelIdx)} {updateChat} diff --git a/src/lib/components/chat/Messages/ResponseMessage.svelte b/src/lib/components/chat/Messages/ResponseMessage.svelte index 1af5140dc6a..5b4fa1bc952 100644 --- a/src/lib/components/chat/Messages/ResponseMessage.svelte +++ 
b/src/lib/components/chat/Messages/ResponseMessage.svelte @@ -5,7 +5,7 @@ import { createEventDispatcher } from 'svelte'; import { onMount, tick, getContext } from 'svelte'; import type { Writable } from 'svelte/store'; - import type { i18n as i18nType } from 'i18next'; + import type { i18n as i18nType, t } from 'i18next'; const i18n = getContext>('i18n'); @@ -110,6 +110,7 @@ export let siblings; + export let gotoMessage: Function = () => {}; export let showPreviousMessage: Function; export let showNextMessage: Function; @@ -139,6 +140,8 @@ let editedContent = ''; let editTextAreaElement: HTMLTextAreaElement; + let messageIndexEdit = false; + let audioParts: Record = {}; let speaking = false; let speakingIdx: number | undefined; @@ -846,11 +849,50 @@ -
- {siblings.indexOf(message.id) + 1}/{siblings.length} -
+ {#if messageIndexEdit} +
+ { + e.target.select(); + }} + on:blur={(e) => { + gotoMessage(message, e.target.value - 1); + messageIndexEdit = false; + }} + on:keydown={(e) => { + if (e.key === 'Enter') { + gotoMessage(message, e.target.value - 1); + messageIndexEdit = false; + } + }} + class="bg-transparent font-semibold self-center dark:text-gray-100 min-w-fit outline-hidden" + />/{siblings.length} +
+ {:else} + +
{ + messageIndexEdit = true; + + await tick(); + const input = document.getElementById(`message-index-input-${message.id}`); + if (input) { + input.focus(); + input.select(); + } + }} + > + {siblings.indexOf(message.id) + 1}/{siblings.length} +
+ {/if} -
- {siblings.indexOf(message.id) + 1}/{siblings.length} -
+ {#if messageIndexEdit} +
+ { + e.target.select(); + }} + on:blur={(e) => { + gotoMessage(message, e.target.value - 1); + messageIndexEdit = false; + }} + on:keydown={(e) => { + if (e.key === 'Enter') { + gotoMessage(message, e.target.value - 1); + messageIndexEdit = false; + } + }} + class="bg-transparent font-semibold self-center dark:text-gray-100 min-w-fit outline-hidden" + />/{siblings.length} +
+ {:else} + +
{ + messageIndexEdit = true; + + await tick(); + const input = document.getElementById( + `message-index-input-${message.id}` + ); + if (input) { + input.focus(); + input.select(); + } + }} + > + {siblings.indexOf(message.id) + 1}/{siblings.length} +
+ {/if} -
- {siblings.indexOf(message.id) + 1}/{siblings.length} -
+ {#if messageIndexEdit} +
+ { + e.target.select(); + }} + on:blur={(e) => { + gotoMessage(message, e.target.value - 1); + messageIndexEdit = false; + }} + on:keydown={(e) => { + if (e.key === 'Enter') { + gotoMessage(message, e.target.value - 1); + messageIndexEdit = false; + } + }} + class="bg-transparent font-semibold self-center dark:text-gray-100 min-w-fit outline-hidden" + />/{siblings.length} +
+ {:else} + +
{ + messageIndexEdit = true; + + await tick(); + const input = document.getElementById( + `message-index-input-${message.id}` + ); + if (input) { + input.focus(); + input.select(); + } + }} + > + {siblings.indexOf(message.id) + 1}/{siblings.length} +
+ {/if}
+ {#if $config?.features?.enable_autocomplete_generation && richTextInput} +
+
+
+ {$i18n.t('Prompt Autocompletion')} +
+ + +
+
+ {/if} +
From d55735dc1e035f6da4c022b2ec6acde6567f6332 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Sun, 30 Mar 2025 21:23:42 -0700 Subject: [PATCH 459/623] refac: rm profile image from feedback user object --- backend/open_webui/routers/evaluations.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/backend/open_webui/routers/evaluations.py b/backend/open_webui/routers/evaluations.py index f0c4a6b0656..8597fa28635 100644 --- a/backend/open_webui/routers/evaluations.py +++ b/backend/open_webui/routers/evaluations.py @@ -56,8 +56,19 @@ async def update_config( } +class FeedbackUserReponse(BaseModel): + id: str + name: str + email: str + role: str = "pending" + + last_active_at: int # timestamp in epoch + updated_at: int # timestamp in epoch + created_at: int # timestamp in epoch + + class FeedbackUserResponse(FeedbackResponse): - user: Optional[UserModel] = None + user: Optional[FeedbackUserReponse] = None @router.get("/feedbacks/all", response_model=list[FeedbackUserResponse]) @@ -65,7 +76,10 @@ async def get_all_feedbacks(user=Depends(get_admin_user)): feedbacks = Feedbacks.get_all_feedbacks() return [ FeedbackUserResponse( - **feedback.model_dump(), user=Users.get_user_by_id(feedback.user_id) + **feedback.model_dump(), + user=FeedbackUserReponse( + **Users.get_user_by_id(feedback.user_id).model_dump() + ), ) for feedback in feedbacks ] From 33f93371dc830607c800c7024d67f2cc5a641340 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Sun, 30 Mar 2025 21:31:16 -0700 Subject: [PATCH 460/623] feat: user webhooks system settings --- backend/open_webui/config.py | 6 ++++ backend/open_webui/main.py | 3 ++ backend/open_webui/routers/auths.py | 9 ++++-- .../components/admin/Settings/General.svelte | 8 ++++++ .../components/chat/Settings/Account.svelte | 28 ++++++++++--------- 5 files changed, 39 insertions(+), 15 deletions(-) diff --git a/backend/open_webui/config.py b/backend/open_webui/config.py index 
d8b7b98ed6b..f5f8135be4f 100644 --- a/backend/open_webui/config.py +++ b/backend/open_webui/config.py @@ -1092,6 +1092,12 @@ def oidc_oauth_register(client): os.environ.get("ENABLE_MESSAGE_RATING", "True").lower() == "true", ) +ENABLE_USER_WEBHOOKS = PersistentConfig( + "ENABLE_USER_WEBHOOKS", + "ui.enable_user_webhooks", + os.environ.get("ENABLE_USER_WEBHOOKS", "True").lower() == "true", +) + def validate_cors_origins(origins): for origin in origins: diff --git a/backend/open_webui/main.py b/backend/open_webui/main.py index 63d5149c79a..bb78d900346 100644 --- a/backend/open_webui/main.py +++ b/backend/open_webui/main.py @@ -253,6 +253,7 @@ ENABLE_CHANNELS, ENABLE_COMMUNITY_SHARING, ENABLE_MESSAGE_RATING, + ENABLE_USER_WEBHOOKS, ENABLE_EVALUATION_ARENA_MODELS, USER_PERMISSIONS, DEFAULT_USER_ROLE, @@ -519,6 +520,7 @@ async def lifespan(app: FastAPI): app.state.config.ENABLE_CHANNELS = ENABLE_CHANNELS app.state.config.ENABLE_COMMUNITY_SHARING = ENABLE_COMMUNITY_SHARING app.state.config.ENABLE_MESSAGE_RATING = ENABLE_MESSAGE_RATING +app.state.config.ENABLE_USER_WEBHOOKS = ENABLE_USER_WEBHOOKS app.state.config.ENABLE_EVALUATION_ARENA_MODELS = ENABLE_EVALUATION_ARENA_MODELS app.state.config.EVALUATION_ARENA_MODELS = EVALUATION_ARENA_MODELS @@ -1231,6 +1233,7 @@ async def get_app_config(request: Request): "enable_autocomplete_generation": app.state.config.ENABLE_AUTOCOMPLETE_GENERATION, "enable_community_sharing": app.state.config.ENABLE_COMMUNITY_SHARING, "enable_message_rating": app.state.config.ENABLE_MESSAGE_RATING, + "enable_user_webhooks": app.state.config.ENABLE_USER_WEBHOOKS, "enable_admin_export": ENABLE_ADMIN_EXPORT, "enable_admin_chat_access": ENABLE_ADMIN_CHAT_ACCESS, "enable_google_drive_integration": app.state.config.ENABLE_GOOGLE_DRIVE_INTEGRATION, diff --git a/backend/open_webui/routers/auths.py b/backend/open_webui/routers/auths.py index f30ae50c3fd..34a63ba3faf 100644 --- a/backend/open_webui/routers/auths.py +++ b/backend/open_webui/routers/auths.py @@ 
-639,11 +639,12 @@ async def get_admin_config(request: Request, user=Depends(get_admin_user)): "ENABLE_API_KEY": request.app.state.config.ENABLE_API_KEY, "ENABLE_API_KEY_ENDPOINT_RESTRICTIONS": request.app.state.config.ENABLE_API_KEY_ENDPOINT_RESTRICTIONS, "API_KEY_ALLOWED_ENDPOINTS": request.app.state.config.API_KEY_ALLOWED_ENDPOINTS, - "ENABLE_CHANNELS": request.app.state.config.ENABLE_CHANNELS, "DEFAULT_USER_ROLE": request.app.state.config.DEFAULT_USER_ROLE, "JWT_EXPIRES_IN": request.app.state.config.JWT_EXPIRES_IN, "ENABLE_COMMUNITY_SHARING": request.app.state.config.ENABLE_COMMUNITY_SHARING, "ENABLE_MESSAGE_RATING": request.app.state.config.ENABLE_MESSAGE_RATING, + "ENABLE_CHANNELS": request.app.state.config.ENABLE_CHANNELS, + "ENABLE_USER_WEBHOOKS": request.app.state.config.ENABLE_USER_WEBHOOKS, } @@ -654,11 +655,12 @@ class AdminConfig(BaseModel): ENABLE_API_KEY: bool ENABLE_API_KEY_ENDPOINT_RESTRICTIONS: bool API_KEY_ALLOWED_ENDPOINTS: str - ENABLE_CHANNELS: bool DEFAULT_USER_ROLE: str JWT_EXPIRES_IN: str ENABLE_COMMUNITY_SHARING: bool ENABLE_MESSAGE_RATING: bool + ENABLE_CHANNELS: bool + ENABLE_USER_WEBHOOKS: bool @router.post("/admin/config") @@ -693,6 +695,8 @@ async def update_admin_config( ) request.app.state.config.ENABLE_MESSAGE_RATING = form_data.ENABLE_MESSAGE_RATING + request.app.state.config.ENABLE_USER_WEBHOOKS = form_data.ENABLE_USER_WEBHOOKS + return { "SHOW_ADMIN_DETAILS": request.app.state.config.SHOW_ADMIN_DETAILS, "WEBUI_URL": request.app.state.config.WEBUI_URL, @@ -705,6 +709,7 @@ async def update_admin_config( "JWT_EXPIRES_IN": request.app.state.config.JWT_EXPIRES_IN, "ENABLE_COMMUNITY_SHARING": request.app.state.config.ENABLE_COMMUNITY_SHARING, "ENABLE_MESSAGE_RATING": request.app.state.config.ENABLE_MESSAGE_RATING, + "ENABLE_USER_WEBHOOKS": request.app.state.config.ENABLE_USER_WEBHOOKS, } diff --git a/src/lib/components/admin/Settings/General.svelte b/src/lib/components/admin/Settings/General.svelte index 78a15a648f7..5c50bf31100 
100644 --- a/src/lib/components/admin/Settings/General.svelte +++ b/src/lib/components/admin/Settings/General.svelte @@ -609,6 +609,14 @@
+
+
+ {$i18n.t('User Webhooks')} +
+ + +
+
{$i18n.t('WebUI URL')}
diff --git a/src/lib/components/chat/Settings/Account.svelte b/src/lib/components/chat/Settings/Account.svelte index 6b3eba15326..997ec49c971 100644 --- a/src/lib/components/chat/Settings/Account.svelte +++ b/src/lib/components/chat/Settings/Account.svelte @@ -245,21 +245,23 @@
-
-
-
{$i18n.t('Notification Webhook')}
- -
- + {#if $config?.features?.enable_user_webhooks} +
+
+
{$i18n.t('Notification Webhook')}
+ +
+ +
-
+ {/if}
From 4b759664011673a4dcf2a0a6e99f5e7a522dcf2b Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Sun, 30 Mar 2025 21:55:15 -0700 Subject: [PATCH 461/623] refac: embedding prefix var naming --- backend/open_webui/config.py | 12 ++-- backend/open_webui/retrieval/utils.py | 92 +++++++++++++++++---------- 2 files changed, 63 insertions(+), 41 deletions(-) diff --git a/backend/open_webui/config.py b/backend/open_webui/config.py index bc8b456ab58..ea4fea3c45f 100644 --- a/backend/open_webui/config.py +++ b/backend/open_webui/config.py @@ -1783,16 +1783,12 @@ class BannerModel(BaseModel): ), ) -RAG_EMBEDDING_QUERY_PREFIX = ( - os.environ.get("RAG_EMBEDDING_QUERY_PREFIX", None) -) +RAG_EMBEDDING_QUERY_PREFIX = os.environ.get("RAG_EMBEDDING_QUERY_PREFIX", None) -RAG_EMBEDDING_PASSAGE_PREFIX = ( - os.environ.get("RAG_EMBEDDING_PASSAGE_PREFIX", None) -) +RAG_EMBEDDING_CONTENT_PREFIX = os.environ.get("RAG_EMBEDDING_CONTENT_PREFIX", None) -RAG_EMBEDDING_PREFIX_FIELD_NAME = ( - os.environ.get("RAG_EMBEDDING_PREFIX_FIELD_NAME", None) +RAG_EMBEDDING_PREFIX_FIELD_NAME = os.environ.get( + "RAG_EMBEDDING_PREFIX_FIELD_NAME", None ) RAG_RERANKING_MODEL = PersistentConfig( diff --git a/backend/open_webui/retrieval/utils.py b/backend/open_webui/retrieval/utils.py index c2fa264d651..bcffbc139ee 100644 --- a/backend/open_webui/retrieval/utils.py +++ b/backend/open_webui/retrieval/utils.py @@ -25,9 +25,9 @@ ENABLE_FORWARD_USER_INFO_HEADERS, ) from open_webui.config import ( - RAG_EMBEDDING_QUERY_PREFIX, - RAG_EMBEDDING_PASSAGE_PREFIX, - RAG_EMBEDDING_PREFIX_FIELD_NAME + RAG_EMBEDDING_QUERY_PREFIX, + RAG_EMBEDDING_CONTENT_PREFIX, + RAG_EMBEDDING_PREFIX_FIELD_NAME, ) log = logging.getLogger(__name__) @@ -53,7 +53,7 @@ def _get_relevant_documents( ) -> list[Document]: result = VECTOR_DB_CLIENT.search( collection_name=self.collection_name, - vectors=[self.embedding_function(query,RAG_EMBEDDING_QUERY_PREFIX)], + vectors=[self.embedding_function(query, RAG_EMBEDDING_QUERY_PREFIX)], 
limit=self.top_k, ) @@ -334,7 +334,9 @@ def get_embedding_function( embedding_batch_size, ): if embedding_engine == "": - return lambda query, prefix, user=None: embedding_function.encode(query, prompt = prefix if prefix else None).tolist() + return lambda query, prefix, user=None: embedding_function.encode( + query, prompt=prefix if prefix else None + ).tolist() elif embedding_engine in ["ollama", "openai"]: func = lambda query, prefix, user=None: generate_embeddings( engine=embedding_engine, @@ -345,22 +347,29 @@ def get_embedding_function( key=key, user=user, ) + def generate_multiple(query, prefix, user, func): if isinstance(query, list): embeddings = [] for i in range(0, len(query), embedding_batch_size): embeddings.extend( - func(query[i : i + embedding_batch_size], prefix=prefix, user=user) + func( + query[i : i + embedding_batch_size], + prefix=prefix, + user=user, + ) ) return embeddings else: return func(query, prefix, user) - return lambda query, prefix, user=None: generate_multiple(query, prefix, user, func) + + return lambda query, prefix, user=None: generate_multiple( + query, prefix, user, func + ) else: raise ValueError(f"Unknown embedding engine: {embedding_engine}") - def get_sources_from_files( request, files, @@ -579,14 +588,11 @@ def generate_openai_batch_embeddings( url: str = "https://api.openai.com/v1", key: str = "", prefix: str = None, - user: UserModel = None + user: UserModel = None, ) -> Optional[list[list[float]]]: try: - json_data = { - "input": texts, - "model": model - } - if isinstance(RAG_EMBEDDING_PREFIX_FIELD_NAME,str) and isinstance(prefix,str): + json_data = {"input": texts, "model": model} + if isinstance(RAG_EMBEDDING_PREFIX_FIELD_NAME, str) and isinstance(prefix, str): json_data[RAG_EMBEDDING_PREFIX_FIELD_NAME] = prefix r = requests.post( @@ -619,21 +625,18 @@ def generate_openai_batch_embeddings( def generate_ollama_batch_embeddings( - model: str, + model: str, texts: list[str], url: str, - key: str = "", - prefix: str = 
None, - user: UserModel = None + key: str = "", + prefix: str = None, + user: UserModel = None, ) -> Optional[list[list[float]]]: try: - json_data = { - "input": texts, - "model": model - } - if isinstance(RAG_EMBEDDING_PREFIX_FIELD_NAME,str) and isinstance(prefix,str): + json_data = {"input": texts, "model": model} + if isinstance(RAG_EMBEDDING_PREFIX_FIELD_NAME, str) and isinstance(prefix, str): json_data[RAG_EMBEDDING_PREFIX_FIELD_NAME] = prefix - + r = requests.post( f"{url}/api/embed", headers={ @@ -664,32 +667,56 @@ def generate_ollama_batch_embeddings( return None -def generate_embeddings(engine: str, model: str, text: Union[str, list[str]], prefix: Union[str , None] = None, **kwargs): +def generate_embeddings( + engine: str, + model: str, + text: Union[str, list[str]], + prefix: Union[str, None] = None, + **kwargs, +): url = kwargs.get("url", "") key = kwargs.get("key", "") user = kwargs.get("user") if prefix is not None and RAG_EMBEDDING_PREFIX_FIELD_NAME is None: if isinstance(text, list): - text = [f'{prefix}{text_element}' for text_element in text] + text = [f"{prefix}{text_element}" for text_element in text] else: - text = f'{prefix}{text}' + text = f"{prefix}{text}" if engine == "ollama": if isinstance(text, list): embeddings = generate_ollama_batch_embeddings( - **{"model": model, "texts": text, "url": url, "key": key, "prefix": prefix, "user": user} + **{ + "model": model, + "texts": text, + "url": url, + "key": key, + "prefix": prefix, + "user": user, + } ) else: embeddings = generate_ollama_batch_embeddings( - **{"model": model, "texts": [text], "url": url, "key": key, "prefix": prefix, "user": user} + **{ + "model": model, + "texts": [text], + "url": url, + "key": key, + "prefix": prefix, + "user": user, + } ) return embeddings[0] if isinstance(text, str) else embeddings elif engine == "openai": if isinstance(text, list): - embeddings = generate_openai_batch_embeddings(model, text, url, key, prefix, user) + embeddings = 
generate_openai_batch_embeddings( + model, text, url, key, prefix, user + ) else: - embeddings = generate_openai_batch_embeddings(model, [text], url, key, prefix, user) + embeddings = generate_openai_batch_embeddings( + model, [text], url, key, prefix, user + ) return embeddings[0] if isinstance(text, str) else embeddings @@ -727,8 +754,7 @@ def compress_documents( query_embedding = self.embedding_function(query, RAG_EMBEDDING_QUERY_PREFIX) document_embedding = self.embedding_function( - [doc.page_content for doc in documents], - RAG_EMBEDDING_PASSAGE_PREFIX + [doc.page_content for doc in documents], RAG_EMBEDDING_CONTENT_PREFIX ) scores = util.cos_sim(query_embedding, document_embedding)[0] From d542881ee4083d61262cac3d8211ad9fb04135e0 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Sun, 30 Mar 2025 21:55:20 -0700 Subject: [PATCH 462/623] refac --- backend/open_webui/routers/retrieval.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/backend/open_webui/routers/retrieval.py b/backend/open_webui/routers/retrieval.py index 24e7ceb981c..abca72f111e 100644 --- a/backend/open_webui/routers/retrieval.py +++ b/backend/open_webui/routers/retrieval.py @@ -82,8 +82,8 @@ RAG_RERANKING_MODEL_TRUST_REMOTE_CODE, UPLOAD_DIR, DEFAULT_LOCALE, - RAG_EMBEDDING_PASSAGE_PREFIX, - RAG_EMBEDDING_QUERY_PREFIX + RAG_EMBEDDING_CONTENT_PREFIX, + RAG_EMBEDDING_QUERY_PREFIX, ) from open_webui.env import ( SRC_LOG_LEVELS, @@ -892,7 +892,9 @@ def _get_docs_info(docs: list[Document]) -> str: ) embeddings = embedding_function( - list(map(lambda x: x.replace("\n", " "), texts)), prefix=RAG_EMBEDDING_PASSAGE_PREFIX, user=user + list(map(lambda x: x.replace("\n", " "), texts)), + prefix=RAG_EMBEDDING_CONTENT_PREFIX, + user=user, ) items = [ @@ -1536,7 +1538,6 @@ def query_doc_handler( query_embedding=request.app.state.EMBEDDING_FUNCTION( form_data.query, prefix=RAG_EMBEDDING_QUERY_PREFIX, user=user ), - k=form_data.k if form_data.k else 
request.app.state.config.TOP_K, user=user, ) @@ -1663,7 +1664,11 @@ def reset_upload_dir(user=Depends(get_admin_user)) -> bool: @router.get("/ef/{text}") async def get_embeddings(request: Request, text: Optional[str] = "Hello World!"): - return {"result": request.app.state.EMBEDDING_FUNCTION(text, RAG_EMBEDDING_QUERY_PREFIX)} + return { + "result": request.app.state.EMBEDDING_FUNCTION( + text, RAG_EMBEDDING_QUERY_PREFIX + ) + } class BatchProcessFilesForm(BaseModel): From 337df80c4752d88c9bacfb22543e321144913f18 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Sun, 30 Mar 2025 23:17:06 -0700 Subject: [PATCH 463/623] refac: styling --- src/lib/components/chat/MessageInput.svelte | 50 +------------------ .../chat/MessageInput/InputMenu.svelte | 4 +- 2 files changed, 3 insertions(+), 51 deletions(-) diff --git a/src/lib/components/chat/MessageInput.svelte b/src/lib/components/chat/MessageInput.svelte index cd2699c0047..0fc65085cad 100644 --- a/src/lib/components/chat/MessageInput.svelte +++ b/src/lib/components/chat/MessageInput.svelte @@ -424,54 +424,6 @@
{/if} - {#if webSearchEnabled || ($config?.features?.enable_web_search && ($settings?.webSearch ?? false)) === 'always'} -
-
-
- - - - -
-
{$i18n.t('Search the internet')}
-
-
- {/if} - - {#if imageGenerationEnabled} -
-
-
- - - - -
-
{$i18n.t('Generate an image')}
-
-
- {/if} - - {#if codeInterpreterEnabled} -
-
-
- - - - -
-
{$i18n.t('Execute code for analysis')}
-
-
- {/if} - {#if atSelectedModel !== undefined}
@@ -583,7 +535,7 @@ }} >
{#if files.length > 0} diff --git a/src/lib/components/chat/MessageInput/InputMenu.svelte b/src/lib/components/chat/MessageInput/InputMenu.svelte index ff97f007672..07f337dcbf5 100644 --- a/src/lib/components/chat/MessageInput/InputMenu.svelte +++ b/src/lib/components/chat/MessageInput/InputMenu.svelte @@ -94,8 +94,8 @@
Date: Sun, 30 Mar 2025 23:36:15 -0700 Subject: [PATCH 464/623] refac: folders --- backend/open_webui/models/folders.py | 19 +++++++++++++------ backend/open_webui/routers/folders.py | 17 +++++++++++++++-- 2 files changed, 28 insertions(+), 8 deletions(-) diff --git a/backend/open_webui/models/folders.py b/backend/open_webui/models/folders.py index 19739bc5f5d..1c97de26c96 100644 --- a/backend/open_webui/models/folders.py +++ b/backend/open_webui/models/folders.py @@ -9,6 +9,8 @@ from open_webui.env import SRC_LOG_LEVELS from pydantic import BaseModel, ConfigDict from sqlalchemy import BigInteger, Column, Text, JSON, Boolean +from open_webui.utils.access_control import get_permissions + log = logging.getLogger(__name__) log.setLevel(SRC_LOG_LEVELS["MODELS"]) @@ -234,15 +236,18 @@ def update_folder_is_expanded_by_id_and_user_id( log.error(f"update_folder: {e}") return - def delete_folder_by_id_and_user_id(self, id: str, user_id: str) -> bool: + def delete_folder_by_id_and_user_id( + self, id: str, user_id: str, delete_chats=True + ) -> bool: try: with get_db() as db: folder = db.query(Folder).filter_by(id=id, user_id=user_id).first() if not folder: return False - # Delete all chats in the folder - Chats.delete_chats_by_user_id_and_folder_id(user_id, folder.id) + if delete_chats: + # Delete all chats in the folder + Chats.delete_chats_by_user_id_and_folder_id(user_id, folder.id) # Delete all children folders def delete_children(folder): @@ -250,9 +255,11 @@ def delete_children(folder): folder.id, user_id ) for folder_child in folder_children: - Chats.delete_chats_by_user_id_and_folder_id( - user_id, folder_child.id - ) + if delete_chats: + Chats.delete_chats_by_user_id_and_folder_id( + user_id, folder_child.id + ) + delete_children(folder_child) folder = db.query(Folder).filter_by(id=folder_child.id).first() diff --git a/backend/open_webui/routers/folders.py b/backend/open_webui/routers/folders.py index ca2fbd2132c..cf37f9329da 100644 --- 
a/backend/open_webui/routers/folders.py +++ b/backend/open_webui/routers/folders.py @@ -20,11 +20,13 @@ from open_webui.constants import ERROR_MESSAGES -from fastapi import APIRouter, Depends, File, HTTPException, UploadFile, status +from fastapi import APIRouter, Depends, File, HTTPException, UploadFile, status, Request from fastapi.responses import FileResponse, StreamingResponse from open_webui.utils.auth import get_admin_user, get_verified_user +from open_webui.utils.access_control import has_permission + log = logging.getLogger(__name__) log.setLevel(SRC_LOG_LEVELS["MODELS"]) @@ -228,7 +230,18 @@ async def update_folder_is_expanded_by_id( @router.delete("/{id}") -async def delete_folder_by_id(id: str, user=Depends(get_verified_user)): +async def delete_folder_by_id( + request: Request, id: str, user=Depends(get_verified_user) +): + chat_delete_permission = has_permission( + user.id, "chat.delete", request.app.state.config.USER_PERMISSIONS + ) + if not chat_delete_permission: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail=ERROR_MESSAGES.ACCESS_PROHIBITED, + ) + folder = Folders.get_folder_by_id_and_user_id(id, user.id) if folder: try: From 6e190bebe8db5c48ccf17c9b76f00b93b43af018 Mon Sep 17 00:00:00 2001 From: Aleix Dorca Date: Mon, 31 Mar 2025 09:12:48 +0200 Subject: [PATCH 465/623] Update catalan translation.json --- src/lib/i18n/locales/ca-ES/translation.json | 30 ++++++++++----------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/src/lib/i18n/locales/ca-ES/translation.json b/src/lib/i18n/locales/ca-ES/translation.json index 6fa3fd009ef..51279650c69 100644 --- a/src/lib/i18n/locales/ca-ES/translation.json +++ b/src/lib/i18n/locales/ca-ES/translation.json @@ -4,9 +4,9 @@ "(e.g. `sh webui.sh --api --api-auth username_password`)": "(p. ex. `sh webui.sh --api --api-auth username_password`)", "(e.g. `sh webui.sh --api`)": "(p. ex. 
`sh webui.sh --api`)", "(latest)": "(últim)", - "(Ollama)": "", + "(Ollama)": "(Ollama)", "{{ models }}": "{{ models }}", - "{{COUNT}} Available Tool Servers": "", + "{{COUNT}} Available Tool Servers": "{{COUNT}} Servidors d'eines disponibles", "{{COUNT}} hidden lines": "{{COUNT}} línies ocultes", "{{COUNT}} Replies": "{{COUNT}} respostes", "{{user}}'s Chats": "Els xats de {{user}}", @@ -119,7 +119,7 @@ "AUTOMATIC1111 Base URL": "URL Base d'AUTOMATIC1111", "AUTOMATIC1111 Base URL is required.": "Es requereix l'URL Base d'AUTOMATIC1111.", "Available list": "Llista de disponibles", - "Available Tool Servers": "", + "Available Tool Servers": "Servidors d'eines disponibles", "available!": "disponible!", "Awful": "Terrible", "Azure AI Speech": "Azure AI Speech", @@ -217,7 +217,7 @@ "Confirm your action": "Confirma la teva acció", "Confirm your new password": "Confirma la teva nova contrasenya", "Connect to your own OpenAI compatible API endpoints.": "Connecta als teus propis punts de connexió de l'API compatible amb OpenAI", - "Connect to your own OpenAPI compatible external tool servers.": "", + "Connect to your own OpenAPI compatible external tool servers.": "Connecta als teus propis servidors d'eines externs compatibles amb OpenAPI", "Connections": "Connexions", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "Restringeix l'esforç de raonament dels models de raonament. Només aplicable a models de raonament de proveïdors específics que donen suport a l'esforç de raonament.", "Contact Admin for WebUI Access": "Posat en contacte amb l'administrador per accedir a WebUI", @@ -344,7 +344,7 @@ "Draw": "Dibuixar", "Drop any files here to add to the conversation": "Deixa qualsevol arxiu aquí per afegir-lo a la conversa", "e.g. '30s','10m'. Valid time units are 's', 'm', 'h'.": "p. ex. '30s','10m'. Les unitats de temps vàlides són 's', 'm', 'h'.", - "e.g. 
\"json\" or a JSON schema": "", + "e.g. \"json\" or a JSON schema": "p. ex. \"json\" o un esquema JSON", "e.g. 60": "p. ex. 60", "e.g. A filter to remove profanity from text": "p. ex. Un filtre per eliminar paraules malsonants del text", "e.g. My Filter": "p. ex. El meu filtre", @@ -441,7 +441,7 @@ "Enter timeout in seconds": "Entra el temps màxim en segons", "Enter to Send": "Enter per enviar", "Enter Top K": "Introdueix Top K", - "Enter Top K Reranker": "", + "Enter Top K Reranker": "Introdueix el Top K Reranker", "Enter URL (e.g. http://127.0.0.1:7860/)": "Introdueix l'URL (p. ex. http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "Introdueix l'URL (p. ex. http://localhost:11434)", "Enter your current password": "Introdueix la teva contrasenya actual", @@ -467,7 +467,7 @@ "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "S'ha superat el nombre de places a la vostra llicència. Poseu-vos en contacte amb el servei d'assistència per augmentar el nombre de places.", "Exclude": "Excloure", "Execute code for analysis": "Executar el codi per analitzar-lo", - "Executing `{{NAME}}`...": "", + "Executing `{{NAME}}`...": "Executant `{{NAME}}`...", "Expand": "Expandir", "Experimental": "Experimental", "Explain": "Explicar", @@ -488,7 +488,7 @@ "External": "Extern", "External Models": "Models externs", "Failed to add file.": "No s'ha pogut afegir l'arxiu.", - "Failed to connect to {{URL}} OpenAPI tool server": "", + "Failed to connect to {{URL}} OpenAPI tool server": "No s'ha pogut connecta al servidor d'eines OpenAPI {{URL}}", "Failed to create API Key.": "No s'ha pogut crear la clau API.", "Failed to fetch models": "No s'han pogut obtenir els models", "Failed to read clipboard contents": "No s'ha pogut llegir el contingut del porta-retalls", @@ -606,7 +606,7 @@ "Integration": "Integració", "Interface": "Interfície", "Invalid file format.": "Format d'arxiu no vàlid.", - "Invalid JSON schema": "", + 
"Invalid JSON schema": "Esquema JSON no vàlid", "Invalid Tag": "Etiqueta no vàlida", "is typing...": "està escrivint...", "January": "Gener", @@ -642,8 +642,8 @@ "LDAP server updated": "Servidor LDAP actualitzat", "Leaderboard": "Tauler de classificació", "Leave empty for unlimited": "Deixar-ho buit per il·limitat", - "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{url}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "Deixar-ho buit per incloure tots els models del punt de connexió \"{{url}}/api/tags\"", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "Deixar-ho buit per incloure tots els models del punt de connexió \"{{url}}/models\"", "Leave empty to include all models or select specific models": "Deixa-ho en blanc per incloure tots els models o selecciona models específics", "Leave empty to use the default prompt, or enter a custom prompt": "Deixa-ho en blanc per utilitzar la indicació predeterminada o introdueix una indicació personalitzada", "Leave model field empty to use the default model.": "Deixa el camp de model buit per utilitzar el model per defecte.", @@ -670,7 +670,7 @@ "Manage Ollama API Connections": "Gestionar les connexions a l'API d'Ollama", "Manage OpenAI API Connections": "Gestionar les connexions a l'API d'OpenAI", "Manage Pipelines": "Gestionar les Pipelines", - "Manage Tool Servers": "", + "Manage Tool Servers": "Gestionar els servidors d'eines", "March": "Març", "Max Tokens (num_predict)": "Nombre màxim de Tokens (num_predict)", "Max Upload Count": "Nombre màxim de càrregues", @@ -1087,7 +1087,7 @@ "Tools have a function calling system that allows arbitrary code execution": "Les eines disposen d'un sistema de crida a funcions que permet execució de codi arbitrari", "Tools have a function calling system that allows arbitrary code execution.": "Les eines disposen d'un sistema de crida a funcions 
que permet execució de codi arbitrari.", "Top K": "Top K", - "Top K Reranker": "", + "Top K Reranker": "Top K Reranker", "Top P": "Top P", "Transformers": "Transformadors", "Trouble accessing Ollama?": "Problemes en accedir a Ollama?", @@ -1146,7 +1146,7 @@ "Version": "Versió", "Version {{selectedVersion}} of {{totalVersions}}": "Versió {{selectedVersion}} de {{totalVersions}}", "View Replies": "Veure les respostes", - "View Result from `{{NAME}}`": "", + "View Result from `{{NAME}}`": "Veure el resultat de `{{NAME}}`", "Visibility": "Visibilitat", "Voice": "Veu", "Voice Input": "Entrada de veu", @@ -1166,7 +1166,7 @@ "WebUI URL": "URL de WebUI", "WebUI will make requests to \"{{url}}/api/chat\"": "WebUI farà peticions a \"{{url}}/api/chat\"", "WebUI will make requests to \"{{url}}/chat/completions\"": "WebUI farà peticions a \"{{url}}/chat/completions\"", - "WebUI will make requests to \"{{url}}/openapi.json\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "WebUI farà peticions a \"{{url}}/openapi.json\"", "What are you trying to achieve?": "Què intentes aconseguir?", "What are you working on?": "En què estàs treballant?", "What’s New in": "Què hi ha de nou a", From 300b7dfcc083495e230470186a41f1d0e5cbec4a Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Mon, 31 Mar 2025 00:39:20 -0700 Subject: [PATCH 466/623] fix: model import/export --- src/lib/components/workspace/Models.svelte | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/lib/components/workspace/Models.svelte b/src/lib/components/workspace/Models.svelte index 9a01f3fd8c8..3c509a0bcd9 100644 --- a/src/lib/components/workspace/Models.svelte +++ b/src/lib/components/workspace/Models.svelte @@ -430,6 +430,12 @@ return null; }); } + } else { + if (model?.id && model?.name) { + await createNewModel(localStorage.token, model).catch((error) => { + return null; + }); + } } } @@ -474,7 +480,7 @@
- - -
- - { - toggleModelHandler(model); + {#if shiftKey} + + -
+ {:else} + + + { + exportModelHandler(model); + }} + hideHandler={() => { + hideModelHandler(model); + }} + onClose={() => {}} + > + + + +
+ + { + toggleModelHandler(model); + }} + /> + +
+ {/if}
{/each} diff --git a/src/lib/components/admin/Settings/Models/ModelMenu.svelte b/src/lib/components/admin/Settings/Models/ModelMenu.svelte new file mode 100644 index 00000000000..88465e42e2e --- /dev/null +++ b/src/lib/components/admin/Settings/Models/ModelMenu.svelte @@ -0,0 +1,116 @@ + + + { + if (e.detail === false) { + onClose(); + } + }} +> + + + + +
+ + { + hideHandler(); + }} + > + {#if model?.meta?.hidden ?? false} + + + + {:else} + + + + + {/if} + +
+ {#if model?.meta?.hidden ?? false} + {$i18n.t('Show Model')} + {:else} + {$i18n.t('Hide Model')} + {/if} +
+
+ + { + exportHandler(); + }} + > + + +
{$i18n.t('Export')}
+
+
+
+
diff --git a/src/lib/components/chat/ModelSelector/Selector.svelte b/src/lib/components/chat/ModelSelector/Selector.svelte index 226f5b1bd5f..47f06f8f5d6 100644 --- a/src/lib/components/chat/ModelSelector/Selector.svelte +++ b/src/lib/components/chat/ModelSelector/Selector.svelte @@ -458,174 +458,176 @@ {/if} {#each filteredItems as item, index} - + {#if value === item.value} +
+ +
+ {/if} + + {/if} {:else}
diff --git a/src/lib/components/icons/Eye.svelte b/src/lib/components/icons/Eye.svelte new file mode 100644 index 00000000000..5af95a9e7de --- /dev/null +++ b/src/lib/components/icons/Eye.svelte @@ -0,0 +1,20 @@ + + + + + + From 50b3f47f81bf5abab8301fdf0ee913877b913940 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Mon, 31 Mar 2025 17:15:51 -0700 Subject: [PATCH 483/623] feat: public sharing permissions --- backend/open_webui/config.py | 36 ++++++++++++++++ backend/open_webui/routers/users.py | 11 +++++ .../admin/Users/Groups/Permissions.svelte | 41 +++++++++++++++++++ 3 files changed, 88 insertions(+) diff --git a/backend/open_webui/config.py b/backend/open_webui/config.py index ea4fea3c45f..2d66e37b6c8 100644 --- a/backend/open_webui/config.py +++ b/backend/open_webui/config.py @@ -980,6 +980,35 @@ def oidc_oauth_register(client): os.environ.get("USER_PERMISSIONS_WORKSPACE_TOOLS_ACCESS", "False").lower() == "true" ) +USER_PERMISSIONS_WORKSPACE_MODELS_ALLOW_PUBLIC_SHARING = ( + os.environ.get( + "USER_PERMISSIONS_WORKSPACE_MODELS_ALLOW_PUBLIC_SHARING", "False" + ).lower() + == "true" +) + +USER_PERMISSIONS_WORKSPACE_KNOWLEDGE_ALLOW_PUBLIC_SHARING = ( + os.environ.get( + "USER_PERMISSIONS_WORKSPACE_KNOWLEDGE_ALLOW_PUBLIC_SHARING", "False" + ).lower() + == "true" +) + +USER_PERMISSIONS_WORKSPACE_PROMPTS_ALLOW_PUBLIC_SHARING = ( + os.environ.get( + "USER_PERMISSIONS_WORKSPACE_PROMPTS_ALLOW_PUBLIC_SHARING", "False" + ).lower() + == "true" +) + +USER_PERMISSIONS_WORKSPACE_TOOLS_ALLOW_PUBLIC_SHARING = ( + os.environ.get( + "USER_PERMISSIONS_WORKSPACE_TOOLS_ALLOW_PUBLIC_SHARING", "False" + ).lower() + == "true" +) + + USER_PERMISSIONS_CHAT_CONTROLS = ( os.environ.get("USER_PERMISSIONS_CHAT_CONTROLS", "True").lower() == "true" ) @@ -1000,6 +1029,7 @@ def oidc_oauth_register(client): os.environ.get("USER_PERMISSIONS_CHAT_TEMPORARY", "True").lower() == "true" ) + USER_PERMISSIONS_FEATURES_WEB_SEARCH = ( os.environ.get("USER_PERMISSIONS_FEATURES_WEB_SEARCH", 
"True").lower() == "true" ) @@ -1022,6 +1052,12 @@ def oidc_oauth_register(client): "prompts": USER_PERMISSIONS_WORKSPACE_PROMPTS_ACCESS, "tools": USER_PERMISSIONS_WORKSPACE_TOOLS_ACCESS, }, + "sharing": { + "public_models": USER_PERMISSIONS_WORKSPACE_MODELS_ALLOW_PUBLIC_SHARING, + "public_knowledge": USER_PERMISSIONS_WORKSPACE_KNOWLEDGE_ALLOW_PUBLIC_SHARING, + "public_prompts": USER_PERMISSIONS_WORKSPACE_PROMPTS_ALLOW_PUBLIC_SHARING, + "public_tools": USER_PERMISSIONS_WORKSPACE_TOOLS_ALLOW_PUBLIC_SHARING, + }, "chat": { "controls": USER_PERMISSIONS_CHAT_CONTROLS, "file_upload": USER_PERMISSIONS_CHAT_FILE_UPLOAD, diff --git a/backend/open_webui/routers/users.py b/backend/open_webui/routers/users.py index f5349faa36a..825a397230a 100644 --- a/backend/open_webui/routers/users.py +++ b/backend/open_webui/routers/users.py @@ -76,6 +76,13 @@ class WorkspacePermissions(BaseModel): tools: bool = False +class SharingPermissions(BaseModel): + public_models: bool = True + public_knowledge: bool = True + public_prompts: bool = True + public_tools: bool = True + + class ChatPermissions(BaseModel): controls: bool = True file_upload: bool = True @@ -92,6 +99,7 @@ class FeaturesPermissions(BaseModel): class UserPermissions(BaseModel): workspace: WorkspacePermissions + sharing: SharingPermissions chat: ChatPermissions features: FeaturesPermissions @@ -102,6 +110,9 @@ async def get_default_user_permissions(request: Request, user=Depends(get_admin_ "workspace": WorkspacePermissions( **request.app.state.config.USER_PERMISSIONS.get("workspace", {}) ), + "sharing": SharingPermissions( + **request.app.state.config.USER_PERMISSIONS.get("sharing", {}) + ), "chat": ChatPermissions( **request.app.state.config.USER_PERMISSIONS.get("chat", {}) ), diff --git a/src/lib/components/admin/Users/Groups/Permissions.svelte b/src/lib/components/admin/Users/Groups/Permissions.svelte index f41ac206b03..157c8f7caa9 100644 --- a/src/lib/components/admin/Users/Groups/Permissions.svelte +++ 
b/src/lib/components/admin/Users/Groups/Permissions.svelte @@ -13,6 +13,12 @@ prompts: false, tools: false }, + sharing: { + public_models: false, + public_knowledge: false, + public_prompts: false, + public_tools: false + }, chat: { controls: true, delete: true, @@ -39,6 +45,7 @@ ...defaults, ...obj, workspace: { ...defaults.workspace, ...obj.workspace }, + sharing: { ...defaults.sharing, ...obj.sharing }, chat: { ...defaults.chat, ...obj.chat }, features: { ...defaults.features, ...obj.features } }; @@ -194,6 +201,40 @@
+
+
{$i18n.t('Sharing Permissions')}
+ +
+
+ {$i18n.t('Models Public Sharing')} +
+ +
+ +
+
+ {$i18n.t('Knowledge Public Sharing')} +
+ +
+ +
+
+ {$i18n.t('Prompts Public Sharing')} +
+ +
+ +
+
+ {$i18n.t('Tools Public Sharing')} +
+ +
+
+ +
+
{$i18n.t('Chat Permissions')}
From 580965df173cfccd35cf4f94aaec06f676ecad90 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Mon, 31 Mar 2025 17:28:25 -0700 Subject: [PATCH 484/623] feat: public sharing permissions Co-Authored-By: Taylor Wilsdon <6508528+taylorwilsdon@users.noreply.github.com> --- .../Knowledge/CreateKnowledgeBase.svelte | 8 +++- .../workspace/Knowledge/KnowledgeBase.svelte | 3 +- .../workspace/Models/ModelEditor.svelte | 6 ++- .../workspace/Prompts/PromptEditor.svelte | 2 + .../workspace/Tools/ToolkitEditor.svelte | 2 + .../workspace/common/AccessControl.svelte | 42 +++++++++++++++++-- .../common/AccessControlModal.svelte | 3 +- 7 files changed, 57 insertions(+), 9 deletions(-) diff --git a/src/lib/components/workspace/Knowledge/CreateKnowledgeBase.svelte b/src/lib/components/workspace/Knowledge/CreateKnowledgeBase.svelte index 586564cd792..fefbbefcda5 100644 --- a/src/lib/components/workspace/Knowledge/CreateKnowledgeBase.svelte +++ b/src/lib/components/workspace/Knowledge/CreateKnowledgeBase.svelte @@ -5,7 +5,7 @@ import { createNewKnowledge, getKnowledgeBases } from '$lib/apis/knowledge'; import { toast } from 'svelte-sonner'; - import { knowledge } from '$lib/stores'; + import { knowledge, user } from '$lib/stores'; import AccessControl from '../common/AccessControl.svelte'; let loading = false; @@ -112,7 +112,11 @@
- +
diff --git a/src/lib/components/workspace/Knowledge/KnowledgeBase.svelte b/src/lib/components/workspace/Knowledge/KnowledgeBase.svelte index 07ca0f1ed9e..c6f47e8def4 100644 --- a/src/lib/components/workspace/Knowledge/KnowledgeBase.svelte +++ b/src/lib/components/workspace/Knowledge/KnowledgeBase.svelte @@ -9,7 +9,7 @@ import { goto } from '$app/navigation'; import { page } from '$app/stores'; - import { mobile, showSidebar, knowledge as _knowledge, config } from '$lib/stores'; + import { mobile, showSidebar, knowledge as _knowledge, config, user } from '$lib/stores'; import { updateFileDataContentById, uploadFile, deleteFileById } from '$lib/apis/files'; import { @@ -619,6 +619,7 @@ { changeDebounceHandler(); }} diff --git a/src/lib/components/workspace/Models/ModelEditor.svelte b/src/lib/components/workspace/Models/ModelEditor.svelte index 170c37f228b..4bd875ee274 100644 --- a/src/lib/components/workspace/Models/ModelEditor.svelte +++ b/src/lib/components/workspace/Models/ModelEditor.svelte @@ -530,7 +530,11 @@
- +
diff --git a/src/lib/components/workspace/Prompts/PromptEditor.svelte b/src/lib/components/workspace/Prompts/PromptEditor.svelte index 76ae9f8512c..4abe5c067e9 100644 --- a/src/lib/components/workspace/Prompts/PromptEditor.svelte +++ b/src/lib/components/workspace/Prompts/PromptEditor.svelte @@ -7,6 +7,7 @@ import AccessControl from '../common/AccessControl.svelte'; import LockClosed from '$lib/components/icons/LockClosed.svelte'; import AccessControlModal from '../common/AccessControlModal.svelte'; + import { user } from '$lib/stores'; export let onSubmit: Function; export let edit = false; @@ -72,6 +73,7 @@ bind:show={showAccessControlModal} bind:accessControl accessRoles={['read', 'write']} + allowPublic={$user?.permissions?.sharing?.public_prompts || $user?.role === 'admin'} />
diff --git a/src/lib/components/workspace/Tools/ToolkitEditor.svelte b/src/lib/components/workspace/Tools/ToolkitEditor.svelte index 63a54ab24d7..6057be6cb58 100644 --- a/src/lib/components/workspace/Tools/ToolkitEditor.svelte +++ b/src/lib/components/workspace/Tools/ToolkitEditor.svelte @@ -11,6 +11,7 @@ import Tooltip from '$lib/components/common/Tooltip.svelte'; import LockClosed from '$lib/components/icons/LockClosed.svelte'; import AccessControlModal from '../common/AccessControlModal.svelte'; + import { user } from '$lib/stores'; let formElement = null; let loading = false; @@ -183,6 +184,7 @@ class Tools: bind:show={showAccessControlModal} bind:accessControl accessRoles={['read', 'write']} + allowPublic={$user?.permissions?.sharing?.public_tools || $user?.role === 'admin'} />
diff --git a/src/lib/components/workspace/common/AccessControl.svelte b/src/lib/components/workspace/common/AccessControl.svelte index e4c6e3e48ee..9c3e0dd8b2b 100644 --- a/src/lib/components/workspace/common/AccessControl.svelte +++ b/src/lib/components/workspace/common/AccessControl.svelte @@ -15,14 +15,44 @@ export let accessRoles = ['read']; export let accessControl = null; + export let allowPublic = true; + let selectedGroupId = ''; let groups = []; + $: if (!allowPublic && accessControl === null) { + accessControl = { + read: { + group_ids: [], + user_ids: [] + }, + write: { + group_ids: [], + user_ids: [] + } + }; + onChange(accessControl); + } + onMount(async () => { groups = await getGroups(localStorage.token); if (accessControl === null) { - accessControl = null; + if (allowPublic) { + accessControl = null; + } else { + accessControl = { + read: { + group_ids: [], + user_ids: [] + }, + write: { + group_ids: [], + user_ids: [] + } + }; + onChange(accessControl); + } } else { accessControl = { read: { @@ -104,17 +134,21 @@ } else { accessControl = { read: { - group_ids: [] + group_ids: [], + user_ids: [] }, write: { - group_ids: [] + group_ids: [], + user_ids: [] } }; } }} > - + {#if allowPublic} + + {/if}
diff --git a/src/lib/components/workspace/common/AccessControlModal.svelte b/src/lib/components/workspace/common/AccessControlModal.svelte index cc7c59c8683..d694082630b 100644 --- a/src/lib/components/workspace/common/AccessControlModal.svelte +++ b/src/lib/components/workspace/common/AccessControlModal.svelte @@ -8,6 +8,7 @@ export let show = false; export let accessControl = null; export let accessRoles = ['read']; + export let allowPublic = true; export let onChange = () => {}; @@ -38,7 +39,7 @@
- +
From 5f792d27717abc0747f9bfb2847658124fc04b5c Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Mon, 31 Mar 2025 17:58:43 -0700 Subject: [PATCH 485/623] feat: enforced temporary chat --- backend/open_webui/config.py | 5 +++++ backend/open_webui/routers/users.py | 1 + src/lib/components/admin/Users/Groups.svelte | 9 ++++++++- .../admin/Users/Groups/Permissions.svelte | 13 ++++++++++++- src/lib/components/chat/ModelSelector.svelte | 3 ++- src/routes/(app)/+layout.svelte | 6 ++++++ 6 files changed, 34 insertions(+), 3 deletions(-) diff --git a/backend/open_webui/config.py b/backend/open_webui/config.py index 2d66e37b6c8..0ac92bd23bd 100644 --- a/backend/open_webui/config.py +++ b/backend/open_webui/config.py @@ -1029,6 +1029,10 @@ def oidc_oauth_register(client): os.environ.get("USER_PERMISSIONS_CHAT_TEMPORARY", "True").lower() == "true" ) +USER_PERMISSIONS_CHAT_TEMPORARY_ENFORCED = ( + os.environ.get("USER_PERMISSIONS_CHAT_TEMPORARY_ENFORCED", "False").lower() + == "true" +) USER_PERMISSIONS_FEATURES_WEB_SEARCH = ( os.environ.get("USER_PERMISSIONS_FEATURES_WEB_SEARCH", "True").lower() == "true" @@ -1064,6 +1068,7 @@ def oidc_oauth_register(client): "delete": USER_PERMISSIONS_CHAT_DELETE, "edit": USER_PERMISSIONS_CHAT_EDIT, "temporary": USER_PERMISSIONS_CHAT_TEMPORARY, + "temporary_enforced": USER_PERMISSIONS_CHAT_TEMPORARY_ENFORCED, }, "features": { "web_search": USER_PERMISSIONS_FEATURES_WEB_SEARCH, diff --git a/backend/open_webui/routers/users.py b/backend/open_webui/routers/users.py index 825a397230a..4cf9102e144 100644 --- a/backend/open_webui/routers/users.py +++ b/backend/open_webui/routers/users.py @@ -89,6 +89,7 @@ class ChatPermissions(BaseModel): delete: bool = True edit: bool = True temporary: bool = True + temporary_enforced: bool = False class FeaturesPermissions(BaseModel): diff --git a/src/lib/components/admin/Users/Groups.svelte b/src/lib/components/admin/Users/Groups.svelte index 89b4141d6b2..15497cb205b 100644 --- 
a/src/lib/components/admin/Users/Groups.svelte +++ b/src/lib/components/admin/Users/Groups.svelte @@ -52,12 +52,19 @@ prompts: false, tools: false }, + sharing: { + public_models: false, + public_knowledge: false, + public_prompts: false, + public_tools: false + }, chat: { controls: true, file_upload: true, delete: true, edit: true, - temporary: true + temporary: true, + temporary_enforced: true }, features: { web_search: true, diff --git a/src/lib/components/admin/Users/Groups/Permissions.svelte b/src/lib/components/admin/Users/Groups/Permissions.svelte index 157c8f7caa9..e1aa73f2a25 100644 --- a/src/lib/components/admin/Users/Groups/Permissions.svelte +++ b/src/lib/components/admin/Users/Groups/Permissions.svelte @@ -23,8 +23,9 @@ controls: true, delete: true, edit: true, + file_upload: true, temporary: true, - file_upload: true + temporary_enforced: true }, features: { web_search: true, @@ -277,6 +278,16 @@
+ + {#if permissions.chat.temporary} +
+
+ {$i18n.t('Enforce Temporary Chat')} +
+ + +
+ {/if}

diff --git a/src/lib/components/chat/ModelSelector.svelte b/src/lib/components/chat/ModelSelector.svelte index 9b77cd8ce29..b400f5c8616 100644 --- a/src/lib/components/chat/ModelSelector.svelte +++ b/src/lib/components/chat/ModelSelector.svelte @@ -46,7 +46,8 @@ model: model }))} showTemporaryChatControl={$user.role === 'user' - ? ($user?.permissions?.chat?.temporary ?? true) + ? ($user?.permissions?.chat?.temporary ?? true) && + !($user?.permissions?.chat?.temporary_enforced ?? false) : true} bind:value={selectedModel} /> diff --git a/src/routes/(app)/+layout.svelte b/src/routes/(app)/+layout.svelte index 52e7eaefd63..b68cc67a013 100644 --- a/src/routes/(app)/+layout.svelte +++ b/src/routes/(app)/+layout.svelte @@ -199,6 +199,12 @@ temporaryChatEnabled.set(true); } + console.log($user.permissions); + + if ($user?.permissions?.chat?.temporary_enforced) { + temporaryChatEnabled.set(true); + } + // Check for version updates if ($user.role === 'admin') { // Check if the user has dismissed the update toast in the last 24 hours From 0bc5441d725827e512a9bbb8f9f5997cafc3c6c1 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Mon, 31 Mar 2025 17:58:48 -0700 Subject: [PATCH 486/623] refac: styling --- .../chat/ModelSelector/Selector.svelte | 320 +++++++++--------- 1 file changed, 159 insertions(+), 161 deletions(-) diff --git a/src/lib/components/chat/ModelSelector/Selector.svelte b/src/lib/components/chat/ModelSelector/Selector.svelte index 47f06f8f5d6..4b8eb39ca46 100644 --- a/src/lib/components/chat/ModelSelector/Selector.svelte +++ b/src/lib/components/chat/ModelSelector/Selector.svelte @@ -374,7 +374,7 @@ {/if}
- {#if tags} + {#if tags && items.filter((item) => !(item.model?.info?.meta?.hidden ?? false)).length > 0}
{ @@ -457,177 +457,175 @@
{/if} - {#each filteredItems as item, index} - {#if !(item.model?.info?.meta?.hidden ?? false)} - - {/if} + {#if value === item.value} +
+ +
+ {/if} + {:else}
From 391dd33da3b33186fe52894fcfba2235944f4e5c Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Mon, 31 Mar 2025 17:59:21 -0700 Subject: [PATCH 487/623] chore: format --- backend/open_webui/retrieval/utils.py | 17 +++++++++++++---- src/lib/i18n/locales/ar-BH/translation.json | 10 ++++++++++ src/lib/i18n/locales/bg-BG/translation.json | 10 ++++++++++ src/lib/i18n/locales/bn-BD/translation.json | 10 ++++++++++ src/lib/i18n/locales/ca-ES/translation.json | 10 ++++++++++ src/lib/i18n/locales/ceb-PH/translation.json | 10 ++++++++++ src/lib/i18n/locales/cs-CZ/translation.json | 10 ++++++++++ src/lib/i18n/locales/da-DK/translation.json | 10 ++++++++++ src/lib/i18n/locales/de-DE/translation.json | 10 ++++++++++ src/lib/i18n/locales/dg-DG/translation.json | 10 ++++++++++ src/lib/i18n/locales/el-GR/translation.json | 10 ++++++++++ src/lib/i18n/locales/en-GB/translation.json | 10 ++++++++++ src/lib/i18n/locales/en-US/translation.json | 10 ++++++++++ src/lib/i18n/locales/es-ES/translation.json | 10 ++++++++++ src/lib/i18n/locales/et-EE/translation.json | 10 ++++++++++ src/lib/i18n/locales/eu-ES/translation.json | 10 ++++++++++ src/lib/i18n/locales/fa-IR/translation.json | 10 ++++++++++ src/lib/i18n/locales/fi-FI/translation.json | 10 ++++++++++ src/lib/i18n/locales/fr-CA/translation.json | 10 ++++++++++ src/lib/i18n/locales/fr-FR/translation.json | 10 ++++++++++ src/lib/i18n/locales/he-IL/translation.json | 10 ++++++++++ src/lib/i18n/locales/hi-IN/translation.json | 10 ++++++++++ src/lib/i18n/locales/hr-HR/translation.json | 10 ++++++++++ src/lib/i18n/locales/hu-HU/translation.json | 10 ++++++++++ src/lib/i18n/locales/id-ID/translation.json | 10 ++++++++++ src/lib/i18n/locales/ie-GA/translation.json | 10 ++++++++++ src/lib/i18n/locales/it-IT/translation.json | 10 ++++++++++ src/lib/i18n/locales/ja-JP/translation.json | 10 ++++++++++ src/lib/i18n/locales/ka-GE/translation.json | 10 ++++++++++ src/lib/i18n/locales/ko-KR/translation.json | 10 ++++++++++ 
src/lib/i18n/locales/lt-LT/translation.json | 10 ++++++++++ src/lib/i18n/locales/ms-MY/translation.json | 10 ++++++++++ src/lib/i18n/locales/nb-NO/translation.json | 10 ++++++++++ src/lib/i18n/locales/nl-NL/translation.json | 10 ++++++++++ src/lib/i18n/locales/pa-IN/translation.json | 10 ++++++++++ src/lib/i18n/locales/pl-PL/translation.json | 10 ++++++++++ src/lib/i18n/locales/pt-BR/translation.json | 10 ++++++++++ src/lib/i18n/locales/pt-PT/translation.json | 10 ++++++++++ src/lib/i18n/locales/ro-RO/translation.json | 10 ++++++++++ src/lib/i18n/locales/ru-RU/translation.json | 10 ++++++++++ src/lib/i18n/locales/sk-SK/translation.json | 10 ++++++++++ src/lib/i18n/locales/sr-RS/translation.json | 10 ++++++++++ src/lib/i18n/locales/sv-SE/translation.json | 10 ++++++++++ src/lib/i18n/locales/th-TH/translation.json | 10 ++++++++++ src/lib/i18n/locales/tk-TW/translation.json | 10 ++++++++++ src/lib/i18n/locales/tr-TR/translation.json | 10 ++++++++++ src/lib/i18n/locales/uk-UA/translation.json | 10 ++++++++++ src/lib/i18n/locales/ur-PK/translation.json | 10 ++++++++++ src/lib/i18n/locales/vi-VN/translation.json | 10 ++++++++++ src/lib/i18n/locales/zh-CN/translation.json | 10 ++++++++++ src/lib/i18n/locales/zh-TW/translation.json | 10 ++++++++++ 51 files changed, 513 insertions(+), 4 deletions(-) diff --git a/backend/open_webui/retrieval/utils.py b/backend/open_webui/retrieval/utils.py index 06a90f59656..518a1213679 100644 --- a/backend/open_webui/retrieval/utils.py +++ b/backend/open_webui/retrieval/utils.py @@ -299,7 +299,10 @@ def query_collection_with_hybrid_search( log.exception(f"Failed to fetch collection {collection_name}: {e}") collection_results[collection_name] = None - log.info(f"Starting hybrid search for {len(queries)} queries in {len(collection_names)} collections...") + log.info( + f"Starting hybrid search for {len(queries)} queries in {len(collection_names)} collections..." 
+ ) + def process_query(collection_name, query): try: result = query_doc_with_hybrid_search( @@ -317,7 +320,11 @@ def process_query(collection_name, query): log.exception(f"Error when querying the collection with hybrid_search: {e}") return None, e - tasks = [(collection_name, query) for collection_name in collection_names for query in queries] + tasks = [ + (collection_name, query) + for collection_name in collection_names + for query in queries + ] with ThreadPoolExecutor() as executor: future_results = [executor.submit(process_query, cn, q) for cn, q in tasks] @@ -330,8 +337,10 @@ def process_query(collection_name, query): results.append(result) if error and not results: - raise Exception("Hybrid search failed for all collections. Using Non-hybrid search as fallback.") - + raise Exception( + "Hybrid search failed for all collections. Using Non-hybrid search as fallback." + ) + return merge_and_sort_query_results(results, k=k) diff --git a/src/lib/i18n/locales/ar-BH/translation.json b/src/lib/i18n/locales/ar-BH/translation.json index 1cb0914bb84..a2c3a60a04f 100644 --- a/src/lib/i18n/locales/ar-BH/translation.json +++ b/src/lib/i18n/locales/ar-BH/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "تفعيل عمليات التسجيل الجديدة", "Enabled": "", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "تأكد من أن ملف CSV الخاص بك يتضمن 4 أعمدة بهذا الترتيب: Name, Email, Password, Role.", "Enter {{role}} message here": "أدخل رسالة {{role}} هنا", "Enter a detail about yourself for your LLMs to recall": "ادخل معلومات عنك تريد أن يتذكرها الموديل", @@ -569,6 +570,7 @@ "Hex Color": "", "Hex Color - Leave empty for default color": "", "Hide": "أخفاء", + "Hide Model": "", "Home": "", "Host": "", "How can I help you today?": "كيف استطيع مساعدتك اليوم؟", @@ -628,6 +630,7 @@ "Knowledge Access": "", "Knowledge created successfully.": "", "Knowledge 
deleted successfully.": "", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "", "Knowledge updated successfully": "", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "لم يتم العثور على النموذج {{modelId}}.", "Model {{modelName}} is not vision capable": "نموذج {{modelName}} غير قادر على الرؤية", "Model {{name}} is now {{status}}": "نموذج {{name}} هو الآن {{status}}", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "", "Model created successfully!": "", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "تم اكتشاف مسار نظام الملفات النموذجي. الاسم المختصر للنموذج مطلوب للتحديث، ولا يمكن الاستمرار.", @@ -712,6 +717,7 @@ "Models": "الموديلات", "Models Access": "", "Models configuration saved successfully": "", + "Models Public Sharing": "", "Mojeek Search API Key": "", "more": "", "More": "المزيد", @@ -836,6 +842,7 @@ "Prompt updated successfully": "", "Prompts": "مطالبات", "Prompts Access": "", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "Ollama.com \"{{searchValue}}\" أسحب من ", "Pull a model from Ollama.com": "Ollama.com سحب الموديل من ", @@ -968,9 +975,11 @@ "Share": "كشاركة", "Share Chat": "مشاركة الدردشة", "Share to Open WebUI Community": "OpenWebUI شارك في مجتمع", + "Sharing Permissions": "", "Show": "عرض", "Show \"What's New\" modal on login": "", "Show Admin Details in Account Pending Overlay": "", + "Show Model": "", "Show shortcuts": "إظهار الاختصارات", "Show your support!": "", "Showcased creativity": "أظهر الإبداع", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", + "Tools Public Sharing": "", "Top K": "Top K", "Top K Reranker": "", "Top P": "Top P", diff --git 
a/src/lib/i18n/locales/bg-BG/translation.json b/src/lib/i18n/locales/bg-BG/translation.json index 320fe26ead2..77f5d15ff62 100644 --- a/src/lib/i18n/locales/bg-BG/translation.json +++ b/src/lib/i18n/locales/bg-BG/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Включване на нови регистрации", "Enabled": "Активирано", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Уверете се, че вашият CSV файл включва 4 колони в следния ред: Име, Имейл, Парола, Роля.", "Enter {{role}} message here": "Въведете съобщение за {{role}} тук", "Enter a detail about yourself for your LLMs to recall": "Въведете подробности за себе си, за да ги запомнят вашите LLMs", @@ -569,6 +570,7 @@ "Hex Color": "Hex цвят", "Hex Color - Leave empty for default color": "Hex цвят - Оставете празно за цвят по подразбиране", "Hide": "Скрий", + "Hide Model": "", "Home": "Начало", "Host": "Хост", "How can I help you today?": "Как мога да ви помогна днес?", @@ -628,6 +630,7 @@ "Knowledge Access": "Достъп до знания", "Knowledge created successfully.": "Знанието е създадено успешно.", "Knowledge deleted successfully.": "Знанието е изтрито успешно.", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "Знанието е нулирано успешно.", "Knowledge updated successfully": "Знанието е актуализирано успешно", "Kokoro.js (Browser)": "Kokoro.js (Браузър)", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "Моделът {{modelId}} не е намерен", "Model {{modelName}} is not vision capable": "Моделът {{modelName}} не поддържа визуални възможности", "Model {{name}} is now {{status}}": "Моделът {{name}} сега е {{status}}", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "Моделът приема входни изображения", "Model created successfully!": "Моделът е създаден успешно!", "Model filesystem path detected. 
Model shortname is required for update, cannot continue.": "Открит е път до файловата система на модела. За актуализацията се изисква съкратено име на модела, не може да продължи.", @@ -712,6 +717,7 @@ "Models": "Модели", "Models Access": "Достъп до модели", "Models configuration saved successfully": "Конфигурацията на моделите е запазена успешно", + "Models Public Sharing": "", "Mojeek Search API Key": "API ключ за Mojeek Search", "more": "още", "More": "Повече", @@ -836,6 +842,7 @@ "Prompt updated successfully": "Промптът е актуализиран успешно", "Prompts": "Промптове", "Prompts Access": "Достъп до промптове", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "Извади \"{{searchValue}}\" от Ollama.com", "Pull a model from Ollama.com": "Издърпайте модел от Ollama.com", @@ -968,9 +975,11 @@ "Share": "Подели", "Share Chat": "Подели Чат", "Share to Open WebUI Community": "Споделете с OpenWebUI Общността", + "Sharing Permissions": "", "Show": "Покажи", "Show \"What's New\" modal on login": "Покажи модалния прозорец \"Какво е ново\" при вписване", "Show Admin Details in Account Pending Overlay": "Покажи детайлите на администратора в наслагването на изчакващ акаунт", + "Show Model": "", "Show shortcuts": "Покажи преки пътища", "Show your support!": "Покажете вашата подкрепа!", "Showcased creativity": "Показана креативност", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "Промпт за извикване на функции на инструментите", "Tools have a function calling system that allows arbitrary code execution": "Инструментите имат система за извикване на функции, която позволява произволно изпълнение на код", "Tools have a function calling system that allows arbitrary code execution.": "Инструментите имат система за извикване на функции, която позволява произволно изпълнение на код.", + "Tools Public Sharing": "", "Top K": "Топ K", "Top K Reranker": "", "Top P": "Топ P", diff --git a/src/lib/i18n/locales/bn-BD/translation.json 
b/src/lib/i18n/locales/bn-BD/translation.json index 772adc3d0d2..71b95d74e3c 100644 --- a/src/lib/i18n/locales/bn-BD/translation.json +++ b/src/lib/i18n/locales/bn-BD/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "নতুন সাইনআপ চালু করুন", "Enabled": "", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "আপনার সিএসভি ফাইলটিতে এই ক্রমে 4 টি কলাম অন্তর্ভুক্ত রয়েছে তা নিশ্চিত করুন: নাম, ইমেল, পাসওয়ার্ড, ভূমিকা।.", "Enter {{role}} message here": "{{role}} মেসেজ এখানে লিখুন", "Enter a detail about yourself for your LLMs to recall": "আপনার এলএলএমগুলি স্মরণ করার জন্য নিজের সম্পর্কে একটি বিশদ লিখুন", @@ -569,6 +570,7 @@ "Hex Color": "", "Hex Color - Leave empty for default color": "", "Hide": "লুকান", + "Hide Model": "", "Home": "", "Host": "", "How can I help you today?": "আপনাকে আজ কিভাবে সাহায্য করতে পারি?", @@ -628,6 +630,7 @@ "Knowledge Access": "", "Knowledge created successfully.": "", "Knowledge deleted successfully.": "", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "", "Knowledge updated successfully": "", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "{{modelId}} মডেল পাওয়া যায়নি", "Model {{modelName}} is not vision capable": "মডেল {{modelName}} দৃষ্টি সক্ষম নয়", "Model {{name}} is now {{status}}": "মডেল {{name}} এখন {{status}}", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "", "Model created successfully!": "", "Model filesystem path detected. 
Model shortname is required for update, cannot continue.": "মডেল ফাইলসিস্টেম পাথ পাওয়া গেছে। আপডেটের জন্য মডেলের শর্টনেম আবশ্যক, এগিয়ে যাওয়া যাচ্ছে না।", @@ -712,6 +717,7 @@ "Models": "মডেলসমূহ", "Models Access": "", "Models configuration saved successfully": "", + "Models Public Sharing": "", "Mojeek Search API Key": "", "more": "", "More": "আরো", @@ -836,6 +842,7 @@ "Prompt updated successfully": "", "Prompts": "প্রম্পটসমূহ", "Prompts Access": "", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "Ollama.com থেকে \"{{searchValue}}\" টানুন", "Pull a model from Ollama.com": "Ollama.com থেকে একটি টেনে আনুন আনুন", @@ -968,9 +975,11 @@ "Share": "শেয়ার করুন", "Share Chat": "চ্যাট শেয়ার করুন", "Share to Open WebUI Community": "OpenWebUI কমিউনিটিতে শেয়ার করুন", + "Sharing Permissions": "", "Show": "দেখান", "Show \"What's New\" modal on login": "", "Show Admin Details in Account Pending Overlay": "", + "Show Model": "", "Show shortcuts": "শর্টকাটগুলো দেখান", "Show your support!": "", "Showcased creativity": "সৃজনশীলতা প্রদর্শন", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", + "Tools Public Sharing": "", "Top K": "Top K", "Top K Reranker": "", "Top P": "Top P", diff --git a/src/lib/i18n/locales/ca-ES/translation.json b/src/lib/i18n/locales/ca-ES/translation.json index dd005b993f3..09000a7da11 100644 --- a/src/lib/i18n/locales/ca-ES/translation.json +++ b/src/lib/i18n/locales/ca-ES/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "Permetre el mostreig de Mirostat per controlar la perplexitat", "Enable New Sign Ups": "Permetre nous registres", "Enabled": "Habilitat", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Assegura't que els teus 
fitxers CSV inclouen 4 columnes en aquest ordre: Nom, Correu electrònic, Contrasenya, Rol.", "Enter {{role}} message here": "Introdueix aquí el missatge de {{role}}", "Enter a detail about yourself for your LLMs to recall": "Introdueix un detall sobre tu què els teus models de llenguatge puguin recordar", @@ -569,6 +570,7 @@ "Hex Color": "Color hexadecimal", "Hex Color - Leave empty for default color": "Color hexadecimal - Deixar buit per a color per defecte", "Hide": "Amaga", + "Hide Model": "", "Home": "Inici", "Host": "Servidor", "How can I help you today?": "Com et puc ajudar avui?", @@ -628,6 +630,7 @@ "Knowledge Access": "Accés al coneixement", "Knowledge created successfully.": "Coneixement creat correctament.", "Knowledge deleted successfully.": "Coneixement eliminat correctament.", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "Coneixement restablert correctament.", "Knowledge updated successfully": "Coneixement actualitzat correctament.", "Kokoro.js (Browser)": "Kokoro.js (Navegador)", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "No s'ha trobat el model {{modelId}}", "Model {{modelName}} is not vision capable": "El model {{modelName}} no és capaç de visió", "Model {{name}} is now {{status}}": "El model {{name}} ara és {{status}}", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "El model accepta entrades d'imatge", "Model created successfully!": "Model creat correctament", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "S'ha detectat el camí del sistema de fitxers del model. 
És necessari un nom curt del model per actualitzar, no es pot continuar.", @@ -712,6 +717,7 @@ "Models": "Models", "Models Access": "Accés als models", "Models configuration saved successfully": "La configuració dels models s'ha desat correctament", + "Models Public Sharing": "", "Mojeek Search API Key": "Clau API de Mojeek Search", "more": "més", "More": "Més", @@ -836,6 +842,7 @@ "Prompt updated successfully": "Indicació actualitzada correctament", "Prompts": "Indicacions", "Prompts Access": "Accés a les indicacions", + "Prompts Public Sharing": "", "Public": "Públic", "Pull \"{{searchValue}}\" from Ollama.com": "Obtenir \"{{searchValue}}\" de Ollama.com", "Pull a model from Ollama.com": "Obtenir un model d'Ollama.com", @@ -968,9 +975,11 @@ "Share": "Compartir", "Share Chat": "Compartir el xat", "Share to Open WebUI Community": "Compartir amb la comunitat OpenWebUI", + "Sharing Permissions": "", "Show": "Mostrar", "Show \"What's New\" modal on login": "Veure 'Què hi ha de nou' a l'entrada", "Show Admin Details in Account Pending Overlay": "Mostrar els detalls de l'administrador a la superposició del compte pendent", + "Show Model": "", "Show shortcuts": "Mostrar dreceres", "Show your support!": "Mostra el teu suport!", "Showcased creativity": "Creativitat mostrada", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "Indicació per a la crida de funcions", "Tools have a function calling system that allows arbitrary code execution": "Les eines disposen d'un sistema de crida a funcions que permet execució de codi arbitrari", "Tools have a function calling system that allows arbitrary code execution.": "Les eines disposen d'un sistema de crida a funcions que permet execució de codi arbitrari.", + "Tools Public Sharing": "", "Top K": "Top K", "Top K Reranker": "Top K Reranker", "Top P": "Top P", diff --git a/src/lib/i18n/locales/ceb-PH/translation.json b/src/lib/i18n/locales/ceb-PH/translation.json index 6e65e392636..4956d804786 100644 --- 
a/src/lib/i18n/locales/ceb-PH/translation.json +++ b/src/lib/i18n/locales/ceb-PH/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "I-enable ang bag-ong mga rehistro", "Enabled": "", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "", "Enter {{role}} message here": "Pagsulod sa mensahe {{role}} dinhi", "Enter a detail about yourself for your LLMs to recall": "", @@ -569,6 +570,7 @@ "Hex Color": "", "Hex Color - Leave empty for default color": "", "Hide": "Tagoa", + "Hide Model": "", "Home": "", "Host": "", "How can I help you today?": "Unsaon nako pagtabang kanimo karon?", @@ -628,6 +630,7 @@ "Knowledge Access": "", "Knowledge created successfully.": "", "Knowledge deleted successfully.": "", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "", "Knowledge updated successfully": "", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "Modelo {{modelId}} wala makit-an", "Model {{modelName}} is not vision capable": "", "Model {{name}} is now {{status}}": "", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "", "Model created successfully!": "", "Model filesystem path detected. 
Model shortname is required for update, cannot continue.": "", @@ -712,6 +717,7 @@ "Models": "Mga modelo", "Models Access": "", "Models configuration saved successfully": "", + "Models Public Sharing": "", "Mojeek Search API Key": "", "more": "", "More": "", @@ -836,6 +842,7 @@ "Prompt updated successfully": "", "Prompts": "Mga aghat", "Prompts Access": "", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "", "Pull a model from Ollama.com": "Pagkuha ug template gikan sa Ollama.com", @@ -968,9 +975,11 @@ "Share": "", "Share Chat": "", "Share to Open WebUI Community": "Ipakigbahin sa komunidad sa OpenWebUI", + "Sharing Permissions": "", "Show": "Pagpakita", "Show \"What's New\" modal on login": "", "Show Admin Details in Account Pending Overlay": "", + "Show Model": "", "Show shortcuts": "Ipakita ang mga shortcut", "Show your support!": "", "Showcased creativity": "", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", + "Tools Public Sharing": "", "Top K": "Top K", "Top K Reranker": "", "Top P": "Ibabaw nga P", diff --git a/src/lib/i18n/locales/cs-CZ/translation.json b/src/lib/i18n/locales/cs-CZ/translation.json index 92452a84994..c7d694b8ac6 100644 --- a/src/lib/i18n/locales/cs-CZ/translation.json +++ b/src/lib/i18n/locales/cs-CZ/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Povolit nové registrace", "Enabled": "Povoleno", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Ujistěte se, že váš CSV soubor obsahuje 4 sloupce v tomto pořadí: Name, Email, Password, Role.", "Enter {{role}} message here": "Zadejte zprávu {{role}} sem", "Enter a detail about yourself for your LLMs to recall": "Zadejte podrobnost o sobě, 
kterou si vaše LLM mají pamatovat.", @@ -569,6 +570,7 @@ "Hex Color": "", "Hex Color - Leave empty for default color": "", "Hide": "Schovej", + "Hide Model": "", "Home": "", "Host": "", "How can I help you today?": "Jak vám mohu dnes pomoci?", @@ -628,6 +630,7 @@ "Knowledge Access": "", "Knowledge created successfully.": "Znalost úspěšně vytvořena.", "Knowledge deleted successfully.": "Znalosti byly úspěšně odstraněny.", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "Úspěšné obnovení znalostí.", "Knowledge updated successfully": "Znalosti úspěšně aktualizovány", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "Model {{modelId}} nebyl nalezen", "Model {{modelName}} is not vision capable": "Model {{modelName}} není schopen zpracovávat vizuální data.", "Model {{name}} is now {{status}}": "Model {{name}} je nyní {{status}}.", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "Model přijímá vstupy ve formě obrázků", "Model created successfully!": "Model byl úspěšně vytvořen!", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "Detekována cesta v\u00a0souborovém systému. 
Je vyžadován krátký název modelu pro aktualizaci, nelze pokračovat.", @@ -712,6 +717,7 @@ "Models": "Modely", "Models Access": "", "Models configuration saved successfully": "", + "Models Public Sharing": "", "Mojeek Search API Key": "", "more": "více", "More": "Více", @@ -836,6 +842,7 @@ "Prompt updated successfully": "", "Prompts": "Prompty", "Prompts Access": "", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "Stáhněte \"{{searchValue}}\" z Ollama.com", "Pull a model from Ollama.com": "Stáhněte model z Ollama.com", @@ -968,9 +975,11 @@ "Share": "Sdílet", "Share Chat": "Sdílet chat", "Share to Open WebUI Community": "Sdílet s komunitou OpenWebUI", + "Sharing Permissions": "", "Show": "Zobrazit", "Show \"What's New\" modal on login": "", "Show Admin Details in Account Pending Overlay": "Zobrazit podrobnosti administrátora v překryvném okně s čekajícím účtem", + "Show Model": "", "Show shortcuts": "Zobrazit klávesové zkratky", "Show your support!": "Vyjadřete svou podporu!", "Showcased creativity": "Předvedená kreativita", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "Nástroje mají systém volání funkcí, který umožňuje libovolné spouštění kódu.", "Tools have a function calling system that allows arbitrary code execution.": "Nástroje mají systém volání funkcí, který umožňuje spuštění libovolného kódu.", + "Tools Public Sharing": "", "Top K": "Top K", "Top K Reranker": "", "Top P": "Top P", diff --git a/src/lib/i18n/locales/da-DK/translation.json b/src/lib/i18n/locales/da-DK/translation.json index 766249bf0ed..8037fedb55b 100644 --- a/src/lib/i18n/locales/da-DK/translation.json +++ b/src/lib/i18n/locales/da-DK/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Aktiver nye signups", "Enabled": "Aktiveret", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 
4 columns in this order: Name, Email, Password, Role.": "Sørg for at din CSV-fil indeholder 4 kolonner in denne rækkefølge: Name, Email, Password, Role.", "Enter {{role}} message here": "Indtast {{role}} besked her", "Enter a detail about yourself for your LLMs to recall": "Indtast en detalje om dig selv, som dine LLMs kan huske", @@ -569,6 +570,7 @@ "Hex Color": "", "Hex Color - Leave empty for default color": "", "Hide": "Skjul", + "Hide Model": "", "Home": "", "Host": "", "How can I help you today?": "Hvordan kan jeg hjælpe dig i dag?", @@ -628,6 +630,7 @@ "Knowledge Access": "", "Knowledge created successfully.": "Viden oprettet.", "Knowledge deleted successfully.": "Viden slettet.", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "Viden nulstillet.", "Knowledge updated successfully": "Viden opdateret.", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "Model {{modelId}} ikke fundet", "Model {{modelName}} is not vision capable": "Model {{modelName}} understøtter ikke billeder", "Model {{name}} is now {{status}}": "Model {{name}} er nu {{status}}", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "Model accepterer billedinput", "Model created successfully!": "Model oprettet!", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "Model filsystemsti registreret. 
Modelkortnavn er påkrævet til opdatering, kan ikke fortsætte.", @@ -712,6 +717,7 @@ "Models": "Modeller", "Models Access": "", "Models configuration saved successfully": "", + "Models Public Sharing": "", "Mojeek Search API Key": "", "more": "", "More": "Mere", @@ -836,6 +842,7 @@ "Prompt updated successfully": "", "Prompts": "Prompts", "Prompts Access": "", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "Hent \"{{searchValue}}\" fra Ollama.com", "Pull a model from Ollama.com": "Hent en model fra Ollama.com", @@ -968,9 +975,11 @@ "Share": "Del", "Share Chat": "Del chat", "Share to Open WebUI Community": "Del til OpenWebUI Community", + "Sharing Permissions": "", "Show": "Vis", "Show \"What's New\" modal on login": "", "Show Admin Details in Account Pending Overlay": "Vis administratordetaljer i overlay for ventende konto", + "Show Model": "", "Show shortcuts": "Vis genveje", "Show your support!": "Vis din støtte!", "Showcased creativity": "Udstillet kreativitet", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "Værktøjer har et funktionkaldssystem, der tillader vilkårlig kodeudførelse", "Tools have a function calling system that allows arbitrary code execution.": "Værktøjer har et funktionkaldssystem, der tillader vilkårlig kodeudførelse.", + "Tools Public Sharing": "", "Top K": "Top K", "Top K Reranker": "", "Top P": "Top P", diff --git a/src/lib/i18n/locales/de-DE/translation.json b/src/lib/i18n/locales/de-DE/translation.json index c3665f24b26..60e9f218585 100644 --- a/src/lib/i18n/locales/de-DE/translation.json +++ b/src/lib/i18n/locales/de-DE/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Registrierung erlauben", "Enabled": "Aktiviert", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": 
"Stellen Sie sicher, dass Ihre CSV-Datei 4 Spalten in dieser Reihenfolge enthält: Name, E-Mail, Passwort, Rolle.", "Enter {{role}} message here": "Geben Sie die {{role}}-Nachricht hier ein", "Enter a detail about yourself for your LLMs to recall": "Geben Sie ein Detail über sich selbst ein, das Ihre Sprachmodelle (LLMs) sich merken sollen", @@ -569,6 +570,7 @@ "Hex Color": "Hex-Farbe", "Hex Color - Leave empty for default color": "Hex-Farbe - Leer lassen für Standardfarbe", "Hide": "Verbergen", + "Hide Model": "", "Home": "", "Host": "Host", "How can I help you today?": "Wie kann ich Ihnen heute helfen?", @@ -628,6 +630,7 @@ "Knowledge Access": "Wissenszugriff", "Knowledge created successfully.": "Wissen erfolgreich erstellt.", "Knowledge deleted successfully.": "Wissen erfolgreich gelöscht.", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "Wissen erfolgreich zurückgesetzt.", "Knowledge updated successfully": "Wissen erfolgreich aktualisiert", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "Modell {{modelId}} nicht gefunden", "Model {{modelName}} is not vision capable": "Das Modell {{modelName}} ist nicht für die Bildverarbeitung geeignet", "Model {{name}} is now {{status}}": "Modell {{name}} ist jetzt {{status}}", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "Modell akzeptiert Bildeingaben", "Model created successfully!": "Modell erfolgreich erstellt!", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "Modell-Dateisystempfad erkannt. 
Modellkurzname ist für das Update erforderlich, Fortsetzung nicht möglich.", @@ -712,6 +717,7 @@ "Models": "Modelle", "Models Access": "Modell-Zugriff", "Models configuration saved successfully": "Modellkonfiguration erfolgreich gespeichert", + "Models Public Sharing": "", "Mojeek Search API Key": "Mojeek Search API-Schlüssel", "more": "mehr", "More": "Mehr", @@ -836,6 +842,7 @@ "Prompt updated successfully": "Prompt erfolgreich aktualisiert", "Prompts": "Prompts", "Prompts Access": "Prompt-Zugriff", + "Prompts Public Sharing": "", "Public": "Öffentlich", "Pull \"{{searchValue}}\" from Ollama.com": "\"{{searchValue}}\" von Ollama.com beziehen", "Pull a model from Ollama.com": "Modell von Ollama.com beziehen", @@ -968,9 +975,11 @@ "Share": "Teilen", "Share Chat": "Chat teilen", "Share to Open WebUI Community": "Mit OpenWebUI Community teilen", + "Sharing Permissions": "", "Show": "Anzeigen", "Show \"What's New\" modal on login": "\"Was gibt's Neues\"-Modal beim Anmelden anzeigen", "Show Admin Details in Account Pending Overlay": "Admin-Details im Account-Pending-Overlay anzeigen", + "Show Model": "", "Show shortcuts": "Verknüpfungen anzeigen", "Show your support!": "Zeigen Sie Ihre Unterstützung!", "Showcased creativity": "Kreativität gezeigt", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "Prompt für Funktionssystemaufrufe", "Tools have a function calling system that allows arbitrary code execution": "Werkezuge verfügen über ein Funktionssystem, das die Ausführung beliebigen Codes ermöglicht", "Tools have a function calling system that allows arbitrary code execution.": "Werkzeuge verfügen über ein Funktionssystem, das die Ausführung beliebigen Codes ermöglicht.", + "Tools Public Sharing": "", "Top K": "Top K", "Top K Reranker": "", "Top P": "Top P", diff --git a/src/lib/i18n/locales/dg-DG/translation.json b/src/lib/i18n/locales/dg-DG/translation.json index 2b9dec6062f..46d8011882d 100644 --- a/src/lib/i18n/locales/dg-DG/translation.json +++ 
b/src/lib/i18n/locales/dg-DG/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Enable New Bark Ups", "Enabled": "", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "", "Enter {{role}} message here": "Enter {{role}} bork here", "Enter a detail about yourself for your LLMs to recall": "", @@ -569,6 +570,7 @@ "Hex Color": "", "Hex Color - Leave empty for default color": "", "Hide": "Hide", + "Hide Model": "", "Home": "", "Host": "", "How can I help you today?": "How can I halp u today?", @@ -628,6 +630,7 @@ "Knowledge Access": "", "Knowledge created successfully.": "", "Knowledge deleted successfully.": "", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "", "Knowledge updated successfully": "", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "Model {{modelId}} not found", "Model {{modelName}} is not vision capable": "", "Model {{name}} is now {{status}}": "", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "", "Model created successfully!": "", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "Model filesystem bark detected. 
Model shortname is required for update, cannot continue.", @@ -712,6 +717,7 @@ "Models": "Wowdels", "Models Access": "", "Models configuration saved successfully": "", + "Models Public Sharing": "", "Mojeek Search API Key": "", "more": "", "More": "", @@ -836,6 +842,7 @@ "Prompt updated successfully": "", "Prompts": "Promptos", "Prompts Access": "", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "", "Pull a model from Ollama.com": "Pull a wowdel from Ollama.com", @@ -968,9 +975,11 @@ "Share": "", "Share Chat": "", "Share to Open WebUI Community": "Share to Open WebUI Community much community", + "Sharing Permissions": "", "Show": "Show much show", "Show \"What's New\" modal on login": "", "Show Admin Details in Account Pending Overlay": "", + "Show Model": "", "Show shortcuts": "Show shortcuts much shortcut", "Show your support!": "", "Showcased creativity": "", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", + "Tools Public Sharing": "", "Top K": "Top K very top", "Top K Reranker": "", "Top P": "Top P very top", diff --git a/src/lib/i18n/locales/el-GR/translation.json b/src/lib/i18n/locales/el-GR/translation.json index 1ca65d51cdd..cdb982b7a8c 100644 --- a/src/lib/i18n/locales/el-GR/translation.json +++ b/src/lib/i18n/locales/el-GR/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Ενεργοποίηση Νέων Εγγραφών", "Enabled": "Ενεργοποιημένο", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Βεβαιωθείτε ότι το αρχείο CSV σας περιλαμβάνει 4 στήλες με αυτή τη σειρά: Όνομα, Email, Κωδικός, Ρόλος.", "Enter {{role}} message here": "Εισάγετε το μήνυμα {{role}} εδώ", "Enter a detail about yourself for your LLMs to recall": 
"Εισάγετε μια λεπτομέρεια για τον εαυτό σας ώστε τα LLMs να την ανακαλούν", @@ -569,6 +570,7 @@ "Hex Color": "Χρώμα Hex", "Hex Color - Leave empty for default color": "Χρώμα Hex - Αφήστε κενό για προεπιλεγμένο χρώμα", "Hide": "Απόκρυψη", + "Hide Model": "", "Home": "", "Host": "Διακομιστής", "How can I help you today?": "Πώς μπορώ να σας βοηθήσω σήμερα;", @@ -628,6 +630,7 @@ "Knowledge Access": "Πρόσβαση στη Γνώση", "Knowledge created successfully.": "Η γνώση δημιουργήθηκε με επιτυχία.", "Knowledge deleted successfully.": "Η γνώση διαγράφηκε με επιτυχία.", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "Η γνώση επαναφέρθηκε με επιτυχία.", "Knowledge updated successfully": "Η γνώση ενημερώθηκε με επιτυχία", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "Το μοντέλο {{modelId}} δεν βρέθηκε", "Model {{modelName}} is not vision capable": "Το μοντέλο {{modelName}} δεν έχει δυνατότητα όρασης", "Model {{name}} is now {{status}}": "Το μοντέλο {{name}} είναι τώρα {{status}}", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "Το μοντέλο δέχεται είσοδο εικόνας", "Model created successfully!": "Το μοντέλο δημιουργήθηκε με επιτυχία!", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "Ανιχνεύθηκε διαδρομή αρχείου μοντέλου. 
Το σύντομο όνομα μοντέλου απαιτείται για ενημέρωση, δεν μπορεί να συνεχιστεί.", @@ -712,6 +717,7 @@ "Models": "Μοντέλα", "Models Access": "Πρόσβαση Μοντέλων", "Models configuration saved successfully": "Η διαμόρφωση των μοντέλων αποθηκεύτηκε με επιτυχία", + "Models Public Sharing": "", "Mojeek Search API Key": "Κλειδί API Mojeek Search", "more": "περισσότερα", "More": "Περισσότερα", @@ -836,6 +842,7 @@ "Prompt updated successfully": "Η προτροπή ενημερώθηκε με επιτυχία", "Prompts": "Προτροπές", "Prompts Access": "Πρόσβαση Προτροπών", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "Τραβήξτε \"{{searchValue}}\" από το Ollama.com", "Pull a model from Ollama.com": "Τραβήξτε ένα μοντέλο από το Ollama.com", @@ -968,9 +975,11 @@ "Share": "Κοινή Χρήση", "Share Chat": "Κοινή Χρήση Συνομιλίας", "Share to Open WebUI Community": "Κοινή Χρήση στην Κοινότητα OpenWebUI", + "Sharing Permissions": "", "Show": "Εμφάνιση", "Show \"What's New\" modal on login": "Εμφάνιση του παράθυρου \"Τι νέο υπάρχει\" κατά την είσοδο", "Show Admin Details in Account Pending Overlay": "Εμφάνιση Λεπτομερειών Διαχειριστή στο Υπέρθεση Εκκρεμής Λογαριασμού", + "Show Model": "", "Show shortcuts": "Εμφάνιση συντομεύσεων", "Show your support!": "Δείξτε την υποστήριξή σας!", "Showcased creativity": "Εμφανιζόμενη δημιουργικότητα", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "Τα εργαλεία διαθέτουν ένα σύστημα κλήσης λειτουργιών που επιτρέπει την αυθαίρετη εκτέλεση κώδικα", "Tools have a function calling system that allows arbitrary code execution.": "Τα εργαλεία διαθέτουν ένα σύστημα κλήσης λειτουργιών που επιτρέπει την αυθαίρετη εκτέλεση κώδικα.", + "Tools Public Sharing": "", "Top K": "Top K", "Top K Reranker": "", "Top P": "Top P", diff --git a/src/lib/i18n/locales/en-GB/translation.json b/src/lib/i18n/locales/en-GB/translation.json index 89846bc471a..d02bdaba7c3 100644 --- 
a/src/lib/i18n/locales/en-GB/translation.json +++ b/src/lib/i18n/locales/en-GB/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "", "Enabled": "", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "", "Enter {{role}} message here": "", "Enter a detail about yourself for your LLMs to recall": "", @@ -569,6 +570,7 @@ "Hex Color": "", "Hex Color - Leave empty for default color": "", "Hide": "", + "Hide Model": "", "Home": "", "Host": "", "How can I help you today?": "", @@ -628,6 +630,7 @@ "Knowledge Access": "", "Knowledge created successfully.": "", "Knowledge deleted successfully.": "", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "", "Knowledge updated successfully": "", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "", "Model {{modelName}} is not vision capable": "", "Model {{name}} is now {{status}}": "", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "", "Model created successfully!": "", "Model filesystem path detected. 
Model shortname is required for update, cannot continue.": "", @@ -712,6 +717,7 @@ "Models": "", "Models Access": "", "Models configuration saved successfully": "", + "Models Public Sharing": "", "Mojeek Search API Key": "", "more": "", "More": "", @@ -836,6 +842,7 @@ "Prompt updated successfully": "", "Prompts": "", "Prompts Access": "", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "", "Pull a model from Ollama.com": "", @@ -968,9 +975,11 @@ "Share": "", "Share Chat": "", "Share to Open WebUI Community": "", + "Sharing Permissions": "", "Show": "", "Show \"What's New\" modal on login": "", "Show Admin Details in Account Pending Overlay": "", + "Show Model": "", "Show shortcuts": "", "Show your support!": "", "Showcased creativity": "", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", + "Tools Public Sharing": "", "Top K": "", "Top K Reranker": "", "Top P": "", diff --git a/src/lib/i18n/locales/en-US/translation.json b/src/lib/i18n/locales/en-US/translation.json index 89846bc471a..d02bdaba7c3 100644 --- a/src/lib/i18n/locales/en-US/translation.json +++ b/src/lib/i18n/locales/en-US/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "", "Enabled": "", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "", "Enter {{role}} message here": "", "Enter a detail about yourself for your LLMs to recall": "", @@ -569,6 +570,7 @@ "Hex Color": "", "Hex Color - Leave empty for default color": "", "Hide": "", + "Hide Model": "", "Home": "", "Host": "", "How can I help you today?": "", @@ -628,6 +630,7 @@ "Knowledge Access": "", "Knowledge created successfully.": "", "Knowledge deleted successfully.": "", + "Knowledge Public 
Sharing": "", "Knowledge reset successfully.": "", "Knowledge updated successfully": "", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "", "Model {{modelName}} is not vision capable": "", "Model {{name}} is now {{status}}": "", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "", "Model created successfully!": "", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "", @@ -712,6 +717,7 @@ "Models": "", "Models Access": "", "Models configuration saved successfully": "", + "Models Public Sharing": "", "Mojeek Search API Key": "", "more": "", "More": "", @@ -836,6 +842,7 @@ "Prompt updated successfully": "", "Prompts": "", "Prompts Access": "", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "", "Pull a model from Ollama.com": "", @@ -968,9 +975,11 @@ "Share": "", "Share Chat": "", "Share to Open WebUI Community": "", + "Sharing Permissions": "", "Show": "", "Show \"What's New\" modal on login": "", "Show Admin Details in Account Pending Overlay": "", + "Show Model": "", "Show shortcuts": "", "Show your support!": "", "Showcased creativity": "", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", + "Tools Public Sharing": "", "Top K": "", "Top K Reranker": "", "Top P": "", diff --git a/src/lib/i18n/locales/es-ES/translation.json b/src/lib/i18n/locales/es-ES/translation.json index 3e5acea3721..ecb76f49cc3 100644 --- a/src/lib/i18n/locales/es-ES/translation.json +++ b/src/lib/i18n/locales/es-ES/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "Habilitar muestreo Mirostat para controlar la perplejidad.", "Enable New Sign Ups": "Habilitar Registros de Nuevos Usuarios", "Enabled": "Habilitado", 
+ "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Asegúrese de que su archivo CSV incluya 4 columnas en este orden: Nombre, Correo Electrónico, Contraseña, Rol.", "Enter {{role}} message here": "Ingresar mensaje {{role}} aquí", "Enter a detail about yourself for your LLMs to recall": "Ingresar detalles sobre ti para que los recuerden sus LLMs", @@ -569,6 +570,7 @@ "Hex Color": "Color Hex", "Hex Color - Leave empty for default color": "Color Hex - Deja vacío para el color predeterminado", "Hide": "Esconder", + "Hide Model": "", "Home": "Inicio", "Host": "Host", "How can I help you today?": "¿Cómo puedo ayudarte hoy?", @@ -628,6 +630,7 @@ "Knowledge Access": "Acceso a Conocimiento", "Knowledge created successfully.": "Conocimiento creado correctamente.", "Knowledge deleted successfully.": "Conocimiento eliminado correctamente.", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "Conocimiento restablecido correctamente.", "Knowledge updated successfully": "Conocimiento actualizado correctamente.", "Kokoro.js (Browser)": "Kokoro.js (Navegador)", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "Modelo {{modelId}} no encontrado", "Model {{modelName}} is not vision capable": "Modelo {{modelName}} no esta capacitado para visión", "Model {{name}} is now {{status}}": "Modelo {{name}} está ahora {{status}}", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "Modelo acepta entradas de imágen", "Model created successfully!": "¡Modelo creado correctamente!", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "Detectada ruta del sistema al modelo. 
Para actualizar se requiere el nombre corto del modelo, no se puede continuar.", @@ -712,6 +717,7 @@ "Models": "Modelos", "Models Access": "Acceso Modelos", "Models configuration saved successfully": "Configuración de Modelos guardada correctamente", + "Models Public Sharing": "", "Mojeek Search API Key": "Clave API de Mojeek Search", "more": "más", "More": "Más", @@ -836,6 +842,7 @@ "Prompt updated successfully": "Indicador(prompt) actualizado correctamente", "Prompts": "Indicadores(prompts)", "Prompts Access": "Acceso a Indicadores(prompts)", + "Prompts Public Sharing": "", "Public": "Público", "Pull \"{{searchValue}}\" from Ollama.com": "Extraer \"{{searchValue}}\" desde Ollama.com", "Pull a model from Ollama.com": "Extraer un modelo desde Ollama.com", @@ -968,9 +975,11 @@ "Share": "Compartir", "Share Chat": "Compartir Chat", "Share to Open WebUI Community": "Compartir con la Comunidad Open-WebUI", + "Sharing Permissions": "", "Show": "Mostrar", "Show \"What's New\" modal on login": "Mostrar modal \"Qué hay de Nuevo\" al iniciar sesión", "Show Admin Details in Account Pending Overlay": "Mostrar Detalles Admin en la Sobrecapa Cuenta Pendiente", + "Show Model": "", "Show shortcuts": "Mostrar Atajos", "Show your support!": "¡Muestra tu apoyo!", "Showcased creativity": "Creatividad exhibida", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "Indicador(prompt) para la Función de Llamada a las Herramientas", "Tools have a function calling system that allows arbitrary code execution": "Las herramientas tienen un sistema de llamadas de funciones que permite la ejecución de código arbitrario", "Tools have a function calling system that allows arbitrary code execution.": "Las herramientas tienen un sistema de llamada de funciones que permite la ejecución de código arbitrario.", + "Tools Public Sharing": "", "Top K": "Top K", "Top K Reranker": "Top K Reclasificador", "Top P": "Top P", diff --git a/src/lib/i18n/locales/et-EE/translation.json 
b/src/lib/i18n/locales/et-EE/translation.json index c797fdf4e31..b8d3c7bbe07 100644 --- a/src/lib/i18n/locales/et-EE/translation.json +++ b/src/lib/i18n/locales/et-EE/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "Luba Mirostat'i valim perplekssuse juhtimiseks.", "Enable New Sign Ups": "Luba uued registreerimised", "Enabled": "Lubatud", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Veenduge, et teie CSV-fail sisaldab 4 veergu selles järjekorras: Nimi, E-post, Parool, Roll.", "Enter {{role}} message here": "Sisestage {{role}} sõnum siia", "Enter a detail about yourself for your LLMs to recall": "Sisestage detail enda kohta, mida teie LLM-id saavad meenutada", @@ -569,6 +570,7 @@ "Hex Color": "Hex värv", "Hex Color - Leave empty for default color": "Hex värv - jätke tühjaks vaikevärvi jaoks", "Hide": "Peida", + "Hide Model": "", "Home": "Avaleht", "Host": "Host", "How can I help you today?": "Kuidas saan teid täna aidata?", @@ -628,6 +630,7 @@ "Knowledge Access": "Teadmiste juurdepääs", "Knowledge created successfully.": "Teadmised edukalt loodud.", "Knowledge deleted successfully.": "Teadmised edukalt kustutatud.", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "Teadmised edukalt lähtestatud.", "Knowledge updated successfully": "Teadmised edukalt uuendatud", "Kokoro.js (Browser)": "Kokoro.js (brauser)", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "Mudelit {{modelId}} ei leitud", "Model {{modelName}} is not vision capable": "Mudel {{modelName}} ei ole võimeline visuaalseid sisendeid töötlema", "Model {{name}} is now {{status}}": "Mudel {{name}} on nüüd {{status}}", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "Mudel võtab vastu pilte sisendina", "Model created successfully!": "Mudel edukalt loodud!", "Model filesystem path detected. 
Model shortname is required for update, cannot continue.": "Tuvastati mudeli failisüsteemi tee. Uuendamiseks on vajalik mudeli lühinimi, ei saa jätkata.", @@ -712,6 +717,7 @@ "Models": "Mudelid", "Models Access": "Mudelite juurdepääs", "Models configuration saved successfully": "Mudelite seadistus edukalt salvestatud", + "Models Public Sharing": "", "Mojeek Search API Key": "Mojeek Search API võti", "more": "rohkem", "More": "Rohkem", @@ -836,6 +842,7 @@ "Prompt updated successfully": "Vihje edukalt uuendatud", "Prompts": "Vihjed", "Prompts Access": "Vihjete juurdepääs", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "Tõmba \"{{searchValue}}\" Ollama.com-ist", "Pull a model from Ollama.com": "Tõmba mudel Ollama.com-ist", @@ -968,9 +975,11 @@ "Share": "Jaga", "Share Chat": "Jaga vestlust", "Share to Open WebUI Community": "Jaga Open WebUI kogukonnaga", + "Sharing Permissions": "", "Show": "Näita", "Show \"What's New\" modal on login": "Näita \"Mis on uut\" modaalakent sisselogimisel", "Show Admin Details in Account Pending Overlay": "Näita administraatori üksikasju konto ootel kattekihil", + "Show Model": "", "Show shortcuts": "Näita otseteid", "Show your support!": "Näita oma toetust!", "Showcased creativity": "Näitas loovust", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "Tööriistade funktsioonide kutsumise vihje", "Tools have a function calling system that allows arbitrary code execution": "Tööriistadel on funktsioonide kutsumise süsteem, mis võimaldab suvalise koodi täitmist", "Tools have a function calling system that allows arbitrary code execution.": "Tööriistadel on funktsioonide kutsumise süsteem, mis võimaldab suvalise koodi täitmist.", + "Tools Public Sharing": "", "Top K": "Top K", "Top K Reranker": "", "Top P": "Top P", diff --git a/src/lib/i18n/locales/eu-ES/translation.json b/src/lib/i18n/locales/eu-ES/translation.json index 38c18f0147d..a3f930a6b49 100644 --- 
a/src/lib/i18n/locales/eu-ES/translation.json +++ b/src/lib/i18n/locales/eu-ES/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Gaitu Izena Emate Berriak", "Enabled": "Gaituta", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Ziurtatu zure CSV fitxategiak 4 zutabe dituela ordena honetan: Izena, Posta elektronikoa, Pasahitza, Rola.", "Enter {{role}} message here": "Sartu {{role}} mezua hemen", "Enter a detail about yourself for your LLMs to recall": "Sartu zure buruari buruzko xehetasun bat LLMek gogoratzeko", @@ -569,6 +570,7 @@ "Hex Color": "Hex Kolorea", "Hex Color - Leave empty for default color": "Hex Kolorea - Utzi hutsik kolore lehenetsia erabiltzeko", "Hide": "Ezkutatu", + "Hide Model": "", "Home": "", "Host": "Ostalaria", "How can I help you today?": "Zertan lagun zaitzaket gaur?", @@ -628,6 +630,7 @@ "Knowledge Access": "Ezagutzarako Sarbidea", "Knowledge created successfully.": "Ezagutza ongi sortu da.", "Knowledge deleted successfully.": "Ezagutza ongi ezabatu da.", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "Ezagutza ongi berrezarri da.", "Knowledge updated successfully": "Ezagutza ongi eguneratu da.", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "{{modelId}} modeloa ez da aurkitu", "Model {{modelName}} is not vision capable": "{{modelName}} modeloak ez du ikusmen gaitasunik", "Model {{name}} is now {{status}}": "{{name}} modeloa orain {{status}} dago", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "Modeloak irudi sarrerak onartzen ditu", "Model created successfully!": "Modeloa ongi sortu da!", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "Modeloaren fitxategi sistemaren bidea detektatu da. 
Modeloaren izen laburra behar da eguneratzeko, ezin da jarraitu.", @@ -712,6 +717,7 @@ "Models": "Modeloak", "Models Access": "Modeloen sarbidea", "Models configuration saved successfully": "Modeloen konfigurazioa ongi gorde da", + "Models Public Sharing": "", "Mojeek Search API Key": "Mojeek bilaketa API gakoa", "more": "gehiago", "More": "Gehiago", @@ -836,6 +842,7 @@ "Prompt updated successfully": "Prompt-a ongi eguneratu da", "Prompts": "Prompt-ak", "Prompts Access": "Prompt sarbidea", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "Ekarri \"{{searchValue}}\" Ollama.com-etik", "Pull a model from Ollama.com": "Ekarri modelo bat Ollama.com-etik", @@ -968,9 +975,11 @@ "Share": "Partekatu", "Share Chat": "Partekatu txata", "Share to Open WebUI Community": "Partekatu OpenWebUI komunitatearekin", + "Sharing Permissions": "", "Show": "Erakutsi", "Show \"What's New\" modal on login": "Erakutsi \"Berritasunak\" modala saioa hastean", "Show Admin Details in Account Pending Overlay": "Erakutsi administratzaile xehetasunak kontu zain geruzan", + "Show Model": "", "Show shortcuts": "Erakutsi lasterbideak", "Show your support!": "Erakutsi zure babesa!", "Showcased creativity": "Erakutsitako sormena", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "Tresnek kode arbitrarioa exekutatzeko aukera ematen duen funtzio deitzeko sistema dute", "Tools have a function calling system that allows arbitrary code execution.": "Tresnek kode arbitrarioa exekutatzeko aukera ematen duen funtzio deitzeko sistema dute.", + "Tools Public Sharing": "", "Top K": "Goiko K", "Top K Reranker": "", "Top P": "Goiko P", diff --git a/src/lib/i18n/locales/fa-IR/translation.json b/src/lib/i18n/locales/fa-IR/translation.json index 146db41e052..94ebb454869 100644 --- a/src/lib/i18n/locales/fa-IR/translation.json +++ b/src/lib/i18n/locales/fa-IR/translation.json @@ -379,6 +379,7 
@@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "فعال کردن ثبت نام\u200cهای جدید", "Enabled": "", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "اطمینان حاصل کنید که فایل CSV شما شامل چهار ستون در این ترتیب است: نام، ایمیل، رمز عبور، نقش.", "Enter {{role}} message here": "پیام {{role}} را اینجا وارد کنید", "Enter a detail about yourself for your LLMs to recall": "برای ذخیره سازی اطلاعات خود، یک توضیح کوتاه درباره خود را وارد کنید", @@ -569,6 +570,7 @@ "Hex Color": "", "Hex Color - Leave empty for default color": "", "Hide": "پنهان\u200cسازی", + "Hide Model": "", "Home": "", "Host": "", "How can I help you today?": "امروز چطور می توانم کمک تان کنم؟", @@ -628,6 +630,7 @@ "Knowledge Access": "", "Knowledge created successfully.": "", "Knowledge deleted successfully.": "", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "", "Knowledge updated successfully": "", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "مدل {{modelId}} یافت نشد", "Model {{modelName}} is not vision capable": "مدل {{modelName}} قادر به بینایی نیست", "Model {{name}} is now {{status}}": "مدل {{name}} در حال حاضر {{status}}", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "", "Model created successfully!": "", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "مسیر فایل سیستم مدل یافت شد. 
برای بروزرسانی نیاز است نام کوتاه مدل وجود داشته باشد.", @@ -712,6 +717,7 @@ "Models": "مدل\u200cها", "Models Access": "", "Models configuration saved successfully": "", + "Models Public Sharing": "", "Mojeek Search API Key": "", "more": "", "More": "بیشتر", @@ -836,6 +842,7 @@ "Prompt updated successfully": "", "Prompts": "پرامپت\u200cها", "Prompts Access": "", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "بازگرداندن \"{{searchValue}}\" از Ollama.com", "Pull a model from Ollama.com": "دریافت یک مدل از Ollama.com", @@ -968,9 +975,11 @@ "Share": "اشتراک\u200cگذاری", "Share Chat": "اشتراک\u200cگذاری چت", "Share to Open WebUI Community": "اشتراک گذاری با OpenWebUI Community", + "Sharing Permissions": "", "Show": "نمایش", "Show \"What's New\" modal on login": "", "Show Admin Details in Account Pending Overlay": "", + "Show Model": "", "Show shortcuts": "نمایش میانبرها", "Show your support!": "", "Showcased creativity": "ایده\u200cآفرینی", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", + "Tools Public Sharing": "", "Top K": "Top K", "Top K Reranker": "", "Top P": "Top P", diff --git a/src/lib/i18n/locales/fi-FI/translation.json b/src/lib/i18n/locales/fi-FI/translation.json index 1768e3a8dcd..1a9741dcded 100644 --- a/src/lib/i18n/locales/fi-FI/translation.json +++ b/src/lib/i18n/locales/fi-FI/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Salli uudet rekisteröitymiset", "Enabled": "Käytössä", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Varmista, että CSV-tiedostossasi on 4 saraketta tässä järjestyksessä: Nimi, Sähköposti, Salasana, Rooli.", "Enter {{role}} message here": "Kirjoita {{role}}-viesti tähän", 
"Enter a detail about yourself for your LLMs to recall": "Kirjoita yksityiskohta itsestäsi, jonka LLM-ohjelmat voivat muistaa", @@ -569,6 +570,7 @@ "Hex Color": "Heksadesimaaliväri", "Hex Color - Leave empty for default color": "Heksadesimaaliväri - Jätä tyhjäksi, jos haluat oletusvärin", "Hide": "Piilota", + "Hide Model": "", "Home": "Koti", "Host": "Palvelin", "How can I help you today?": "Miten voin auttaa sinua tänään?", @@ -628,6 +630,7 @@ "Knowledge Access": "Tiedon käyttöoikeus", "Knowledge created successfully.": "Tietokanta luotu onnistuneesti.", "Knowledge deleted successfully.": "Tietokanta poistettu onnistuneesti.", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "Tietokanta nollattu onnistuneesti.", "Knowledge updated successfully": "Tietokanta päivitetty onnistuneesti", "Kokoro.js (Browser)": "Kokoro.js (selain)", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "Mallia {{modelId}} ei löytynyt", "Model {{modelName}} is not vision capable": "Malli {{modelName}} ei kykene näkökykyyn", "Model {{name}} is now {{status}}": "Malli {{name}} on nyt {{status}}", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "Malli hyväksyy kuvasyötteitä", "Model created successfully!": "Malli luotu onnistuneesti!", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "Mallin tiedostojärjestelmäpolku havaittu. 
Mallin lyhytnimi vaaditaan päivitykseen, ei voida jatkaa.", @@ -712,6 +717,7 @@ "Models": "Mallit", "Models Access": "Mallien käyttöoikeudet", "Models configuration saved successfully": "Mallien määritykset tallennettu onnistuneesti", + "Models Public Sharing": "", "Mojeek Search API Key": "Mojeek Search API -avain", "more": "lisää", "More": "Lisää", @@ -836,6 +842,7 @@ "Prompt updated successfully": "Kehote päivitetty onnistuneesti", "Prompts": "Kehotteet", "Prompts Access": "Kehoitteiden käyttöoikeudet", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "Lataa \"{{searchValue}}\" Ollama.comista", "Pull a model from Ollama.com": "Lataa malli Ollama.comista", @@ -968,9 +975,11 @@ "Share": "Jaa", "Share Chat": "Jaa keskustelu", "Share to Open WebUI Community": "Jaa OpenWebUI-yhteisöön", + "Sharing Permissions": "", "Show": "Näytä", "Show \"What's New\" modal on login": "Näytä \"Mitä uutta\" -modaali kirjautumisen yhteydessä", "Show Admin Details in Account Pending Overlay": "Näytä ylläpitäjän tiedot odottavan tilin päällä", + "Show Model": "", "Show shortcuts": "Näytä pikanäppäimet", "Show your support!": "Osoita tukesi!", "Showcased creativity": "Osoitti luovuutta", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "Työkalujen kutsukehote", "Tools have a function calling system that allows arbitrary code execution": "Työkaluilla on toimintokutsuihin perustuva järjestelmä, joka sallii mielivaltaisen koodin suorittamisen", "Tools have a function calling system that allows arbitrary code execution.": "Työkalut sallivat mielivaltaisen koodin suorittamisen toimintokutsuilla.", + "Tools Public Sharing": "", "Top K": "Top K", "Top K Reranker": "", "Top P": "Top P", diff --git a/src/lib/i18n/locales/fr-CA/translation.json b/src/lib/i18n/locales/fr-CA/translation.json index 3bc03318f67..844cef9bab4 100644 --- a/src/lib/i18n/locales/fr-CA/translation.json +++ b/src/lib/i18n/locales/fr-CA/translation.json @@ -379,6 +379,7 @@ 
"Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Activer les nouvelles inscriptions", "Enabled": "", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Vérifiez que votre fichier CSV comprenne les 4 colonnes dans cet ordre : Name, Email, Password, Role.", "Enter {{role}} message here": "Entrez le message {{role}} ici", "Enter a detail about yourself for your LLMs to recall": "Saisissez un détail sur vous-même que vos LLMs pourront se rappeler", @@ -569,6 +570,7 @@ "Hex Color": "", "Hex Color - Leave empty for default color": "", "Hide": "Cacher", + "Hide Model": "", "Home": "", "Host": "", "How can I help you today?": "Comment puis-je vous être utile aujourd'hui ?", @@ -628,6 +630,7 @@ "Knowledge Access": "", "Knowledge created successfully.": "", "Knowledge deleted successfully.": "", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "", "Knowledge updated successfully": "", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "Modèle {{modelId}} introuvable", "Model {{modelName}} is not vision capable": "Le modèle {{modelName}} n'a pas de capacités visuelles", "Model {{name}} is now {{status}}": "Le modèle {{name}} est désormais {{status}}.", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "", "Model created successfully!": "Le modèle a été créé avec succès !", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "Chemin du système de fichiers de modèle détecté. 
Le nom court du modèle est requis pour la mise à jour, l'opération ne peut pas être poursuivie.", @@ -712,6 +717,7 @@ "Models": "Modèles", "Models Access": "", "Models configuration saved successfully": "", + "Models Public Sharing": "", "Mojeek Search API Key": "", "more": "", "More": "Plus de", @@ -836,6 +842,7 @@ "Prompt updated successfully": "", "Prompts": "Prompts", "Prompts Access": "", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "Récupérer « {{searchValue}} » depuis Ollama.com", "Pull a model from Ollama.com": "Télécharger un modèle depuis Ollama.com", @@ -968,9 +975,11 @@ "Share": "Partager", "Share Chat": "Partage de conversation", "Share to Open WebUI Community": "Partager avec la communauté OpenWebUI", + "Sharing Permissions": "", "Show": "Montrer", "Show \"What's New\" modal on login": "", "Show Admin Details in Account Pending Overlay": "Afficher les détails de l'administrateur dans la superposition en attente du compte", + "Show Model": "", "Show shortcuts": "Afficher les raccourcis", "Show your support!": "Montre ton soutien !", "Showcased creativity": "Créativité mise en avant", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", + "Tools Public Sharing": "", "Top K": "Top K", "Top K Reranker": "", "Top P": "Top P", diff --git a/src/lib/i18n/locales/fr-FR/translation.json b/src/lib/i18n/locales/fr-FR/translation.json index 902a1a47860..1ce104c105c 100644 --- a/src/lib/i18n/locales/fr-FR/translation.json +++ b/src/lib/i18n/locales/fr-FR/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Activer les nouvelles inscriptions", "Enabled": "Activé", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": 
"Vérifiez que votre fichier CSV comprenne les 4 colonnes dans cet ordre : Name, Email, Password, Role.", "Enter {{role}} message here": "Entrez le message {{role}} ici", "Enter a detail about yourself for your LLMs to recall": "Saisissez un détail sur vous-même que vos LLMs pourront se rappeler", @@ -569,6 +570,7 @@ "Hex Color": "Couleur Hex", "Hex Color - Leave empty for default color": "Couleur Hex - Laissez vide pour la couleur par défaut", "Hide": "Cacher", + "Hide Model": "", "Home": "", "Host": "Hôte", "How can I help you today?": "Comment puis-je vous aider aujourd'hui ?", @@ -628,6 +630,7 @@ "Knowledge Access": "Accès aux connaissances", "Knowledge created successfully.": "Connaissance créée avec succès.", "Knowledge deleted successfully.": "Connaissance supprimée avec succès.", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "Connaissance réinitialisée avec succès.", "Knowledge updated successfully": "Connaissance mise à jour avec succès", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "Modèle {{modelId}} introuvable", "Model {{modelName}} is not vision capable": "Le modèle {{modelName}} n'a pas de capacités visuelles", "Model {{name}} is now {{status}}": "Le modèle {{name}} est désormais {{status}}.", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "Le modèle accepte les images en entrée", "Model created successfully!": "Le modèle a été créé avec succès !", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "Chemin du système de fichiers de modèle détecté. 
Le nom court du modèle est requis pour la mise à jour, l'opération ne peut pas être poursuivie.", @@ -712,6 +717,7 @@ "Models": "Modèles", "Models Access": "Accès aux modèles", "Models configuration saved successfully": "Configuration des modèles enregistrée avec succès", + "Models Public Sharing": "", "Mojeek Search API Key": "Clé API Mojeek", "more": "plus", "More": "Plus", @@ -836,6 +842,7 @@ "Prompt updated successfully": "Prompt mis à jour avec succès", "Prompts": "Prompts", "Prompts Access": "Accès aux prompts", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "Récupérer « {{searchValue}} » depuis Ollama.com", "Pull a model from Ollama.com": "Télécharger un modèle depuis Ollama.com", @@ -968,9 +975,11 @@ "Share": "Partager", "Share Chat": "Partage de conversation", "Share to Open WebUI Community": "Partager avec la communauté OpenWebUI", + "Sharing Permissions": "", "Show": "Afficher", "Show \"What's New\" modal on login": "Afficher la fenêtre modale \"Quoi de neuf\" lors de la connexion", "Show Admin Details in Account Pending Overlay": "Afficher les coordonnées de l'administrateur aux comptes en attente", + "Show Model": "", "Show shortcuts": "Afficher les raccourcis", "Show your support!": "Montrez votre soutien !", "Showcased creativity": "Créativité mise en avant", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "Les outils ont un système d'appel de fonction qui permet l'exécution de code arbitraire", "Tools have a function calling system that allows arbitrary code execution.": "Les outils ont un système d'appel de fonction qui permet l'exécution de code arbitraire.", + "Tools Public Sharing": "", "Top K": "Top K", "Top K Reranker": "", "Top P": "Top P", diff --git a/src/lib/i18n/locales/he-IL/translation.json b/src/lib/i18n/locales/he-IL/translation.json index 2e67b4141ba..9ce4d61aeea 100644 --- 
a/src/lib/i18n/locales/he-IL/translation.json +++ b/src/lib/i18n/locales/he-IL/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "אפשר הרשמות חדשות", "Enabled": "", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "ודא שקובץ ה-CSV שלך כולל 4 עמודות בסדר הבא: שם, דוא\"ל, סיסמה, תפקיד.", "Enter {{role}} message here": "הזן הודעת {{role}} כאן", "Enter a detail about yourself for your LLMs to recall": "הזן פרטים על עצמך כדי שLLMs יזכור", @@ -569,6 +570,7 @@ "Hex Color": "", "Hex Color - Leave empty for default color": "", "Hide": "הסתר", + "Hide Model": "", "Home": "", "Host": "", "How can I help you today?": "כיצד אוכל לעזור לך היום?", @@ -628,6 +630,7 @@ "Knowledge Access": "", "Knowledge created successfully.": "", "Knowledge deleted successfully.": "", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "", "Knowledge updated successfully": "", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "המודל {{modelId}} לא נמצא", "Model {{modelName}} is not vision capable": "דגם {{modelName}} אינו בעל יכולת ראייה", "Model {{name}} is now {{status}}": "דגם {{name}} הוא כעת {{status}}", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "", "Model created successfully!": "", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "נתיב מערכת הקבצים של המודל זוהה. 
נדרש שם קצר של המודל לעדכון, לא ניתן להמשיך.", @@ -712,6 +717,7 @@ "Models": "מודלים", "Models Access": "", "Models configuration saved successfully": "", + "Models Public Sharing": "", "Mojeek Search API Key": "", "more": "", "More": "עוד", @@ -836,6 +842,7 @@ "Prompt updated successfully": "", "Prompts": "פקודות", "Prompts Access": "", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "משוך \"{{searchValue}}\" מ-Ollama.com", "Pull a model from Ollama.com": "משוך מודל מ-Ollama.com", @@ -968,9 +975,11 @@ "Share": "שתף", "Share Chat": "שתף צ'אט", "Share to Open WebUI Community": "שתף לקהילת OpenWebUI", + "Sharing Permissions": "", "Show": "הצג", "Show \"What's New\" modal on login": "", "Show Admin Details in Account Pending Overlay": "", + "Show Model": "", "Show shortcuts": "הצג קיצורי דרך", "Show your support!": "", "Showcased creativity": "הצגת יצירתיות", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", + "Tools Public Sharing": "", "Top K": "Top K", "Top K Reranker": "", "Top P": "Top P", diff --git a/src/lib/i18n/locales/hi-IN/translation.json b/src/lib/i18n/locales/hi-IN/translation.json index 8496920b691..2ba22fc621c 100644 --- a/src/lib/i18n/locales/hi-IN/translation.json +++ b/src/lib/i18n/locales/hi-IN/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "नए साइन अप सक्रिय करें", "Enabled": "", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "सुनिश्चित करें कि आपकी CSV फ़ाइल में इस क्रम में 4 कॉलम शामिल हैं: नाम, ईमेल, पासवर्ड, भूमिका।", "Enter {{role}} message here": "यहां {{role}} संदेश दर्ज करें", "Enter a detail about yourself for your LLMs to recall": "अपने एलएलएम को याद करने के लिए अपने बारे में एक विवरण 
दर्ज करें", @@ -569,6 +570,7 @@ "Hex Color": "", "Hex Color - Leave empty for default color": "", "Hide": "छुपाएं", + "Hide Model": "", "Home": "", "Host": "", "How can I help you today?": "आज मैं आपकी कैसे मदद कर सकता हूँ?", @@ -628,6 +630,7 @@ "Knowledge Access": "", "Knowledge created successfully.": "", "Knowledge deleted successfully.": "", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "", "Knowledge updated successfully": "", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "मॉडल {{modelId}} नहीं मिला", "Model {{modelName}} is not vision capable": "मॉडल {{modelName}} दृष्टि सक्षम नहीं है", "Model {{name}} is now {{status}}": "मॉडल {{name}} अब {{status}} है", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "", "Model created successfully!": "", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "मॉडल फ़ाइल सिस्टम पथ का पता चला. अद्यतन के लिए मॉडल संक्षिप्त नाम आवश्यक है, जारी नहीं रखा जा सकता।", @@ -712,6 +717,7 @@ "Models": "सभी मॉडल", "Models Access": "", "Models configuration saved successfully": "", + "Models Public Sharing": "", "Mojeek Search API Key": "", "more": "", "More": "और..", @@ -836,6 +842,7 @@ "Prompt updated successfully": "", "Prompts": "प्रॉम्प्ट", "Prompts Access": "", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "\"{{searchValue}}\" को Ollama.com से खींचें", "Pull a model from Ollama.com": "Ollama.com से एक मॉडल खींचें", @@ -968,9 +975,11 @@ "Share": "साझा करें", "Share Chat": "चैट साझा करें", "Share to Open WebUI Community": "OpenWebUI समुदाय में साझा करें", + "Sharing Permissions": "", "Show": "दिखाओ", "Show \"What's New\" modal on login": "", "Show Admin Details in Account Pending Overlay": "", + "Show Model": "", "Show shortcuts": "शॉर्टकट दिखाएँ", "Show your support!": "", "Showcased creativity": "रचनात्मकता का प्रदर्शन किया", @@ -1087,6 
+1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", + "Tools Public Sharing": "", "Top K": "शीर्ष K", "Top K Reranker": "", "Top P": "शीर्ष P", diff --git a/src/lib/i18n/locales/hr-HR/translation.json b/src/lib/i18n/locales/hr-HR/translation.json index 3e92d9b236c..5ec5454bd1f 100644 --- a/src/lib/i18n/locales/hr-HR/translation.json +++ b/src/lib/i18n/locales/hr-HR/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Omogući nove prijave", "Enabled": "", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Provjerite da vaša CSV datoteka uključuje 4 stupca u ovom redoslijedu: Name, Email, Password, Role.", "Enter {{role}} message here": "Unesite {{role}} poruku ovdje", "Enter a detail about yourself for your LLMs to recall": "Unesite pojedinosti o sebi da bi učitali memoriju u LLM", @@ -569,6 +570,7 @@ "Hex Color": "", "Hex Color - Leave empty for default color": "", "Hide": "Sakrij", + "Hide Model": "", "Home": "", "Host": "", "How can I help you today?": "Kako vam mogu pomoći danas?", @@ -628,6 +630,7 @@ "Knowledge Access": "", "Knowledge created successfully.": "", "Knowledge deleted successfully.": "", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "", "Knowledge updated successfully": "", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "Model {{modelId}} nije pronađen", "Model {{modelName}} is not vision capable": "Model {{modelName}} ne čita vizualne impute", "Model {{name}} is now {{status}}": "Model {{name}} sada je {{status}}", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "", "Model created successfully!": "", "Model filesystem path detected. 
Model shortname is required for update, cannot continue.": "Otkriven put datotečnog sustava modela. Kratko ime modela je potrebno za ažuriranje, nije moguće nastaviti.", @@ -712,6 +717,7 @@ "Models": "Modeli", "Models Access": "", "Models configuration saved successfully": "", + "Models Public Sharing": "", "Mojeek Search API Key": "", "more": "", "More": "Više", @@ -836,6 +842,7 @@ "Prompt updated successfully": "", "Prompts": "Prompti", "Prompts Access": "", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "Povucite \"{{searchValue}}\" s Ollama.com", "Pull a model from Ollama.com": "Povucite model s Ollama.com", @@ -968,9 +975,11 @@ "Share": "Podijeli", "Share Chat": "Podijeli razgovor", "Share to Open WebUI Community": "Podijeli u OpenWebUI zajednici", + "Sharing Permissions": "", "Show": "Pokaži", "Show \"What's New\" modal on login": "", "Show Admin Details in Account Pending Overlay": "", + "Show Model": "", "Show shortcuts": "Pokaži prečace", "Show your support!": "", "Showcased creativity": "Prikazana kreativnost", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", + "Tools Public Sharing": "", "Top K": "Top K", "Top K Reranker": "", "Top P": "Top P", diff --git a/src/lib/i18n/locales/hu-HU/translation.json b/src/lib/i18n/locales/hu-HU/translation.json index 676ac9e5358..983f5a1665b 100644 --- a/src/lib/i18n/locales/hu-HU/translation.json +++ b/src/lib/i18n/locales/hu-HU/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Új regisztrációk engedélyezése", "Enabled": "Engedélyezve", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Győződj meg róla, hogy a CSV fájl tartalmazza ezt a 4 oszlopot ebben a sorrendben: 
Név, Email, Jelszó, Szerep.", "Enter {{role}} message here": "Írd ide a {{role}} üzenetet", "Enter a detail about yourself for your LLMs to recall": "Adj meg egy részletet magadról, amit az LLM-ek megjegyezhetnek", @@ -569,6 +570,7 @@ "Hex Color": "", "Hex Color - Leave empty for default color": "", "Hide": "Elrejtés", + "Hide Model": "", "Home": "", "Host": "", "How can I help you today?": "Hogyan segíthetek ma?", @@ -628,6 +630,7 @@ "Knowledge Access": "", "Knowledge created successfully.": "Tudásbázis sikeresen létrehozva.", "Knowledge deleted successfully.": "Tudásbázis sikeresen törölve.", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "Tudásbázis sikeresen visszaállítva.", "Knowledge updated successfully": "Tudásbázis sikeresen frissítve", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "A {{modelId}} modell nem található", "Model {{modelName}} is not vision capable": "A {{modelName}} modell nem képes képfeldolgozásra", "Model {{name}} is now {{status}}": "A {{name}} modell most {{status}} állapotban van", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "A modell elfogad képbemenetet", "Model created successfully!": "Modell sikeresen létrehozva!", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "Modell fájlrendszer útvonal észlelve. 
A modell rövid neve szükséges a frissítéshez, nem folytatható.", @@ -712,6 +717,7 @@ "Models": "Modellek", "Models Access": "", "Models configuration saved successfully": "", + "Models Public Sharing": "", "Mojeek Search API Key": "", "more": "több", "More": "Több", @@ -836,6 +842,7 @@ "Prompt updated successfully": "", "Prompts": "Promptok", "Prompts Access": "", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "\"{{searchValue}}\" letöltése az Ollama.com-ról", "Pull a model from Ollama.com": "Modell letöltése az Ollama.com-ról", @@ -968,9 +975,11 @@ "Share": "Megosztás", "Share Chat": "Beszélgetés megosztása", "Share to Open WebUI Community": "Megosztás az OpenWebUI közösséggel", + "Sharing Permissions": "", "Show": "Mutat", "Show \"What's New\" modal on login": "", "Show Admin Details in Account Pending Overlay": "Admin részletek megjelenítése a függő fiók átfedésben", + "Show Model": "", "Show shortcuts": "Gyorsbillentyűk megjelenítése", "Show your support!": "Mutassa meg támogatását!", "Showcased creativity": "Kreativitás bemutatva", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "Az eszközök olyan függvényhívó rendszerrel rendelkeznek, amely lehetővé teszi tetszőleges kód végrehajtását", "Tools have a function calling system that allows arbitrary code execution.": "Az eszközök olyan függvényhívó rendszerrel rendelkeznek, amely lehetővé teszi tetszőleges kód végrehajtását.", + "Tools Public Sharing": "", "Top K": "Top K", "Top K Reranker": "", "Top P": "Top P", diff --git a/src/lib/i18n/locales/id-ID/translation.json b/src/lib/i18n/locales/id-ID/translation.json index 1fd3475d27b..a0cd245013f 100644 --- a/src/lib/i18n/locales/id-ID/translation.json +++ b/src/lib/i18n/locales/id-ID/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Aktifkan Pendaftaran Baru", 
"Enabled": "", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Pastikan file CSV Anda menyertakan 4 kolom dengan urutan sebagai berikut: Nama, Email, Kata Sandi, Peran.", "Enter {{role}} message here": "Masukkan pesan {{role}} di sini", "Enter a detail about yourself for your LLMs to recall": "Masukkan detail tentang diri Anda untuk diingat oleh LLM Anda", @@ -569,6 +570,7 @@ "Hex Color": "", "Hex Color - Leave empty for default color": "", "Hide": "Sembunyikan", + "Hide Model": "", "Home": "", "Host": "", "How can I help you today?": "Ada yang bisa saya bantu hari ini?", @@ -628,6 +630,7 @@ "Knowledge Access": "", "Knowledge created successfully.": "", "Knowledge deleted successfully.": "", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "", "Knowledge updated successfully": "", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "Model {{modelId}} tidak ditemukan", "Model {{modelName}} is not vision capable": "Model {{modelName}} tidak dapat dilihat", "Model {{name}} is now {{status}}": "Model {{name}} sekarang menjadi {{status}}", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "", "Model created successfully!": "Model berhasil dibuat!", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "Jalur sistem berkas model terdeteksi. 
Nama pendek model diperlukan untuk pembaruan, tidak dapat dilanjutkan.", @@ -712,6 +717,7 @@ "Models": "Model", "Models Access": "", "Models configuration saved successfully": "", + "Models Public Sharing": "", "Mojeek Search API Key": "", "more": "", "More": "Lainnya", @@ -836,6 +842,7 @@ "Prompt updated successfully": "", "Prompts": "Prompt", "Prompts Access": "", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "Tarik \"{{searchValue}}\" dari Ollama.com", "Pull a model from Ollama.com": "Tarik model dari Ollama.com", @@ -968,9 +975,11 @@ "Share": "Berbagi", "Share Chat": "Bagikan Obrolan", "Share to Open WebUI Community": "Bagikan ke Komunitas OpenWebUI", + "Sharing Permissions": "", "Show": "Tampilkan", "Show \"What's New\" modal on login": "", "Show Admin Details in Account Pending Overlay": "Tampilkan Detail Admin di Hamparan Akun Tertunda", + "Show Model": "", "Show shortcuts": "Tampilkan pintasan", "Show your support!": "Tunjukkan dukungan Anda!", "Showcased creativity": "Menampilkan kreativitas", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", + "Tools Public Sharing": "", "Top K": "K atas", "Top K Reranker": "", "Top P": "P Atas", diff --git a/src/lib/i18n/locales/ie-GA/translation.json b/src/lib/i18n/locales/ie-GA/translation.json index e90e8d91cfe..ed443018170 100644 --- a/src/lib/i18n/locales/ie-GA/translation.json +++ b/src/lib/i18n/locales/ie-GA/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Cumasaigh Clárúcháin Nua", "Enabled": "Cumasaithe", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Déan cinnte go bhfuil 4 cholún san ord seo i do chomhad CSV: Ainm, Ríomhphost, Pasfhocal, Ról.", "Enter 
{{role}} message here": "Cuir isteach teachtaireacht {{role}} anseo", "Enter a detail about yourself for your LLMs to recall": "Cuir isteach mionsonraí fút féin chun do LLManna a mheabhrú", @@ -569,6 +570,7 @@ "Hex Color": "Dath Heics", "Hex Color - Leave empty for default color": "Dath Heics - Fág folamh don dath réamhshocraithe", "Hide": "Folaigh", + "Hide Model": "", "Home": "Baile", "Host": "Óstach", "How can I help you today?": "Conas is féidir liom cabhrú leat inniu?", @@ -628,6 +630,7 @@ "Knowledge Access": "Rochtain Eolais", "Knowledge created successfully.": "Eolas cruthaithe go rathúil.", "Knowledge deleted successfully.": "D'éirigh leis an eolas a scriosadh.", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "D'éirigh le hathshocrú eolais.", "Knowledge updated successfully": "D'éirigh leis an eolas a nuashonrú", "Kokoro.js (Browser)": "Kokoro.js (Brabhsálaí)", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "Múnla {{modelId}} gan aimsiú", "Model {{modelName}} is not vision capable": "Níl samhail {{modelName}} in ann amharc", "Model {{name}} is now {{status}}": "Tá samhail {{name}} {{status}} anois", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "Glacann múnla le hionchuir", "Model created successfully!": "Cruthaíodh múnla go rathúil!", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "Fuarthas cosán an múnla. 
Teastaíonn ainm gearr an mhúnla le haghaidh nuashonraithe, ní féidir leanúint ar aghaidh.", @@ -712,6 +717,7 @@ "Models": "Múnlaí", "Models Access": "Rochtain Múnlaí", "Models configuration saved successfully": "Sábháladh cumraíocht na múnlaí go rathúil", + "Models Public Sharing": "", "Mojeek Search API Key": "Eochair API Cuardach Mojeek", "more": "níos mó", "More": "Tuilleadh", @@ -836,6 +842,7 @@ "Prompt updated successfully": "D'éirigh leis an leid a nuashonrú", "Prompts": "Leabhair", "Prompts Access": "Rochtain ar Chuirí", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "Tarraing \"{{searchValue}}\" ó Ollama.com", "Pull a model from Ollama.com": "Tarraing múnla ó Ollama.com", @@ -968,9 +975,11 @@ "Share": "Comhroinn", "Share Chat": "Comhroinn Comhrá", "Share to Open WebUI Community": "Comhroinn le Pobal OpenWebUI", + "Sharing Permissions": "", "Show": "Taispeáin", "Show \"What's New\" modal on login": "Taispeáin módúil \"Cad atá Nua\" ar logáil isteach", "Show Admin Details in Account Pending Overlay": "Taispeáin Sonraí Riaracháin sa Chuntas ar Feitheamh Forleagan", + "Show Model": "", "Show shortcuts": "Taispeáin aicearraí", "Show your support!": "Taispeáin do thacaíocht!", "Showcased creativity": "Cruthaitheacht léirithe", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "Leid Glaonna Feidhm Uirlisí", "Tools have a function calling system that allows arbitrary code execution": "Tá córas glaonna feidhme ag uirlisí a cheadaíonn forghníomhú cód treallach", "Tools have a function calling system that allows arbitrary code execution.": "Tá córas glaonna feidhme ag uirlisí a cheadaíonn forghníomhú cód treallach.", + "Tools Public Sharing": "", "Top K": "Barr K", "Top K Reranker": "", "Top P": "Barr P", diff --git a/src/lib/i18n/locales/it-IT/translation.json b/src/lib/i18n/locales/it-IT/translation.json index 18e564d9276..a08a790d5f0 100644 --- a/src/lib/i18n/locales/it-IT/translation.json +++ 
b/src/lib/i18n/locales/it-IT/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Abilita nuove iscrizioni", "Enabled": "", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Assicurati che il tuo file CSV includa 4 colonne in questo ordine: Nome, Email, Password, Ruolo.", "Enter {{role}} message here": "Inserisci il messaggio per {{role}} qui", "Enter a detail about yourself for your LLMs to recall": "Inserisci un dettaglio su di te per che i LLM possano ricordare", @@ -569,6 +570,7 @@ "Hex Color": "", "Hex Color - Leave empty for default color": "", "Hide": "Nascondi", + "Hide Model": "", "Home": "", "Host": "", "How can I help you today?": "Come posso aiutarti oggi?", @@ -628,6 +630,7 @@ "Knowledge Access": "", "Knowledge created successfully.": "", "Knowledge deleted successfully.": "", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "", "Knowledge updated successfully": "", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "Modello {{modelId}} non trovato", "Model {{modelName}} is not vision capable": "Il modello {{modelName}} non è in grado di vedere", "Model {{name}} is now {{status}}": "Il modello {{name}} è ora {{status}}", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "", "Model created successfully!": "", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "Percorso del filesystem del modello rilevato. 
Il nome breve del modello è richiesto per l'aggiornamento, impossibile continuare.", @@ -712,6 +717,7 @@ "Models": "Modelli", "Models Access": "", "Models configuration saved successfully": "", + "Models Public Sharing": "", "Mojeek Search API Key": "", "more": "", "More": "Altro", @@ -836,6 +842,7 @@ "Prompt updated successfully": "", "Prompts": "Prompt", "Prompts Access": "", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "Estrai \"{{searchValue}}\" da Ollama.com", "Pull a model from Ollama.com": "Estrai un modello da Ollama.com", @@ -968,9 +975,11 @@ "Share": "Condividi", "Share Chat": "Condividi chat", "Share to Open WebUI Community": "Condividi con la comunità OpenWebUI", + "Sharing Permissions": "", "Show": "Mostra", "Show \"What's New\" modal on login": "", "Show Admin Details in Account Pending Overlay": "", + "Show Model": "", "Show shortcuts": "Mostra", "Show your support!": "", "Showcased creativity": "Creatività messa in mostra", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", + "Tools Public Sharing": "", "Top K": "Top K", "Top K Reranker": "", "Top P": "Top P", diff --git a/src/lib/i18n/locales/ja-JP/translation.json b/src/lib/i18n/locales/ja-JP/translation.json index 1c346422ace..93a225899e9 100644 --- a/src/lib/i18n/locales/ja-JP/translation.json +++ b/src/lib/i18n/locales/ja-JP/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "新規登録を有効にする", "Enabled": "有効", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "CSVファイルに4つの列が含まれていることを確認してください: Name, Email, Password, Role.", "Enter {{role}} message here": "{{role}} メッセージをここに入力してください", "Enter a detail about yourself for your LLMs to recall": "LLM 
が記憶するために、自分についての詳細を入力してください", @@ -569,6 +570,7 @@ "Hex Color": "", "Hex Color - Leave empty for default color": "", "Hide": "非表示", + "Hide Model": "", "Home": "", "Host": "", "How can I help you today?": "今日はどのようにお手伝いしましょうか?", @@ -628,6 +630,7 @@ "Knowledge Access": "", "Knowledge created successfully.": "ナレッジベースの作成に成功しました", "Knowledge deleted successfully.": "ナレッジベースの削除に成功しました", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "ナレッジベースのリセットに成功しました", "Knowledge updated successfully": "ナレッジベースのアップデートに成功しました", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "モデル {{modelId}} が見つかりません", "Model {{modelName}} is not vision capable": "モデル {{modelName}} は視覚に対応していません", "Model {{name}} is now {{status}}": "モデル {{name}} は {{status}} になりました。", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "", "Model created successfully!": "", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "モデルファイルシステムパスが検出されました。モデルの短縮名が必要です。更新できません。", @@ -712,6 +717,7 @@ "Models": "モデル", "Models Access": "", "Models configuration saved successfully": "", + "Models Public Sharing": "", "Mojeek Search API Key": "", "more": "", "More": "もっと見る", @@ -836,6 +842,7 @@ "Prompt updated successfully": "", "Prompts": "プロンプト", "Prompts Access": "", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "Ollama.com から \"{{searchValue}}\" をプル", "Pull a model from Ollama.com": "Ollama.com からモデルをプル", @@ -968,9 +975,11 @@ "Share": "共有", "Share Chat": "チャットを共有", "Share to Open WebUI Community": "OpenWebUI コミュニティに共有", + "Sharing Permissions": "", "Show": "表示", "Show \"What's New\" modal on login": "", "Show Admin Details in Account Pending Overlay": "", + "Show Model": "", "Show shortcuts": "表示", "Show your support!": "", "Showcased creativity": "創造性を披露", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function 
calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", + "Tools Public Sharing": "", "Top K": "トップ K", "Top K Reranker": "", "Top P": "トップ P", diff --git a/src/lib/i18n/locales/ka-GE/translation.json b/src/lib/i18n/locales/ka-GE/translation.json index 7abd8eb523a..947f8ebe681 100644 --- a/src/lib/i18n/locales/ka-GE/translation.json +++ b/src/lib/i18n/locales/ka-GE/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "ახალი რეგისტრაციების ჩართვა", "Enabled": "ჩართულია", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "დარწმუნდით, რომ თქვენი CSV-ფაილი შეიცავს 4 ველს ამ მიმდევრობით: სახელი, ელფოსტა, პაროლი, როლი.", "Enter {{role}} message here": "შეიყვანე {{role}} შეტყობინება აქ", "Enter a detail about yourself for your LLMs to recall": "შეიყვანეთ რამე თქვენს შესახებ, რომ თქვენმა LLM-მა გაიხსენოს", @@ -569,6 +570,7 @@ "Hex Color": "თექვსმეტობითი ფერი", "Hex Color - Leave empty for default color": "", "Hide": "დამალვა", + "Hide Model": "", "Home": "მთავარი", "Host": "ჰოსტი", "How can I help you today?": "რით შემიძლია დაგეხმაროთ დღეს?", @@ -628,6 +630,7 @@ "Knowledge Access": "", "Knowledge created successfully.": "", "Knowledge deleted successfully.": "", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "", "Knowledge updated successfully": "", "Kokoro.js (Browser)": "Kokoro.js (ბრაუზერი)", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "მოდელი {{modelId}} აღმოჩენილი არაა", "Model {{modelName}} is not vision capable": "Model {{modelName}} is not vision capable", "Model {{name}} is now {{status}}": "Model {{name}} is now {{status}}", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "", "Model created successfully!": "", "Model filesystem path detected. 
Model shortname is required for update, cannot continue.": "აღმოჩენილია მოდელის ფაილური სისტემის ბილიკი. განახლებისთვის საჭიროა მოდელის მოკლე სახელი, გაგრძელება შეუძლებელია.", @@ -712,6 +717,7 @@ "Models": "მოდელები", "Models Access": "მოდელის წვდომა", "Models configuration saved successfully": "", + "Models Public Sharing": "", "Mojeek Search API Key": "", "more": "მეტი", "More": "მეტი", @@ -836,6 +842,7 @@ "Prompt updated successfully": "", "Prompts": "მოთხოვნები", "Prompts Access": "", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "\"{{searchValue}}\"-ის გადმოწერა Ollama.com-იდან", "Pull a model from Ollama.com": "მოდელის გადმოწერა Ollama.com-დან", @@ -968,9 +975,11 @@ "Share": "გაზიარება", "Share Chat": "ჩატის გაზიარება", "Share to Open WebUI Community": "გაზიარება Open WebUI-ის საზოგადოებასთან", + "Sharing Permissions": "", "Show": "ჩვენება", "Show \"What's New\" modal on login": "", "Show Admin Details in Account Pending Overlay": "", + "Show Model": "", "Show shortcuts": "მალსახმობების ჩვენება", "Show your support!": "", "Showcased creativity": "გამოკვეთილი კრეატიულობა", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", + "Tools Public Sharing": "", "Top K": "ტოპ K", "Top K Reranker": "", "Top P": "ტოპ P", diff --git a/src/lib/i18n/locales/ko-KR/translation.json b/src/lib/i18n/locales/ko-KR/translation.json index a5af6710458..4d4dec87427 100644 --- a/src/lib/i18n/locales/ko-KR/translation.json +++ b/src/lib/i18n/locales/ko-KR/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "새 회원가입 활성화", "Enabled": "활성화됨", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "CSV 파일에 이름, 이메일, 비밀번호, 역할 4개의 열이 순서대로 포함되어 있는지 
확인하세요.", "Enter {{role}} message here": "여기에 {{role}} 메시지 입력", "Enter a detail about yourself for your LLMs to recall": "자신에 대한 세부사항을 입력하여 LLM들이 기억할 수 있도록 하세요.", @@ -569,6 +570,7 @@ "Hex Color": "", "Hex Color - Leave empty for default color": "", "Hide": "숨기기", + "Hide Model": "", "Home": "", "Host": "", "How can I help you today?": "오늘 어떻게 도와드릴까요?", @@ -628,6 +630,7 @@ "Knowledge Access": "지식 접근", "Knowledge created successfully.": "성공적으로 지식 기반이 생성되었습니다", "Knowledge deleted successfully.": "성공적으로 지식 기반이 삭제되었습니다", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "성공적으로 지식 기반이 초기화되었습니다", "Knowledge updated successfully": "성공적으로 지식 기반이 업데이트되었습니다", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "{{modelId}} 모델을 찾을 수 없습니다.", "Model {{modelName}} is not vision capable": "{{modelName}} 모델은 비전을 사용할 수 없습니다.", "Model {{name}} is now {{status}}": "{{name}} 모델은 이제 {{status}} 상태입니다.", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "모델이 이미지 삽입을 허용합니다", "Model created successfully!": "성공적으로 모델이 생성되었습니다", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "모델 파일 시스템 경로가 감지되었습니다. 
업데이트하려면 모델 단축 이름이 필요하며 계속할 수 없습니다.", @@ -712,6 +717,7 @@ "Models": "모델", "Models Access": "모델 접근", "Models configuration saved successfully": "", + "Models Public Sharing": "", "Mojeek Search API Key": "Mojeek Search API 키", "more": "더보기", "More": "더보기", @@ -836,6 +842,7 @@ "Prompt updated successfully": "성공적으로 프롬프트를 수정했습니다", "Prompts": "프롬프트", "Prompts Access": "프롬프트 접근", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "Ollama.com에서 \"{{searchValue}}\" 가져오기", "Pull a model from Ollama.com": "Ollama.com에서 모델 가져오기(pull)", @@ -968,9 +975,11 @@ "Share": "공유", "Share Chat": "채팅 공유", "Share to Open WebUI Community": "OpenWebUI 커뮤니티에 공유", + "Sharing Permissions": "", "Show": "보기", "Show \"What's New\" modal on login": "로그인시 \"새로운 기능\" 모달 보기", "Show Admin Details in Account Pending Overlay": "사용자용 계정 보류 설명창에, 관리자 상세 정보 노출", + "Show Model": "", "Show shortcuts": "단축키 보기", "Show your support!": "당신의 응원을 보내주세요!", "Showcased creativity": "창의성 발휘", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "도구에 임의 코드 실행을 허용하는 함수가 포함되어 있습니다", "Tools have a function calling system that allows arbitrary code execution.": "도구에 임의 코드 실행을 허용하는 함수가 포함되어 있습니다.", + "Tools Public Sharing": "", "Top K": "Top K", "Top K Reranker": "", "Top P": "Top P", diff --git a/src/lib/i18n/locales/lt-LT/translation.json b/src/lib/i18n/locales/lt-LT/translation.json index 2f1cc2ea3b4..8b0f53d2d18 100644 --- a/src/lib/i18n/locales/lt-LT/translation.json +++ b/src/lib/i18n/locales/lt-LT/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Aktyvuoti naujas registracijas", "Enabled": "Leisti", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Įsitikinkite, kad CSV failas turi 4 kolonas šiuo eiliškumu: Name, Email, Password, Role.", "Enter {{role}} 
message here": "Įveskite {{role}} žinutę čia", "Enter a detail about yourself for your LLMs to recall": "Įveskite informaciją apie save jūsų modelio atminčiai", @@ -569,6 +570,7 @@ "Hex Color": "", "Hex Color - Leave empty for default color": "", "Hide": "Paslėpti", + "Hide Model": "", "Home": "", "Host": "", "How can I help you today?": "Kuo galėčiau Jums padėti ?", @@ -628,6 +630,7 @@ "Knowledge Access": "", "Knowledge created successfully.": "", "Knowledge deleted successfully.": "", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "", "Knowledge updated successfully": "", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "Modelis {{modelId}} nerastas", "Model {{modelName}} is not vision capable": "Modelis {{modelName}} neturi vaizdo gebėjimų", "Model {{name}} is now {{status}}": "Modelis {{name}} dabar {{status}}", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "", "Model created successfully!": "Modelis sukurtas sėkmingai", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "Modelio failų sistemos kelias aptiktas. 
Reikalingas trumpas modelio pavadinimas atnaujinimui.", @@ -712,6 +717,7 @@ "Models": "Modeliai", "Models Access": "", "Models configuration saved successfully": "", + "Models Public Sharing": "", "Mojeek Search API Key": "", "more": "", "More": "Daugiau", @@ -836,6 +842,7 @@ "Prompt updated successfully": "", "Prompts": "Užklausos", "Prompts Access": "", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "Rasti \"{{searchValue}}\" iš Ollama.com", "Pull a model from Ollama.com": "Gauti modelį iš Ollama.com", @@ -968,9 +975,11 @@ "Share": "Dalintis", "Share Chat": "Dalintis pokalbiu", "Share to Open WebUI Community": "Dalintis su OpenWebUI bendruomene", + "Sharing Permissions": "", "Show": "Rodyti", "Show \"What's New\" modal on login": "", "Show Admin Details in Account Pending Overlay": "Rodyti administratoriaus duomenis laukiant paskyros patvirtinimo", + "Show Model": "", "Show shortcuts": "Rodyti trumpinius", "Show your support!": "Palaikykite", "Showcased creativity": "Kūrybingų užklausų paroda", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "Įrankiai gali naudoti funkcijas ir leisti vykdyti kodą", "Tools have a function calling system that allows arbitrary code execution.": "Įrankiai gali naudoti funkcijas ir leisti vykdyti kodą", + "Tools Public Sharing": "", "Top K": "Top K", "Top K Reranker": "", "Top P": "Top P", diff --git a/src/lib/i18n/locales/ms-MY/translation.json b/src/lib/i18n/locales/ms-MY/translation.json index 6554e14e193..4ec87734f19 100644 --- a/src/lib/i18n/locales/ms-MY/translation.json +++ b/src/lib/i18n/locales/ms-MY/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Benarkan Pendaftaran Baharu", "Enabled": "Dibenarkan", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": 
"astikan fail CSV anda mengandungi 4 lajur dalam susunan ini: Nama, E-mel, Kata Laluan, Peranan.", "Enter {{role}} message here": "Masukkan mesej {{role}} di sini", "Enter a detail about yourself for your LLMs to recall": "Masukkan butiran tentang diri anda untuk diingati oleh LLM anda", @@ -569,6 +570,7 @@ "Hex Color": "", "Hex Color - Leave empty for default color": "", "Hide": "Sembunyi", + "Hide Model": "", "Home": "", "Host": "", "How can I help you today?": "Bagaimana saya boleh membantu anda hari ini?", @@ -628,6 +630,7 @@ "Knowledge Access": "", "Knowledge created successfully.": "", "Knowledge deleted successfully.": "", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "", "Knowledge updated successfully": "", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "Model {{ modelId }} tidak dijumpai", "Model {{modelName}} is not vision capable": "Model {{ modelName }} tidak mempunyai keupayaan penglihatan", "Model {{name}} is now {{status}}": "Model {{name}} kini {{status}}", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "", "Model created successfully!": "Model berjaya dibuat!", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "Laluan sistem fail model dikesan. 
Nama pendek model diperlukan untuk kemas kini, tidak boleh diteruskan.", @@ -712,6 +717,7 @@ "Models": "Model", "Models Access": "", "Models configuration saved successfully": "", + "Models Public Sharing": "", "Mojeek Search API Key": "", "more": "", "More": "Lagi", @@ -836,6 +842,7 @@ "Prompt updated successfully": "", "Prompts": "Gesaan", "Prompts Access": "", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "Tarik \"{{ searchValue }}\" daripada Ollama.com", "Pull a model from Ollama.com": "Tarik model dari Ollama.com", @@ -968,9 +975,11 @@ "Share": "Kongsi", "Share Chat": "Kongsi Perbualan", "Share to Open WebUI Community": "Kongsi kepada Komuniti OpenWebUI", + "Sharing Permissions": "", "Show": "Tunjukkan", "Show \"What's New\" modal on login": "", "Show Admin Details in Account Pending Overlay": "Tunjukkan Butiran Pentadbir dalam Akaun Menunggu Tindanan", + "Show Model": "", "Show shortcuts": "Tunjukkan pintasan", "Show your support!": "Tunjukkan sokongan anda!", "Showcased creativity": "eativiti yang dipamerkan", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "Alatan mempunyai sistem panggilan fungsi yang membolehkan pelaksanaan kod sewenang-wenangnya", "Tools have a function calling system that allows arbitrary code execution.": "Alatan mempunyai sistem panggilan fungsi yang membolehkan pelaksanaan kod sewenang-wenangnya.", + "Tools Public Sharing": "", "Top K": "'Top K'", "Top K Reranker": "", "Top P": "'Top P'", diff --git a/src/lib/i18n/locales/nb-NO/translation.json b/src/lib/i18n/locales/nb-NO/translation.json index 58a029e10e7..59a45be710d 100644 --- a/src/lib/i18n/locales/nb-NO/translation.json +++ b/src/lib/i18n/locales/nb-NO/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Aktiver nye registreringer", "Enabled": "Aktivert", + "Enforce Temporary 
Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Sørg for at CSV-filen din inkluderer fire kolonner i denne rekkefølgen: Navn, E-post, Passord, Rolle.", "Enter {{role}} message here": "Skriv inn {{role}} melding her", "Enter a detail about yourself for your LLMs to recall": "Skriv inn en detalj om deg selv som språkmodellene dine kan huske", @@ -569,6 +570,7 @@ "Hex Color": "Hex-farge", "Hex Color - Leave empty for default color": "Hex-farge – la stå tom for standard farge", "Hide": "Skjul", + "Hide Model": "", "Home": "Hjem", "Host": "Host", "How can I help you today?": "Hva kan jeg hjelpe deg med i dag?", @@ -628,6 +630,7 @@ "Knowledge Access": "Tilgang til kunnskap", "Knowledge created successfully.": "Kunnskap opprettet.", "Knowledge deleted successfully.": "Kunnskap slettet.", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "Tilbakestilling av kunnskap vellykket.", "Knowledge updated successfully": "Kunnskap oppdatert", "Kokoro.js (Browser)": "Kokoro.js (nettleser)", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "Finner ikke modellen {{modelId}}", "Model {{modelName}} is not vision capable": "Modellen {{modelName}} er ikke egnet til visuelle data", "Model {{name}} is now {{status}}": "Modellen {{name}} er nå {{status}}", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "Modellen godtar bildeinndata", "Model created successfully!": "Modellen er opprettet!", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "Modellfilsystembane oppdaget. 
Kan ikke fortsette fordi modellens kortnavn er påkrevd for oppdatering.", @@ -712,6 +717,7 @@ "Models": "Modeller", "Models Access": "Tilgang til modeller", "Models configuration saved successfully": "Kofigurasjon av modeller er lagret", + "Models Public Sharing": "", "Mojeek Search API Key": "API-nøekkel for Mojeek Search", "more": "mer", "More": "Mer", @@ -836,6 +842,7 @@ "Prompt updated successfully": "Ledetekst oppdatert", "Prompts": "Ledetekster", "Prompts Access": "Tilgang til ledetekster", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "Hent {{searchValue}} fra Ollama.com", "Pull a model from Ollama.com": "Hent en modell fra Ollama.com", @@ -968,9 +975,11 @@ "Share": "Del", "Share Chat": "Del chat", "Share to Open WebUI Community": "Del med OpenWebUI-fellesskapet", + "Sharing Permissions": "", "Show": "Vis", "Show \"What's New\" modal on login": "Vis \"Hva er nytt\"-modal ved innlogging", "Show Admin Details in Account Pending Overlay": "Vis administratordetaljer i ventende kontovisning", + "Show Model": "", "Show shortcuts": "Vis snarveier", "Show your support!": "Vis din støtte!", "Showcased creativity": "Fremhevet kreativitet", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "Ledetekst for kalling av verktøyfunksjonen", "Tools have a function calling system that allows arbitrary code execution": "Verktøy inneholder et funksjonskallsystem som tillater vilkårlig kodekjøring", "Tools have a function calling system that allows arbitrary code execution.": "Verktøy inneholder et funksjonskallsystem som tillater vilkårlig kodekjøring.", + "Tools Public Sharing": "", "Top K": "Top K", "Top K Reranker": "", "Top P": "Top P", diff --git a/src/lib/i18n/locales/nl-NL/translation.json b/src/lib/i18n/locales/nl-NL/translation.json index e6e202918c5..342aac6a28b 100644 --- a/src/lib/i18n/locales/nl-NL/translation.json +++ b/src/lib/i18n/locales/nl-NL/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for 
controlling perplexity.": "", "Enable New Sign Ups": "Schakel nieuwe registraties in", "Enabled": "Ingeschakeld", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Zorg ervoor dat uw CSV-bestand de volgende vier kolommen in deze volgorde bevat: Naam, E-mail, Wachtwoord, Rol.", "Enter {{role}} message here": "Voeg {{role}} bericht hier toe", "Enter a detail about yourself for your LLMs to recall": "Voer een detail over jezelf in zodat LLM's het kunnen onthouden", @@ -569,6 +570,7 @@ "Hex Color": "Hex-kleur", "Hex Color - Leave empty for default color": "Hex-kleur - laat leeg voor standaardkleur", "Hide": "Verberg", + "Hide Model": "", "Home": "", "Host": "Host", "How can I help you today?": "Hoe kan ik je vandaag helpen?", @@ -628,6 +630,7 @@ "Knowledge Access": "Kennistoegang", "Knowledge created successfully.": "Kennis succesvol aangemaakt", "Knowledge deleted successfully.": "Kennis succesvol verwijderd", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "Kennis succesvol gereset", "Knowledge updated successfully": "Kennis succesvol bijgewerkt", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "Model {{modelId}} niet gevonden", "Model {{modelName}} is not vision capable": "Model {{modelName}} is niet geschikt voor visie", "Model {{name}} is now {{status}}": "Model {{name}} is nu {{status}}", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "Model accepteerd afbeeldingsinvoer", "Model created successfully!": "Model succesvol gecreëerd", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "Model filesystem path gedetecteerd. 
Model shortname is vereist voor update, kan niet doorgaan.", @@ -712,6 +717,7 @@ "Models": "Modellen", "Models Access": "Modellentoegang", "Models configuration saved successfully": "Modellenconfiguratie succeslvol opgeslagen", + "Models Public Sharing": "", "Mojeek Search API Key": "Mojeek Search API-sleutel", "more": "Meer", "More": "Meer", @@ -836,6 +842,7 @@ "Prompt updated successfully": "Prompt succesvol bijgewerkt", "Prompts": "Prompts", "Prompts Access": "Prompttoegang", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "Haal \"{{searchValue}}\" uit Ollama.com", "Pull a model from Ollama.com": "Haal een model van Ollama.com", @@ -968,9 +975,11 @@ "Share": "Delen", "Share Chat": "Deel chat", "Share to Open WebUI Community": "Deel naar OpenWebUI-community", + "Sharing Permissions": "", "Show": "Toon", "Show \"What's New\" modal on login": "Toon \"Wat is nieuw\" bij inloggen", "Show Admin Details in Account Pending Overlay": "Admin-details weergeven in overlay in afwachting van account", + "Show Model": "", "Show shortcuts": "Toon snelkoppelingen", "Show your support!": "Toon je steun", "Showcased creativity": "Toonde creativiteit", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "Gereedschappen hebben een systeem voor het aanroepen van functies waarmee willekeurige code kan worden uitgevoerd", "Tools have a function calling system that allows arbitrary code execution.": "Gereedschappen hebben een systeem voor het aanroepen van functies waarmee willekeurige code kan worden uitgevoerd", + "Tools Public Sharing": "", "Top K": "Top K", "Top K Reranker": "", "Top P": "Top P", diff --git a/src/lib/i18n/locales/pa-IN/translation.json b/src/lib/i18n/locales/pa-IN/translation.json index 46a288531b1..c0148a70d98 100644 --- a/src/lib/i18n/locales/pa-IN/translation.json +++ b/src/lib/i18n/locales/pa-IN/translation.json @@ -379,6 +379,7 @@ 
"Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "ਨਵੇਂ ਸਾਈਨ ਅਪ ਯੋਗ ਕਰੋ", "Enabled": "", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "ਸੁਨਿਸ਼ਚਿਤ ਕਰੋ ਕਿ ਤੁਹਾਡੀ CSV ਫਾਈਲ ਵਿੱਚ ਇਸ ਕ੍ਰਮ ਵਿੱਚ 4 ਕਾਲਮ ਹਨ: ਨਾਮ, ਈਮੇਲ, ਪਾਸਵਰਡ, ਭੂਮਿਕਾ।", "Enter {{role}} message here": "{{role}} ਸੁਨੇਹਾ ਇੱਥੇ ਦਰਜ ਕਰੋ", "Enter a detail about yourself for your LLMs to recall": "ਤੁਹਾਡੇ LLMs ਨੂੰ ਸੁਨੇਹਾ ਕਰਨ ਲਈ ਸੁਨੇਹਾ ਇੱਥੇ ਦਰਜ ਕਰੋ", @@ -569,6 +570,7 @@ "Hex Color": "", "Hex Color - Leave empty for default color": "", "Hide": "ਲੁਕਾਓ", + "Hide Model": "", "Home": "", "Host": "", "How can I help you today?": "ਮੈਂ ਅੱਜ ਤੁਹਾਡੀ ਕਿਵੇਂ ਮਦਦ ਕਰ ਸਕਦਾ ਹਾਂ?", @@ -628,6 +630,7 @@ "Knowledge Access": "", "Knowledge created successfully.": "", "Knowledge deleted successfully.": "", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "", "Knowledge updated successfully": "", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "ਮਾਡਲ {{modelId}} ਨਹੀਂ ਮਿਲਿਆ", "Model {{modelName}} is not vision capable": "ਮਾਡਲ {{modelName}} ਦ੍ਰਿਸ਼ਟੀ ਸਮਰੱਥ ਨਹੀਂ ਹੈ", "Model {{name}} is now {{status}}": "ਮਾਡਲ {{name}} ਹੁਣ {{status}} ਹੈ", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "", "Model created successfully!": "", "Model filesystem path detected. 
Model shortname is required for update, cannot continue.": "ਮਾਡਲ ਫਾਈਲਸਿਸਟਮ ਪੱਥ ਪਾਇਆ ਗਿਆ। ਅੱਪਡੇਟ ਲਈ ਮਾਡਲ ਸ਼ੌਰਟਨੇਮ ਦੀ ਲੋੜ ਹੈ, ਜਾਰੀ ਨਹੀਂ ਰੱਖ ਸਕਦੇ।", @@ -712,6 +717,7 @@ "Models": "ਮਾਡਲ", "Models Access": "", "Models configuration saved successfully": "", + "Models Public Sharing": "", "Mojeek Search API Key": "", "more": "", "More": "ਹੋਰ", @@ -836,6 +842,7 @@ "Prompt updated successfully": "", "Prompts": "ਪ੍ਰੰਪਟ", "Prompts Access": "", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "ਓਲਾਮਾ.ਕਾਮ ਤੋਂ \"{{searchValue}}\" ਖਿੱਚੋ", "Pull a model from Ollama.com": "ਓਲਾਮਾ.ਕਾਮ ਤੋਂ ਇੱਕ ਮਾਡਲ ਖਿੱਚੋ", @@ -968,9 +975,11 @@ "Share": "ਸਾਂਝਾ ਕਰੋ", "Share Chat": "ਗੱਲਬਾਤ ਸਾਂਝੀ ਕਰੋ", "Share to Open WebUI Community": "ਓਪਨਵੈਬਯੂਆਈ ਕਮਿਊਨਿਟੀ ਨਾਲ ਸਾਂਝਾ ਕਰੋ", + "Sharing Permissions": "", "Show": "ਦਿਖਾਓ", "Show \"What's New\" modal on login": "", "Show Admin Details in Account Pending Overlay": "", + "Show Model": "", "Show shortcuts": "ਸ਼ਾਰਟਕਟ ਦਿਖਾਓ", "Show your support!": "", "Showcased creativity": "ਸਿਰਜਣਾਤਮਕਤਾ ਦਿਖਾਈ", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", + "Tools Public Sharing": "", "Top K": "ਸਿਖਰ K", "Top K Reranker": "", "Top P": "ਸਿਖਰ P", diff --git a/src/lib/i18n/locales/pl-PL/translation.json b/src/lib/i18n/locales/pl-PL/translation.json index 9f27a88699d..0d7185a2060 100644 --- a/src/lib/i18n/locales/pl-PL/translation.json +++ b/src/lib/i18n/locales/pl-PL/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Włącz nowe rejestracje", "Enabled": "Włączone", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Upewnij się, że twój plik CSV zawiera dokładnie 4 kolumny w następującej kolejności: Nazwa, Email, Hasło, Rola.", "Enter 
{{role}} message here": "Wprowadź komunikat dla {{role}} tutaj", "Enter a detail about yourself for your LLMs to recall": "Podaj informacje o sobie, aby LLMs mogły je przypomnieć.", @@ -569,6 +570,7 @@ "Hex Color": "Kolor heksadecymalny", "Hex Color - Leave empty for default color": "Kolor heksadecymalny - pozostaw puste dla domyślnego koloru", "Hide": "Ukryj", + "Hide Model": "", "Home": "Dom", "Host": "Serwer", "How can I help you today?": "Jak mogę Ci dzisiaj pomóc?", @@ -628,6 +630,7 @@ "Knowledge Access": "Dostęp do wiedzy", "Knowledge created successfully.": "Pomyślnie utworzona wiedza.", "Knowledge deleted successfully.": "Wiedza została usunięta pomyślnie.", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "Pomyślnie zresetowano wiedzę.", "Knowledge updated successfully": "Wiedza zaktualizowana pomyślnie", "Kokoro.js (Browser)": "Kokoro.js (Przeglądarka)", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "Model o identyfikatorze {{modelId}} nie został znaleziony.", "Model {{modelName}} is not vision capable": "Model {{modelName}} nie jest zdolny do widzenia", "Model {{name}} is now {{status}}": "Model {{name}} jest teraz {{status}}", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "Model przyjmuje wejścia obrazowe", "Model created successfully!": "Model utworzony pomyślnie!", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "Wykryto ścieżkę systemu plików modelu. 
Podanie krótkiej nazwy modelu jest wymagane do aktualizacji, nie można kontynuować.", @@ -712,6 +717,7 @@ "Models": "Modele", "Models Access": "Dostęp do modeli", "Models configuration saved successfully": "Konfiguracja modeli została zapisana pomyślnie", + "Models Public Sharing": "", "Mojeek Search API Key": "Klucz API Mojeek Search", "more": "więcej", "More": "Więcej", @@ -836,6 +842,7 @@ "Prompt updated successfully": "Podpowiedź została zaktualizowana pomyślnie.", "Prompts": "Podpowiedzi", "Prompts Access": "Dostęp do podpowiedzi", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "Pobierz \"{{searchValue}}\" z Ollama.com", "Pull a model from Ollama.com": "Pobierz model z Ollama.com", @@ -968,9 +975,11 @@ "Share": "Podziel się", "Share Chat": "Udostępnij rozmowę", "Share to Open WebUI Community": "Udostępnij w społeczności OpenWebUI", + "Sharing Permissions": "", "Show": "Wyświetl", "Show \"What's New\" modal on login": "Wyświetl okno dialogowe \"What's New\" podczas logowania", "Show Admin Details in Account Pending Overlay": "Wyświetl szczegóły administratora w okienu informacyjnym o potrzebie zatwierdzenia przez administratora konta użytkownika", + "Show Model": "", "Show shortcuts": "Wyświetl skróty", "Show your support!": "Wyraź swoje poparcie!", "Showcased creativity": "Prezentacja kreatywności", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "Narzędzia Funkcja Wywołania Prompta", "Tools have a function calling system that allows arbitrary code execution": "Narzędzia mają funkcję wywoływania systemu, która umożliwia wykonywanie dowolnego kodu", "Tools have a function calling system that allows arbitrary code execution.": "Narzędzia mają funkcję wywoływania systemu, która umożliwia wykonanie dowolnego kodu.", + "Tools Public Sharing": "", "Top K": "Najlepsze K", "Top K Reranker": "", "Top P": "Najlepsze P", diff --git a/src/lib/i18n/locales/pt-BR/translation.json b/src/lib/i18n/locales/pt-BR/translation.json 
index d132665eac1..ee62030b419 100644 --- a/src/lib/i18n/locales/pt-BR/translation.json +++ b/src/lib/i18n/locales/pt-BR/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Ativar Novos Cadastros", "Enabled": "Ativado", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Certifique-se de que seu arquivo CSV inclua 4 colunas nesta ordem: Nome, Email, Senha, Função.", "Enter {{role}} message here": "Digite a mensagem de {{role}} aqui", "Enter a detail about yourself for your LLMs to recall": "Digite um detalhe sobre você para seus LLMs lembrarem", @@ -569,6 +570,7 @@ "Hex Color": "Cor hexadecimal", "Hex Color - Leave empty for default color": "Cor Hexadecimal - Deixe em branco para a cor padrão", "Hide": "Ocultar", + "Hide Model": "", "Home": "", "Host": "Servidor", "How can I help you today?": "Como posso ajudar você hoje?", @@ -628,6 +630,7 @@ "Knowledge Access": "Acesso ao Conhecimento", "Knowledge created successfully.": "Conhecimento criado com sucesso.", "Knowledge deleted successfully.": "Conhecimento excluído com sucesso.", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "Conhecimento resetado com sucesso.", "Knowledge updated successfully": "Conhecimento atualizado com sucesso", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "Modelo {{modelId}} não encontrado", "Model {{modelName}} is not vision capable": "Modelo {{modelName}} não é capaz de visão", "Model {{name}} is now {{status}}": "Modelo {{name}} está agora {{status}}", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "Modelo aceita entradas de imagens", "Model created successfully!": "Modelo criado com sucesso!", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "Caminho do sistema de arquivos do modelo detectado. 
Nome curto do modelo é necessário para atualização, não é possível continuar.", @@ -712,6 +717,7 @@ "Models": "Modelos", "Models Access": "Acesso aos Modelos", "Models configuration saved successfully": "", + "Models Public Sharing": "", "Mojeek Search API Key": "Chave de API Mojeel Search", "more": "mais", "More": "Mais", @@ -836,6 +842,7 @@ "Prompt updated successfully": "Prompt atualizado com sucesso", "Prompts": "Prompts", "Prompts Access": "Acessar prompts", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "Obter \"{{searchValue}}\" de Ollama.com", "Pull a model from Ollama.com": "Obter um modelo de Ollama.com", @@ -968,9 +975,11 @@ "Share": "Compartilhar", "Share Chat": "Compartilhar Chat", "Share to Open WebUI Community": "Compartilhar com a Comunidade OpenWebUI", + "Sharing Permissions": "", "Show": "Mostrar", "Show \"What's New\" modal on login": "Mostrar \"O que há de Novo\" no login", "Show Admin Details in Account Pending Overlay": "Mostrar Detalhes do Administrador na Sobreposição de Conta Pendentes", + "Show Model": "", "Show shortcuts": "Mostrar atalhos", "Show your support!": "Mostre seu apoio!", "Showcased creativity": "Criatividade exibida", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "Ferramentas possuem um sistema de chamada de funções que permite a execução de código arbitrário", "Tools have a function calling system that allows arbitrary code execution.": "Ferramentas possuem um sistema de chamada de funções que permite a execução de código arbitrário.", + "Tools Public Sharing": "", "Top K": "Top K", "Top K Reranker": "", "Top P": "Top P", diff --git a/src/lib/i18n/locales/pt-PT/translation.json b/src/lib/i18n/locales/pt-PT/translation.json index 8bbaa2f1f16..11f3831e1af 100644 --- a/src/lib/i18n/locales/pt-PT/translation.json +++ b/src/lib/i18n/locales/pt-PT/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat 
sampling for controlling perplexity.": "", "Enable New Sign Ups": "Ativar Novas Inscrições", "Enabled": "", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Confirme que o seu ficheiro CSV inclui 4 colunas nesta ordem: Nome, E-mail, Senha, Função.", "Enter {{role}} message here": "Escreva a mensagem de {{role}} aqui", "Enter a detail about yourself for your LLMs to recall": "Escreva um detalhe sobre você para que os seus LLMs possam lembrar-se", @@ -569,6 +570,7 @@ "Hex Color": "", "Hex Color - Leave empty for default color": "", "Hide": "Ocultar", + "Hide Model": "", "Home": "", "Host": "", "How can I help you today?": "Como posso ajudá-lo hoje?", @@ -628,6 +630,7 @@ "Knowledge Access": "", "Knowledge created successfully.": "", "Knowledge deleted successfully.": "", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "", "Knowledge updated successfully": "", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "Modelo {{modelId}} não foi encontrado", "Model {{modelName}} is not vision capable": "O modelo {{modelName}} não é capaz de visão", "Model {{name}} is now {{status}}": "Modelo {{name}} agora é {{status}}", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "", "Model created successfully!": "", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "Dtectado caminho do sistema de ficheiros do modelo. 
É necessário o nome curto do modelo para atualização, não é possível continuar.", @@ -712,6 +717,7 @@ "Models": "Modelos", "Models Access": "", "Models configuration saved successfully": "", + "Models Public Sharing": "", "Mojeek Search API Key": "", "more": "", "More": "Mais", @@ -836,6 +842,7 @@ "Prompt updated successfully": "", "Prompts": "Prompts", "Prompts Access": "", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "Puxar \"{{searchValue}}\" do Ollama.com", "Pull a model from Ollama.com": "Puxar um modelo do Ollama.com", @@ -968,9 +975,11 @@ "Share": "Partilhar", "Share Chat": "Partilhar Conversa", "Share to Open WebUI Community": "Partilhar com a Comunidade OpenWebUI", + "Sharing Permissions": "", "Show": "Mostrar", "Show \"What's New\" modal on login": "", "Show Admin Details in Account Pending Overlay": "Mostrar Detalhes do Administrador na sobreposição de Conta Pendente", + "Show Model": "", "Show shortcuts": "Mostrar atalhos", "Show your support!": "", "Showcased creativity": "Criatividade Exibida", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", + "Tools Public Sharing": "", "Top K": "Top K", "Top K Reranker": "", "Top P": "Top P", diff --git a/src/lib/i18n/locales/ro-RO/translation.json b/src/lib/i18n/locales/ro-RO/translation.json index e67c755f00e..cc89298bdb4 100644 --- a/src/lib/i18n/locales/ro-RO/translation.json +++ b/src/lib/i18n/locales/ro-RO/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Activează Înscrierile Noi", "Enabled": "Activat", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Asigurați-vă că fișierul CSV include 4 coloane în această ordine: Nume, Email, Parolă, Rol.", "Enter 
{{role}} message here": "Introduceți mesajul pentru {{role}} aici", "Enter a detail about yourself for your LLMs to recall": "Introduceți un detaliu despre dvs. pe care LLM-urile să-l rețină", @@ -569,6 +570,7 @@ "Hex Color": "", "Hex Color - Leave empty for default color": "", "Hide": "Ascunde", + "Hide Model": "", "Home": "", "Host": "", "How can I help you today?": "Cum te pot ajuta astăzi?", @@ -628,6 +630,7 @@ "Knowledge Access": "", "Knowledge created successfully.": "Cunoașterea a fost creată cu succes.", "Knowledge deleted successfully.": "Cunoștințele au fost șterse cu succes.", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "Resetarea cunoștințelor a fost efectuată cu succes.", "Knowledge updated successfully": "Cunoașterea a fost actualizată cu succes", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "Modelul {{modelId}} nu a fost găsit", "Model {{modelName}} is not vision capable": "Modelul {{modelName}} nu are capacități de viziune", "Model {{name}} is now {{status}}": "Modelul {{name}} este acum {{status}}", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "Modelul acceptă imagini ca intrări.", "Model created successfully!": "Modelul a fost creat cu succes!", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "Calea sistemului de fișiere al modelului detectată. 
Este necesar numele scurt al modelului pentru actualizare, nu se poate continua.", @@ -712,6 +717,7 @@ "Models": "Modele", "Models Access": "", "Models configuration saved successfully": "", + "Models Public Sharing": "", "Mojeek Search API Key": "", "more": "mai mult", "More": "Mai multe", @@ -836,6 +842,7 @@ "Prompt updated successfully": "", "Prompts": "Prompturi", "Prompts Access": "", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "Extrage \"{{searchValue}}\" de pe Ollama.com", "Pull a model from Ollama.com": "Extrage un model de pe Ollama.com", @@ -968,9 +975,11 @@ "Share": "Partajează", "Share Chat": "Partajează Conversația", "Share to Open WebUI Community": "Partajează cu Comunitatea OpenWebUI", + "Sharing Permissions": "", "Show": "Afișează", "Show \"What's New\" modal on login": "", "Show Admin Details in Account Pending Overlay": "Afișează Detaliile Administratorului în Suprapunerea Contului În Așteptare", + "Show Model": "", "Show shortcuts": "Afișează scurtături", "Show your support!": "Arată-ți susținerea!", "Showcased creativity": "Creativitate expusă", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "Instrumentele au un sistem de apelare a funcțiilor care permite executarea arbitrară a codului", "Tools have a function calling system that allows arbitrary code execution.": "Instrumentele au un sistem de apelare a funcțiilor care permite executarea arbitrară a codului.", + "Tools Public Sharing": "", "Top K": "Top K", "Top K Reranker": "", "Top P": "Top P", diff --git a/src/lib/i18n/locales/ru-RU/translation.json b/src/lib/i18n/locales/ru-RU/translation.json index 4ef8455b389..d68173e8979 100644 --- a/src/lib/i18n/locales/ru-RU/translation.json +++ b/src/lib/i18n/locales/ru-RU/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "Включите выборку Mirostat для контроля путаницы.", 
"Enable New Sign Ups": "Разрешить новые регистрации", "Enabled": "Включено", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Убедитесь, что ваш CSV-файл включает в себя 4 столбца в следующем порядке: Имя, Электронная почта, Пароль, Роль.", "Enter {{role}} message here": "Введите сообщение {{role}} здесь", "Enter a detail about yourself for your LLMs to recall": "Введите детали о себе, чтобы LLMs могли запомнить", @@ -569,6 +570,7 @@ "Hex Color": "Цвет Hex", "Hex Color - Leave empty for default color": "Цвет Hex - оставьте пустым значение цвета по умолчанию", "Hide": "Скрыть", + "Hide Model": "", "Home": "Домой", "Host": "Хост", "How can I help you today?": "Чем я могу помочь вам сегодня?", @@ -628,6 +630,7 @@ "Knowledge Access": "Доступ к Знаниям", "Knowledge created successfully.": "Знания созданы успешно.", "Knowledge deleted successfully.": "Знания успешно удалены.", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "Знания успешно сброшены.", "Knowledge updated successfully": "Знания успешно обновлены", "Kokoro.js (Browser)": "Kokoro.js (Браузер)", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "Модель {{modelId}} не найдена", "Model {{modelName}} is not vision capable": "Модель {{modelName}} не поддерживает зрение", "Model {{name}} is now {{status}}": "Модель {{name}} теперь {{status}}", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "Модель принимает изображения как входные данные", "Model created successfully!": "Модель успешно создана!", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "Обнаружен путь к файловой системе модели. 
Для обновления требуется краткое имя модели, не удается продолжить.", @@ -712,6 +717,7 @@ "Models": "Модели", "Models Access": "Доступ к Моделям", "Models configuration saved successfully": "Конфигурация модели успешно сохранена.", + "Models Public Sharing": "", "Mojeek Search API Key": "Ключ API для поиска Mojeek", "more": "больше", "More": "Больше", @@ -836,6 +842,7 @@ "Prompt updated successfully": "Промпт успешно обновлён", "Prompts": "Промпты", "Prompts Access": "Доступ к промптам", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "Загрузить \"{{searchValue}}\" с Ollama.com", "Pull a model from Ollama.com": "Загрузить модель с Ollama.com", @@ -968,9 +975,11 @@ "Share": "Поделиться", "Share Chat": "Поделиться чатом", "Share to Open WebUI Community": "Поделиться с сообществом OpenWebUI", + "Sharing Permissions": "", "Show": "Показать", "Show \"What's New\" modal on login": "Показывать окно «Что нового» при входе в систему", "Show Admin Details in Account Pending Overlay": "Показывать данные администратора в оверлее ожидающей учетной записи", + "Show Model": "", "Show shortcuts": "Показать горячие клавиши", "Show your support!": "Поддержите нас!", "Showcased creativity": "Продемонстрирован творческий подход", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "Промпт на вызов функции Инструменты", "Tools have a function calling system that allows arbitrary code execution": "Инструменты имеют систему вызова функций, которая позволяет выполнять произвольный код", "Tools have a function calling system that allows arbitrary code execution.": "Инструменты имеют систему вызова функций, которая позволяет выполнять произвольный код.", + "Tools Public Sharing": "", "Top K": "Top K", "Top K Reranker": "", "Top P": "Top P", diff --git a/src/lib/i18n/locales/sk-SK/translation.json b/src/lib/i18n/locales/sk-SK/translation.json index 909be6e99a6..95f488fc113 100644 --- a/src/lib/i18n/locales/sk-SK/translation.json +++ 
b/src/lib/i18n/locales/sk-SK/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Povoliť nové registrácie", "Enabled": "Povolené", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Uistite sa, že váš CSV súbor obsahuje 4 stĺpce v tomto poradí: Name, Email, Password, Role.", "Enter {{role}} message here": "Zadajte správu {{role}} sem", "Enter a detail about yourself for your LLMs to recall": "Zadajte podrobnosť o sebe, ktorú si vaše LLM majú zapamätať.", @@ -569,6 +570,7 @@ "Hex Color": "", "Hex Color - Leave empty for default color": "", "Hide": "Skryť", + "Hide Model": "", "Home": "", "Host": "", "How can I help you today?": "Ako vám môžem dnes pomôcť?", @@ -628,6 +630,7 @@ "Knowledge Access": "", "Knowledge created successfully.": "Znalosť úspešne vytvorená.", "Knowledge deleted successfully.": "Znalosti boli úspešne odstránené.", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "Úspešné obnovenie znalostí.", "Knowledge updated successfully": "Znalosti úspešne aktualizované", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "Model {{modelId}} nebol nájdený", "Model {{modelName}} is not vision capable": "Model {{modelName}} nie je schopný spracovávať vizuálne údaje.", "Model {{name}} is now {{status}}": "Model {{name}} je teraz {{status}}.", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "Model prijíma vstupy vo forme obrázkov", "Model created successfully!": "Model bol úspešne vytvorený!", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "Zistená cesta v súborovom systéme. 
Je vyžadovaný krátky názov modelu pre aktualizáciu, nemožno pokračovať.", @@ -712,6 +717,7 @@ "Models": "Modely", "Models Access": "", "Models configuration saved successfully": "", + "Models Public Sharing": "", "Mojeek Search API Key": "", "more": "viac", "More": "Viac", @@ -836,6 +842,7 @@ "Prompt updated successfully": "", "Prompts": "Prompty", "Prompts Access": "", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "Stiahnite \"{{searchValue}}\" z Ollama.com", "Pull a model from Ollama.com": "Stiahnite model z Ollama.com", @@ -968,9 +975,11 @@ "Share": "Zdieľať", "Share Chat": "Zdieľať chat", "Share to Open WebUI Community": "Zdieľať s komunitou OpenWebUI", + "Sharing Permissions": "", "Show": "Zobraziť", "Show \"What's New\" modal on login": "", "Show Admin Details in Account Pending Overlay": "Zobraziť podrobnosti administrátora v prekryvnom okne s čakajúcim účtom", + "Show Model": "", "Show shortcuts": "Zobraziť klávesové skratky", "Show your support!": "Vyjadrite svoju podporu!", "Showcased creativity": "Predvedená kreativita", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "Nástroje majú systém volania funkcií, ktorý umožňuje ľubovoľné spúšťanie kódu.", "Tools have a function calling system that allows arbitrary code execution.": "Nástroje majú systém volania funkcií, ktorý umožňuje spúšťanie ľubovoľného kódu.", + "Tools Public Sharing": "", "Top K": "Top K", "Top K Reranker": "", "Top P": "Top P", diff --git a/src/lib/i18n/locales/sr-RS/translation.json b/src/lib/i18n/locales/sr-RS/translation.json index 3b644554d81..60f2a3c2010 100644 --- a/src/lib/i18n/locales/sr-RS/translation.json +++ b/src/lib/i18n/locales/sr-RS/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Омогући нове пријаве", "Enabled": "Омогућено", + "Enforce Temporary Chat": "", "Ensure your 
CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Уверите се да ваша CSV датотека укључује 4 колоне у овом редоследу: Име, Е-пошта, Лозинка, Улога.", "Enter {{role}} message here": "Унесите {{role}} поруку овде", "Enter a detail about yourself for your LLMs to recall": "Унесите детаље за себе да ће LLMs преузимати", @@ -569,6 +570,7 @@ "Hex Color": "Хекс боја", "Hex Color - Leave empty for default color": "Хекс боја (празно за подразумевано)", "Hide": "Сакриј", + "Hide Model": "", "Home": "", "Host": "Домаћин", "How can I help you today?": "Како могу да вам помогнем данас?", @@ -628,6 +630,7 @@ "Knowledge Access": "Приступ знању", "Knowledge created successfully.": "", "Knowledge deleted successfully.": "", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "", "Knowledge updated successfully": "", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "Модел {{modelId}} није пронађен", "Model {{modelName}} is not vision capable": "Модел {{моделНаме}} није способан за вид", "Model {{name}} is now {{status}}": "Модел {{наме}} је сада {{статус}}", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "", "Model created successfully!": "", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "Откривена путања система датотека модела. 
За ажурирање је потребан кратак назив модела, не може се наставити.", @@ -712,6 +717,7 @@ "Models": "Модели", "Models Access": "", "Models configuration saved successfully": "", + "Models Public Sharing": "", "Mojeek Search API Key": "", "more": "више", "More": "Више", @@ -836,6 +842,7 @@ "Prompt updated successfully": "Упит измењен успешно", "Prompts": "Упити", "Prompts Access": "Приступ упитима", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "Повуците \"{{searchValue}}\" са Ollama.com", "Pull a model from Ollama.com": "Повуците модел са Ollama.com", @@ -968,9 +975,11 @@ "Share": "Подели", "Share Chat": "Подели ћаскање", "Share to Open WebUI Community": "Подели са OpenWebUI заједницом", + "Sharing Permissions": "", "Show": "Прикажи", "Show \"What's New\" modal on login": "Прикажи \"Погледај шта је ново\" прозорче при пријави", "Show Admin Details in Account Pending Overlay": "", + "Show Model": "", "Show shortcuts": "Прикажи пречице", "Show your support!": "", "Showcased creativity": "Приказана креативност", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", + "Tools Public Sharing": "", "Top K": "Топ К", "Top K Reranker": "", "Top P": "Топ П", diff --git a/src/lib/i18n/locales/sv-SE/translation.json b/src/lib/i18n/locales/sv-SE/translation.json index 418aacf3cbf..b983392971c 100644 --- a/src/lib/i18n/locales/sv-SE/translation.json +++ b/src/lib/i18n/locales/sv-SE/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Aktivera nya registreringar", "Enabled": "", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Se till att din CSV-fil innehåller fyra kolumner i denna ordning: Name, Email, Password, Role.", "Enter 
{{role}} message here": "Skriv {{role}} meddelande här", "Enter a detail about yourself for your LLMs to recall": "Skriv en detalj om dig själv för att dina LLMs ska komma ihåg", @@ -569,6 +570,7 @@ "Hex Color": "", "Hex Color - Leave empty for default color": "", "Hide": "Dölj", + "Hide Model": "", "Home": "", "Host": "", "How can I help you today?": "Hur kan jag hjälpa dig idag?", @@ -628,6 +630,7 @@ "Knowledge Access": "", "Knowledge created successfully.": "", "Knowledge deleted successfully.": "", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "", "Knowledge updated successfully": "", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "Modell {{modelId}} hittades inte", "Model {{modelName}} is not vision capable": "Modellen {{modelName}} är inte synkapabel", "Model {{name}} is now {{status}}": "Modellen {{name}} är nu {{status}}", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "", "Model created successfully!": "", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "Modellens filsystemväg upptäckt. 
Modellens kortnamn krävs för uppdatering, kan inte fortsätta.", @@ -712,6 +717,7 @@ "Models": "Modeller", "Models Access": "", "Models configuration saved successfully": "", + "Models Public Sharing": "", "Mojeek Search API Key": "", "more": "", "More": "Mer", @@ -836,6 +842,7 @@ "Prompt updated successfully": "", "Prompts": "Instruktioner", "Prompts Access": "", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "Ladda ner \"{{searchValue}}\" från Ollama.com", "Pull a model from Ollama.com": "Ladda ner en modell från Ollama.com", @@ -968,9 +975,11 @@ "Share": "Dela", "Share Chat": "Dela chatt", "Share to Open WebUI Community": "Dela till OpenWebUI Community", + "Sharing Permissions": "", "Show": "Visa", "Show \"What's New\" modal on login": "", "Show Admin Details in Account Pending Overlay": "Visa administratörsinformation till väntande konton", + "Show Model": "", "Show shortcuts": "Visa genvägar", "Show your support!": "Visa ditt stöd!", "Showcased creativity": "Visade kreativitet", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "Prompt för anrop av verktygsfunktion:", "Tools have a function calling system that allows arbitrary code execution": "Verktyg har ett funktionsanropssystem som tillåter godtycklig kodkörning", "Tools have a function calling system that allows arbitrary code execution.": "Verktyg har ett funktionsanropssystem som tillåter godtycklig kodkörning", + "Tools Public Sharing": "", "Top K": "Topp K", "Top K Reranker": "", "Top P": "Topp P", diff --git a/src/lib/i18n/locales/th-TH/translation.json b/src/lib/i18n/locales/th-TH/translation.json index 10e5910a2d5..856071a35c2 100644 --- a/src/lib/i18n/locales/th-TH/translation.json +++ b/src/lib/i18n/locales/th-TH/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "เปิดใช้งานการสมัครใหม่", "Enabled": "เปิดใช้งาน", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 
columns in this order: Name, Email, Password, Role.": "ตรวจสอบว่าไฟล์ CSV ของคุณมี 4 คอลัมน์ในลำดับนี้: ชื่อ, อีเมล, รหัสผ่าน, บทบาท", "Enter {{role}} message here": "ใส่ข้อความ {{role}} ที่นี่", "Enter a detail about yourself for your LLMs to recall": "ใส่รายละเอียดเกี่ยวกับตัวคุณสำหรับ LLMs ของคุณให้จดจำ", @@ -569,6 +570,7 @@ "Hex Color": "", "Hex Color - Leave empty for default color": "", "Hide": "ซ่อน", + "Hide Model": "", "Home": "", "Host": "", "How can I help you today?": "วันนี้ฉันจะช่วยอะไรคุณได้บ้าง?", @@ -628,6 +630,7 @@ "Knowledge Access": "", "Knowledge created successfully.": "", "Knowledge deleted successfully.": "", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "", "Knowledge updated successfully": "", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "ไม่พบโมเดล {{modelId}}", "Model {{modelName}} is not vision capable": "โมเดล {{modelName}} ไม่มีคุณสมบัติวิสชั่น", "Model {{name}} is now {{status}}": "โมเดล {{name}} ขณะนี้ {{status}}", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "", "Model created successfully!": "สร้างโมเดลสำเร็จ!", "Model filesystem path detected. 
Model shortname is required for update, cannot continue.": "ตรวจพบเส้นทางระบบไฟล์ของโมเดล ต้องการชื่อย่อของโมเดลสำหรับการอัปเดต ไม่สามารถดำเนินการต่อได้", @@ -712,6 +717,7 @@ "Models": "โมเดล", "Models Access": "", "Models configuration saved successfully": "", + "Models Public Sharing": "", "Mojeek Search API Key": "", "more": "", "More": "เพิ่มเติม", @@ -836,6 +842,7 @@ "Prompt updated successfully": "", "Prompts": "พรอมต์", "Prompts Access": "", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "", "Pull a model from Ollama.com": "", @@ -968,9 +975,11 @@ "Share": "แชร์", "Share Chat": "แชร์แชท", "Share to Open WebUI Community": "แชร์ไปยังชุมชน OpenWebUI", + "Sharing Permissions": "", "Show": "แสดง", "Show \"What's New\" modal on login": "", "Show Admin Details in Account Pending Overlay": "แสดงรายละเอียดผู้ดูแลระบบในหน้าจอรอการอนุมัติบัญชี", + "Show Model": "", "Show shortcuts": "แสดงทางลัด", "Show your support!": "แสดงการสนับสนุนของคุณ!", "Showcased creativity": "แสดงความคิดสร้างสรรค์", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "เครื่องมือมีระบบการเรียกใช้ฟังก์ชันที่สามารถดำเนินการโค้ดใดๆ ได้", "Tools have a function calling system that allows arbitrary code execution.": "เครื่องมือมีระบบการเรียกใช้ฟังก์ชันที่สามารถดำเนินการโค้ดใดๆ ได้", + "Tools Public Sharing": "", "Top K": "Top K", "Top K Reranker": "", "Top P": "Top P", diff --git a/src/lib/i18n/locales/tk-TW/translation.json b/src/lib/i18n/locales/tk-TW/translation.json index 89846bc471a..d02bdaba7c3 100644 --- a/src/lib/i18n/locales/tk-TW/translation.json +++ b/src/lib/i18n/locales/tk-TW/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "", "Enabled": "", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "", "Enter {{role}} message 
here": "", "Enter a detail about yourself for your LLMs to recall": "", @@ -569,6 +570,7 @@ "Hex Color": "", "Hex Color - Leave empty for default color": "", "Hide": "", + "Hide Model": "", "Home": "", "Host": "", "How can I help you today?": "", @@ -628,6 +630,7 @@ "Knowledge Access": "", "Knowledge created successfully.": "", "Knowledge deleted successfully.": "", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "", "Knowledge updated successfully": "", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "", "Model {{modelName}} is not vision capable": "", "Model {{name}} is now {{status}}": "", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "", "Model created successfully!": "", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "", @@ -712,6 +717,7 @@ "Models": "", "Models Access": "", "Models configuration saved successfully": "", + "Models Public Sharing": "", "Mojeek Search API Key": "", "more": "", "More": "", @@ -836,6 +842,7 @@ "Prompt updated successfully": "", "Prompts": "", "Prompts Access": "", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "", "Pull a model from Ollama.com": "", @@ -968,9 +975,11 @@ "Share": "", "Share Chat": "", "Share to Open WebUI Community": "", + "Sharing Permissions": "", "Show": "", "Show \"What's New\" modal on login": "", "Show Admin Details in Account Pending Overlay": "", + "Show Model": "", "Show shortcuts": "", "Show your support!": "", "Showcased creativity": "", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "", "Tools have a function calling system that allows arbitrary code execution.": "", + "Tools Public Sharing": "", "Top K": "", "Top K Reranker": "", "Top P": "", diff --git a/src/lib/i18n/locales/tr-TR/translation.json 
b/src/lib/i18n/locales/tr-TR/translation.json index d501f519931..a1cb0e9d3c5 100644 --- a/src/lib/i18n/locales/tr-TR/translation.json +++ b/src/lib/i18n/locales/tr-TR/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Yeni Kayıtları Etkinleştir", "Enabled": "Etkin", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "CSV dosyanızın şu sırayla 4 sütun içerdiğinden emin olun: İsim, E-posta, Şifre, Rol.", "Enter {{role}} message here": "Buraya {{role}} mesajını girin", "Enter a detail about yourself for your LLMs to recall": "LLM'lerinizin hatırlaması için kendiniz hakkında bir bilgi girin", @@ -569,6 +570,7 @@ "Hex Color": "", "Hex Color - Leave empty for default color": "", "Hide": "Gizle", + "Hide Model": "", "Home": "", "Host": "Ana bilgisayar", "How can I help you today?": "Bugün size nasıl yardımcı olabilirim?", @@ -628,6 +630,7 @@ "Knowledge Access": "Bilgi Erişimi", "Knowledge created successfully.": "Bilgi başarıyla oluşturuldu.", "Knowledge deleted successfully.": "Bilgi başarıyla silindi.", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "Bilgi başarıyla sıfırlandı.", "Knowledge updated successfully": "Bilgi başarıyla güncellendi", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "{{modelId}} bulunamadı", "Model {{modelName}} is not vision capable": "Model {{modelName}} görüntü yeteneğine sahip değil", "Model {{name}} is now {{status}}": "{{name}} modeli artık {{status}}", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "Model görüntü girdilerini kabul eder", "Model created successfully!": "Model başarıyla oluşturuldu!", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "Model dosya sistemi yolu algılandı. 
Güncelleme için model kısa adı gerekli, devam edilemiyor.", @@ -712,6 +717,7 @@ "Models": "Modeller", "Models Access": "Modellere Erişim", "Models configuration saved successfully": "Modellerin yapılandırması başarıyla kaydedildi", + "Models Public Sharing": "", "Mojeek Search API Key": "Mojeek Search API Anahtarı", "more": "daha fazla", "More": "Daha Fazla", @@ -836,6 +842,7 @@ "Prompt updated successfully": "Prompt başarıyla güncellendi", "Prompts": "Promptlar", "Prompts Access": "Promptlara Erişim", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "Ollama.com'dan \"{{searchValue}}\" çekin", "Pull a model from Ollama.com": "Ollama.com'dan bir model çekin", @@ -968,9 +975,11 @@ "Share": "Paylaş", "Share Chat": "Sohbeti Paylaş", "Share to Open WebUI Community": "OpenWebUI Topluluğu ile Paylaş", + "Sharing Permissions": "", "Show": "Göster", "Show \"What's New\" modal on login": "Girişte \"Yenilikler\" modalını göster", "Show Admin Details in Account Pending Overlay": "Yönetici Ayrıntılarını Hesap Bekliyor Ekranında Göster", + "Show Model": "", "Show shortcuts": "Kısayolları göster", "Show your support!": "Desteğinizi gösterin!", "Showcased creativity": "Sergilenen yaratıcılık", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "Araçlar, keyfi kod yürütme izni veren bir fonksiyon çağırma sistemine sahiptir", "Tools have a function calling system that allows arbitrary code execution.": "Araçlar, keyfi kod yürütme izni veren bir fonksiyon çağırma sistemine sahiptir.", + "Tools Public Sharing": "", "Top K": "Top K", "Top K Reranker": "", "Top P": "Top P", diff --git a/src/lib/i18n/locales/uk-UA/translation.json b/src/lib/i18n/locales/uk-UA/translation.json index d1dc4d2831e..bb2be72ba3e 100644 --- a/src/lib/i18n/locales/uk-UA/translation.json +++ b/src/lib/i18n/locales/uk-UA/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for 
controlling perplexity.": "Увімкнути вибірку Mirostat для контролю перплексії.", "Enable New Sign Ups": "Дозволити нові реєстрації", "Enabled": "Увімкнено", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Переконайтеся, що ваш CSV-файл містить 4 колонки в такому порядку: Ім'я, Email, Пароль, Роль.", "Enter {{role}} message here": "Введіть повідомлення {{role}} тут", "Enter a detail about yourself for your LLMs to recall": "Введіть відомості про себе для запам'ятовування вашими LLM.", @@ -569,6 +570,7 @@ "Hex Color": "Шістнадцятковий колір", "Hex Color - Leave empty for default color": "Шістнадцятковий колір — залиште порожнім для кольору за замовчуванням", "Hide": "Приховати", + "Hide Model": "", "Home": "Головна", "Host": "Хост", "How can I help you today?": "Чим я можу допомогти вам сьогодні?", @@ -628,6 +630,7 @@ "Knowledge Access": "Доступ до знань", "Knowledge created successfully.": "Знання успішно створено.", "Knowledge deleted successfully.": "Знання успішно видалено.", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "Знання успішно скинуто.", "Knowledge updated successfully": "Знання успішно оновлено", "Kokoro.js (Browser)": "Kokoro.js (Браузер)", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "Модель {{modelId}} не знайдено", "Model {{modelName}} is not vision capable": "Модель {{modelName}} не здатна бачити", "Model {{name}} is now {{status}}": "Модель {{name}} тепер має {{status}}", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "Модель приймає зображеня", "Model created successfully!": "Модель створено успішно!", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "Виявлено шлях до файлової системи моделі. 
Для оновлення потрібно вказати коротке ім'я моделі, не вдасться продовжити.", @@ -712,6 +717,7 @@ "Models": "Моделі", "Models Access": "Доступ до моделей", "Models configuration saved successfully": "Конфігурацію моделей успішно збережено", + "Models Public Sharing": "", "Mojeek Search API Key": "API ключ для пошуку Mojeek", "more": "більше", "More": "Більше", @@ -836,6 +842,7 @@ "Prompt updated successfully": "Підказку успішно оновлено", "Prompts": "Промти", "Prompts Access": "Доступ до підказок", + "Prompts Public Sharing": "", "Public": "Публічний", "Pull \"{{searchValue}}\" from Ollama.com": "Завантажити \"{{searchValue}}\" з Ollama.com", "Pull a model from Ollama.com": "Завантажити модель з Ollama.com", @@ -968,9 +975,11 @@ "Share": "Поділитися", "Share Chat": "Поділитися чатом", "Share to Open WebUI Community": "Поділитися зі спільнотою OpenWebUI", + "Sharing Permissions": "", "Show": "Показати", "Show \"What's New\" modal on login": "Показати модальне вікно \"Що нового\" під час входу", "Show Admin Details in Account Pending Overlay": "Відобразити дані адміна у вікні очікування облікового запису", + "Show Model": "", "Show shortcuts": "Показати клавіатурні скорочення", "Show your support!": "Підтримайте нас!", "Showcased creativity": "Продемонстрований креатив", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "Підказка для виклику функцій інструментів", "Tools have a function calling system that allows arbitrary code execution": "Інструменти мають систему виклику функцій, яка дозволяє виконання довільного коду", "Tools have a function calling system that allows arbitrary code execution.": "Інструменти мають систему виклику функцій, яка дозволяє виконання довільного коду.", + "Tools Public Sharing": "", "Top K": "Top K", "Top K Reranker": "", "Top P": "Top P", diff --git a/src/lib/i18n/locales/ur-PK/translation.json b/src/lib/i18n/locales/ur-PK/translation.json index 2b119720ad2..0615123529c 100644 --- a/src/lib/i18n/locales/ur-PK/translation.json +++ 
b/src/lib/i18n/locales/ur-PK/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "نئے سائن اپس کو فعال کریں", "Enabled": "فعال کردیا گیا ہے", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "یقینی بنائیں کہ آپ کی CSV فائل میں 4 کالم اس ترتیب میں شامل ہوں: نام، ای میل، پاس ورڈ، کردار", "Enter {{role}} message here": "یہاں {{کردار}} پیغام درج کریں", "Enter a detail about yourself for your LLMs to recall": "اپنی ذات کے بارے میں کوئی تفصیل درج کریں تاکہ آپ کے LLMs اسے یاد رکھ سکیں", @@ -569,6 +570,7 @@ "Hex Color": "", "Hex Color - Leave empty for default color": "", "Hide": "چھپائیں", + "Hide Model": "", "Home": "", "Host": "", "How can I help you today?": "میں آج آپ کی کس طرح مدد کر سکتا ہوں؟", @@ -628,6 +630,7 @@ "Knowledge Access": "", "Knowledge created successfully.": "علم کامیابی سے تخلیق کیا گیا", "Knowledge deleted successfully.": "معلومات کامیابی سے حذف ہو گئیں", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "علم کو کامیابی کے ساتھ دوبارہ ترتیب دیا گیا", "Knowledge updated successfully": "علم کامیابی سے تازہ کر دیا گیا ہے", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "ماڈل {{modelId}} نہیں ملا", "Model {{modelName}} is not vision capable": "ماڈل {{modelName}} بصری صلاحیت نہیں رکھتا", "Model {{name}} is now {{status}}": "ماڈل {{name}} اب {{status}} ہے", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "ماڈل تصویری ان پٹس قبول کرتا ہے", "Model created successfully!": "ماڈل کامیابی سے تیار کر دیا گیا!", "Model filesystem path detected. 
Model shortname is required for update, cannot continue.": "ماڈل فائل سسٹم کا راستہ مل گیا ماڈل کا مختصر نام اپڈیٹ کے لیے ضروری ہے، جاری نہیں رہ سکتا", @@ -712,6 +717,7 @@ "Models": "ماڈلز", "Models Access": "", "Models configuration saved successfully": "", + "Models Public Sharing": "", "Mojeek Search API Key": "", "more": "مزید", "More": "مزید", @@ -836,6 +842,7 @@ "Prompt updated successfully": "", "Prompts": "پرومپٹس", "Prompts Access": "", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "Ollama.com سے \"{{searchValue}}\" کو کھینچیں", "Pull a model from Ollama.com": "Ollama.com سے ماڈل حاصل کریں", @@ -968,9 +975,11 @@ "Share": "اشتراک کریں", "Share Chat": "چیٹ شیئر کریں", "Share to Open WebUI Community": "اوپن ویب یوآئی کمیونٹی کے ساتھ شیئر کریں\n", + "Sharing Permissions": "", "Show": "دکھائیں", "Show \"What's New\" modal on login": "", "Show Admin Details in Account Pending Overlay": "اکاؤنٹ پینڈنگ اوورلے میں ایڈمن کی تفصیلات دکھائیں", + "Show Model": "", "Show shortcuts": "شارٹ کٹ دکھائیں", "Show your support!": "اپنی حمایت دکھائیں!", "Showcased creativity": "نمائش شدہ تخلیقی صلاحیتیں", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "ٹولز کے پاس ایک فنکشن کالنگ سسٹم ہے جو اختیاری کوڈ کے نفاذ کی اجازت دیتا ہے", "Tools have a function calling system that allows arbitrary code execution.": "ٹولز کے پاس ایک فنکشن کالنگ سسٹم ہے جو اختیاری کوڈ کی عمل درآمد کی اجازت دیتا ہے", + "Tools Public Sharing": "", "Top K": "اوپر کے K", "Top K Reranker": "", "Top P": "ٹاپ پی", diff --git a/src/lib/i18n/locales/vi-VN/translation.json b/src/lib/i18n/locales/vi-VN/translation.json index c90e32d3495..c8aaf5e3938 100644 --- a/src/lib/i18n/locales/vi-VN/translation.json +++ b/src/lib/i18n/locales/vi-VN/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "", "Enable New Sign Ups": "Cho phép đăng ký mới", 
"Enabled": "Đã bật", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Đảm bảo tệp CSV của bạn bao gồm 4 cột theo thứ tự sau: Name, Email, Password, Role.", "Enter {{role}} message here": "Nhập yêu cầu của {{role}} ở đây", "Enter a detail about yourself for your LLMs to recall": "Nhập chi tiết về bản thân của bạn để LLMs có thể nhớ", @@ -569,6 +570,7 @@ "Hex Color": "", "Hex Color - Leave empty for default color": "", "Hide": "Ẩn", + "Hide Model": "", "Home": "", "Host": "", "How can I help you today?": "Tôi có thể giúp gì cho bạn hôm nay?", @@ -628,6 +630,7 @@ "Knowledge Access": "", "Knowledge created successfully.": "", "Knowledge deleted successfully.": "", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "", "Knowledge updated successfully": "", "Kokoro.js (Browser)": "", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "Không tìm thấy Mô hình {{modelId}}", "Model {{modelName}} is not vision capable": "Model {{modelName}} không có khả năng nhìn", "Model {{name}} is now {{status}}": "Model {{name}} bây giờ là {{status}}", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "", "Model created successfully!": "Model đã được tạo thành công", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "Đường dẫn hệ thống tệp mô hình được phát hiện. 
Tên viết tắt mô hình là bắt buộc để cập nhật, không thể tiếp tục.", @@ -712,6 +717,7 @@ "Models": "Mô hình", "Models Access": "", "Models configuration saved successfully": "", + "Models Public Sharing": "", "Mojeek Search API Key": "", "more": "", "More": "Thêm", @@ -836,6 +842,7 @@ "Prompt updated successfully": "", "Prompts": "Prompt", "Prompts Access": "", + "Prompts Public Sharing": "", "Public": "", "Pull \"{{searchValue}}\" from Ollama.com": "Tải \"{{searchValue}}\" từ Ollama.com", "Pull a model from Ollama.com": "Tải mô hình từ Ollama.com", @@ -968,9 +975,11 @@ "Share": "Chia sẻ", "Share Chat": "Chia sẻ Chat", "Share to Open WebUI Community": "Chia sẻ đến Cộng đồng OpenWebUI", + "Sharing Permissions": "", "Show": "Hiển thị", "Show \"What's New\" modal on login": "", "Show Admin Details in Account Pending Overlay": "Hiển thị thông tin của Quản trị viên trên màn hình hiển thị Tài khoản đang chờ xử lý", + "Show Model": "", "Show shortcuts": "Hiển thị phím tắt", "Show your support!": "Thể hiện sự ủng hộ của bạn!", "Showcased creativity": "Thể hiện sự sáng tạo", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "", "Tools have a function calling system that allows arbitrary code execution": "Các Tools có hệ thống gọi function cho phép thực thi mã tùy ý", "Tools have a function calling system that allows arbitrary code execution.": "Các Tools có hệ thống gọi function cho phép thực thi mã tùy ý.", + "Tools Public Sharing": "", "Top K": "Top K", "Top K Reranker": "", "Top P": "Top P", diff --git a/src/lib/i18n/locales/zh-CN/translation.json b/src/lib/i18n/locales/zh-CN/translation.json index 2d0bc26ffe0..eeeff3d4769 100644 --- a/src/lib/i18n/locales/zh-CN/translation.json +++ b/src/lib/i18n/locales/zh-CN/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "启用Mirostat采样以控制困惑度", "Enable New Sign Ups": "允许新用户注册", "Enabled": "启用", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, 
Email, Password, Role.": "确保您的 CSV 文件按以下顺序包含 4 列: 姓名、电子邮箱、密码、角色。", "Enter {{role}} message here": "在此处输入 {{role}} 的对话内容", "Enter a detail about yourself for your LLMs to recall": "输入一个关于你自己的详细信息,方便你的大语言模型记住这些内容", @@ -569,6 +570,7 @@ "Hex Color": "十六进制颜色代码", "Hex Color - Leave empty for default color": "十六进制颜色代码 - 留空使用默认颜色", "Hide": "隐藏", + "Hide Model": "", "Home": "主页", "Host": "主机", "How can I help you today?": "有什么我能帮您的吗?", @@ -628,6 +630,7 @@ "Knowledge Access": "访问知识库", "Knowledge created successfully.": "知识成功创建", "Knowledge deleted successfully.": "知识成功删除", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "知识成功重置", "Knowledge updated successfully": "知识成功更新", "Kokoro.js (Browser)": "Kokoro.js (Browser)", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "未找到模型 {{modelId}}", "Model {{modelName}} is not vision capable": "模型 {{modelName}} 不支持视觉能力", "Model {{name}} is now {{status}}": "模型 {{name}} 现在是 {{status}}", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "模型接受图像输入", "Model created successfully!": "模型创建成功!", "Model filesystem path detected. 
Model shortname is required for update, cannot continue.": "检测到模型文件系统路径,无法继续进行。更新操作需要提供模型简称。", @@ -712,6 +717,7 @@ "Models": "模型", "Models Access": "访问模型列表", "Models configuration saved successfully": "模型配置保存成功", + "Models Public Sharing": "", "Mojeek Search API Key": "Mojeek Search API 密钥", "more": "更多", "More": "更多", @@ -836,6 +842,7 @@ "Prompt updated successfully": "提示词更新成功", "Prompts": "提示词", "Prompts Access": "访问提示词", + "Prompts Public Sharing": "", "Public": "公共", "Pull \"{{searchValue}}\" from Ollama.com": "从 Ollama.com 拉取 \"{{searchValue}}\"", "Pull a model from Ollama.com": "从 Ollama.com 拉取一个模型", @@ -968,9 +975,11 @@ "Share": "分享", "Share Chat": "分享对话", "Share to Open WebUI Community": "分享到 OpenWebUI 社区", + "Sharing Permissions": "", "Show": "显示", "Show \"What's New\" modal on login": "在登录时显示“更新内容”弹窗", "Show Admin Details in Account Pending Overlay": "在用户待激活界面中显示管理员邮箱等详细信息", + "Show Model": "", "Show shortcuts": "显示快捷方式", "Show your support!": "表达你的支持!", "Showcased creativity": "很有创意", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "工具函数调用提示词", "Tools have a function calling system that allows arbitrary code execution": "注意:工具有权执行任意代码", "Tools have a function calling system that allows arbitrary code execution.": "注意:工具有权执行任意代码。", + "Tools Public Sharing": "", "Top K": "Top K", "Top K Reranker": "Top K Reranker", "Top P": "Top P", diff --git a/src/lib/i18n/locales/zh-TW/translation.json b/src/lib/i18n/locales/zh-TW/translation.json index 750bb972527..25ff67a5d5f 100644 --- a/src/lib/i18n/locales/zh-TW/translation.json +++ b/src/lib/i18n/locales/zh-TW/translation.json @@ -379,6 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "啟用 Mirostat 取樣以控制 perplexity。", "Enable New Sign Ups": "允許新使用者註冊", "Enabled": "已啟用", + "Enforce Temporary Chat": "", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "請確認您的 CSV 檔案包含以下 4 個欄位,並按照此順序排列:姓名、電子郵件、密碼、角色。", "Enter {{role}} message here": "在此輸入 {{role}} 訊息", 
"Enter a detail about yourself for your LLMs to recall": "輸入有關您的詳細資訊,讓您的大型語言模型可以回想起來", @@ -569,6 +570,7 @@ "Hex Color": "Hex 顔色", "Hex Color - Leave empty for default color": "Hex 顔色 —— 留空以使用預設顔色", "Hide": "隱藏", + "Hide Model": "", "Home": "首頁", "Host": "主機", "How can I help you today?": "今天我能為您做些什麼?", @@ -628,6 +630,7 @@ "Knowledge Access": "知識存取", "Knowledge created successfully.": "知識建立成功。", "Knowledge deleted successfully.": "知識刪除成功。", + "Knowledge Public Sharing": "", "Knowledge reset successfully.": "知識重設成功。", "Knowledge updated successfully": "知識更新成功", "Kokoro.js (Browser)": "Kokoro.js (Browser)", @@ -697,6 +700,8 @@ "Model {{modelId}} not found": "找不到模型 {{modelId}}", "Model {{modelName}} is not vision capable": "模型 {{modelName}} 不具備視覺能力", "Model {{name}} is now {{status}}": "模型 {{name}} 現在狀態為 {{status}}", + "Model {{name}} is now hidden": "", + "Model {{name}} is now visible": "", "Model accepts image inputs": "模型接受影像輸入", "Model created successfully!": "成功建立模型!", "Model filesystem path detected. 
Model shortname is required for update, cannot continue.": "偵測到模型檔案系統路徑。更新需要模型簡稱,因此無法繼續。", @@ -712,6 +717,7 @@ "Models": "模型", "Models Access": "模型存取", "Models configuration saved successfully": "模型設定儲存成功", + "Models Public Sharing": "", "Mojeek Search API Key": "Mojeek 搜尋 API 金鑰", "more": "更多", "More": "更多", @@ -836,6 +842,7 @@ "Prompt updated successfully": "提示詞更新成功", "Prompts": "提示詞", "Prompts Access": "提示詞存取", + "Prompts Public Sharing": "", "Public": "公開", "Pull \"{{searchValue}}\" from Ollama.com": "從 Ollama.com 下載「{{searchValue}}」", "Pull a model from Ollama.com": "從 Ollama.com 下載模型", @@ -968,9 +975,11 @@ "Share": "分享", "Share Chat": "分享對話", "Share to Open WebUI Community": "分享到 OpenWebUI 社群", + "Sharing Permissions": "", "Show": "顯示", "Show \"What's New\" modal on login": "登入時顯示「新功能」對話框", "Show Admin Details in Account Pending Overlay": "在帳號待審覆蓋層中顯示管理員詳細資訊", + "Show Model": "", "Show shortcuts": "顯示快捷鍵", "Show your support!": "表達您的支持!", "Showcased creativity": "展現創意", @@ -1087,6 +1096,7 @@ "Tools Function Calling Prompt": "工具函式呼叫提示詞", "Tools have a function calling system that allows arbitrary code execution": "工具具有允許執行任意程式碼的函式呼叫系統", "Tools have a function calling system that allows arbitrary code execution.": "工具具有允許執行任意程式碼的函式呼叫系統。", + "Tools Public Sharing": "", "Top K": "Top K", "Top K Reranker": "Top K Reranker", "Top P": "Top P", From 3662ecdeab2cc7ab294d5575acb51812181286ad Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Mon, 31 Mar 2025 18:34:42 -0700 Subject: [PATCH 488/623] doc: changelog --- CHANGELOG.md | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index da4046e73fb..7f4b598ef8c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,54 @@ All notable changes to this project will be documented in this file. 
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [0.6.0] - 2025-03-31 + +### Added + +- 🧩 **External Tool Server Support via OpenAPI**: Connect Open WebUI to any OpenAPI-compatible REST server instantly—offering immediate integration with thousands of developer tools, SDKs, and SaaS systems for powerful extensibility. Learn more: https://github.com/open-webui/openapi-servers +- 🛠️ **MCP Tool Support via MCPO**: You can now convert and expose your internal MCP tools as interoperable OpenAPI HTTP servers within Open WebUI for seamless, plug-n-play AI toolchain creation. Learn more: https://github.com/open-webui/mcpo +- 📨 **/messages Chat API Endpoint Support**: For power users building external AI systems, new endpoints allow precise control of messages asynchronously—feed long-running external responses into Open WebUI chats without coupling with the frontend. +- 📝 **Client-Side PDF Generation**: PDF exports are now generated fully client-side for much faster performance and drastically improved output quality—perfect for saving conversations or documents. +- 💼 **Enforced Temporary Chats Mode**: Admins can now enforce temporary chat sessions by default to align with stringent data retention and compliance requirements. +- 🌍 **Public Resource Sharing Permission Controls**: Fine-grained user group permissions now allow enabling/disabling public sharing for models, knowledge, prompts, and tools—ideal for privacy, team control, and internal deployments. +- 📦 **Custom pip Options for Tools/Functions**: Tools and Functions requirements can now include custom pip installation options—improving compatibility, support for private indexes, and better control over Python environments. 
+- 🔢 **Editable Message Counter**: You can now double-click the message count number and jump straight to editing the index—quickly navigate complex chats or regenerate specific messages precisely.
+- 🧠 **Embedding Prefix Support Added**: Add custom prefixes to your embeddings for instruct-style tokens, enabling stronger model alignment and more consistent RAG performance.
+- 🙈 **Ability to Hide Base Models**: Optionally hide base models from the UI, helping users streamline model visibility and limit access to only usable endpoints.
+- 🗃️ **Redis Sentinel Support Added**: Enhance deployment redundancy with support for Redis Sentinel for highly available, failover-safe Redis-based caching or pub/sub.
+- 📚 **JSON Schema Format for Ollama**: Added support for defining the format using JSON schema in Ollama-compatible models, improving flexibility and validation of model outputs.
+- 🔍 **Chat Sidebar Search "Clear" Button**: Quickly clear search filters in chat sidebar using the new ✖️ button—streamline your chat navigation with one click.
+- 🗂️ **Auto-Focus + Enter Submit for Folder Name**: When creating a new folder, the system automatically enters rename mode with name preselected—simplifying your org workflow.
+- 🧱 **Markdown Alerts Rendering**: Blockquotes with syntax hinting (e.g. ⚠️, ℹ️, ✅) now render styled Markdown alert banners, making messages and documentation more visually structured.
+- 🔁 **Hybrid Search Runs in Parallel Now**: Hybrid (BM25 + embedding) search components now run in parallel—dramatically reducing response times and speeding up document retrieval.
+- 📋 **Cleaner UI for Tool Call Display**: Optimized the visual layout of called tools inside chat messages for better clarity and reduced visual clutter.
+- 🧪 **Playwright Timeout Now Configurable**: Default timeout for Playwright processes is now shorter and adjustable via environment variables—making web scraping more robust and tunable to environments.
+- 📈 **OpenTelemetry Support for Observability**: Open WebUI now integrates with OpenTelemetry, allowing you to connect with tools like Grafana, Jaeger, or Prometheus for detailed performance insights and real-time visibility—entirely opt-in and fully self-hosted. Even if enabled, no data is ever sent to us, ensuring your privacy and ownership over all telemetry data. +- 🛠 **General UI Enhancements & UX Polish**: Numerous refinements across sidebar, code blocks, modal interactions, button alignment, scrollbar visibility, and folder behavior improve overall fluidity and usability of the interface. +- 🧱 **General Backend Refactoring**: Numerous backend components have been refactored to improve stability, maintainability, and performance—ensuring a more consistent and reliable system across all features. +- 🌍 **Internationalization Language Support Updates**: Added Estonian 🇪🇪 and Galician 🇬🇶 languages, improved Spanish 🇪🇸 (fully revised), Traditional Chinese 🇹🇼, Simplified Chinese 🇨🇳, Turkish 🇹🇷, Catalan 🇨🇦, Ukrainian 🇺🇦, and German 🇩🇪 for a more localized and inclusive interface. + +### Fixed + +- 🧑‍💻 **Firefox Input Height Bug**: Text input in Firefox now maintains proper height, ensuring message boxes look consistent and behave predictably. +- 🧾 **Tika Blank Line Bug**: PDFs processed with Apache Tika 3.1.0.0 no longer introduce excessive blank lines—improving RAG output quality and visual cleanliness. +- 🧪 **CSV Loader Encoding Issues**: CSV files with unknown encodings now automatically detect character sets, resolving import errors in non-UTF-8 datasets. +- ✅ **LDAP Auth Config Fix**: Path to certificate file is now optional for LDAP setups, fixing authentication trouble for users without preconfigured cert paths. +- 📥 **File Deletion in Bypass Mode**: Resolved issue where files couldn’t be deleted from knowledge when “bypass embedding” mode was enabled. 
+- 🧩 **Hybrid Search Result Sorting & Deduplication Fixed**: Fixed citation and sorting issues in RAG hybrid and reranker modes, ensuring retrieved documents are shown in correct order per score. +- 🧷 **Model Export/Import Broken for a Single Model**: Fixed bug where individual models couldn’t be exported or re-imported, restoring full portability. +- 📫 **Auth Redirect Fix**: Logged-in users are now routed properly without unnecessary login prompts when already authenticated. + +### Changed + +- 🧠 **Prompt Autocompletion Disabled By Default**: Autocomplete suggestions while typing are now disabled unless explicitly re-enabled in user preferences—reduces distractions while composing prompts for advanced users. +- 🧾 **Normalize Citation Numbering**: Source citations now properly begin from "1" instead of "0"—improving consistency and professional presentation in AI outputs. +- 📚 **Improved Error Handling from Pipelines**: Pipelines now show the actual returned error message from failed tasks rather than generic "Connection closed"—making debugging far more user-friendly. + +### Removed + +- 🧾 **ENABLE_AUDIT_LOGS Setting Removed**: Deprecated setting “ENABLE_AUDIT_LOGS” has been fully removed—now controlled via “AUDIT_LOG_LEVEL” instead. + ## [0.5.20] - 2025-03-05 ### Added From da561c50d093f31a6a0c7671a8940f6a0701fa0e Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Mon, 31 Mar 2025 18:36:08 -0700 Subject: [PATCH 489/623] doc: wording --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7f4b598ef8c..cc5895ce96f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,7 +10,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added - 🧩 **External Tool Server Support via OpenAPI**: Connect Open WebUI to any OpenAPI-compatible REST server instantly—offering immediate integration with thousands of developer tools, SDKs, and SaaS systems for powerful extensibility. 
Learn more: https://github.com/open-webui/openapi-servers -- 🛠️ **MCP Tool Support via MCPO**: You can now convert and expose your internal MCP tools as interoperable OpenAPI HTTP servers within Open WebUI for seamless, plug-n-play AI toolchain creation. Learn more: https://github.com/open-webui/mcpo +- 🛠️ **MCP Server Support via MCPO**: You can now convert and expose your internal MCP tools as interoperable OpenAPI HTTP servers within Open WebUI for seamless, plug-n-play AI toolchain creation. Learn more: https://github.com/open-webui/mcpo - 📨 **/messages Chat API Endpoint Support**: For power users building external AI systems, new endpoints allow precise control of messages asynchronously—feed long-running external responses into Open WebUI chats without coupling with the frontend. - 📝 **Client-Side PDF Generation**: PDF exports are now generated fully client-side for much faster performance and drastically improved output quality—perfect for saving conversations or documents. - 💼 **Enforced Temporary Chats Mode**: Admins can now enforce temporary chat sessions by default to align with stringent data retention and compliance requirements. From dca68871e6cd4c84d322e59ab9716f2283111120 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Mon, 31 Mar 2025 18:39:30 -0700 Subject: [PATCH 490/623] doc: wording --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cc5895ce96f..8a8f7064cb1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,7 +15,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - 📝 **Client-Side PDF Generation**: PDF exports are now generated fully client-side for much faster performance and drastically improved output quality—perfect for saving conversations or documents. - 💼 **Enforced Temporary Chats Mode**: Admins can now enforce temporary chat sessions by default to align with stringent data retention and compliance requirements. 
- 🌍 **Public Resource Sharing Permission Controls**: Fine-grained user group permissions now allow enabling/disabling public sharing for models, knowledge, prompts, and tools—ideal for privacy, team control, and internal deployments. -- 📦 **Custom pip Options for Tools/Functions**: Tools and Functions requirements can now include custom pip installation options—improving compatibility, support for private indexes, and better control over Python environments. +- 📦 **Custom pip Options for Tools/Functions**: You can now specify custom pip installation options with "PIP_OPTIONS", "PIP_PACKAGE_INDEX_OPTIONS" environment variables—improving compatibility, support for private indexes, and better control over Python environments. - 🔢 **Editable Message Counter**: You can now double-click the message count number and jump straight to editing the index—quickly navigate complex chats or regenerate specific messages precisely. - 🧠 **Embedding Prefix Support Added**: Add custom prefixes to your embeddings for instruct-style tokens, enabling stronger model alignment and more consistent RAG performance. - 🙈 **Ability to Hide Base Models**: Optionally hide base models from the UI, helping users streamline model visibility and limit access to only usable endpoints. 
From 8f8d8ba27d4170790a6e8aa35ad3343374adb398 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Mon, 31 Mar 2025 18:40:49 -0700 Subject: [PATCH 491/623] doc: changelog --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8a8f7064cb1..c452a784771 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,7 +18,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - 📦 **Custom pip Options for Tools/Functions**: You can now specify custom pip installation options with "PIP_OPTIONS", "PIP_PACKAGE_INDEX_OPTIONS" environment variables—improving compatibility, support for private indexes, and better control over Python environments. - 🔢 **Editable Message Counter**: You can now double-click the message count number and jump straight to editing the index—quickly navigate complex chats or regenerate specific messages precisely. - 🧠 **Embedding Prefix Support Added**: Add custom prefixes to your embeddings for instruct-style tokens, enabling stronger model alignment and more consistent RAG performance. -- 🙈 **Ability to Hide Base Models**: Optionally hide base models from the UI, helping users streamline model visibility and limit access to only usable endpoints. +- 🙈 **Ability to Hide Base Models**: Optionally hide base models from the UI, helping users streamline model visibility and limit access to only usable endpoints.. +- 📚 **Docling Content Extraction Support**: Open WebUI now supports Docling as a content extraction engine, enabling smarter and more accurate parsing of complex file formats—ideal for advanced document understanding and Retrieval-Augmented Generation (RAG) workflows. - 🗃️ **Redis Sentinel Support Added**: Enhance deployment redundancy with support for Redis Sentinel for highly available, failover-safe Redis-based caching or pub/sub. 
- 📚 **JSON Schema Format for Ollama**: Added support for defining the format using JSON schema in Ollama-compatible models, improving flexibility and validation of model outputs. - 🔍 **Chat Sidebar Search "Clear” Button**: Quickly clear search filters in chat sidebar using the new ✖️ button—streamline your chat navigation with one click. From 13f7a4cf568b355798b22f6e5d0d16a76a80ab39 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Mon, 31 Mar 2025 18:43:27 -0700 Subject: [PATCH 492/623] doc: wording --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c452a784771..da1a1f10114 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,7 +12,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - 🧩 **External Tool Server Support via OpenAPI**: Connect Open WebUI to any OpenAPI-compatible REST server instantly—offering immediate integration with thousands of developer tools, SDKs, and SaaS systems for powerful extensibility. Learn more: https://github.com/open-webui/openapi-servers - 🛠️ **MCP Server Support via MCPO**: You can now convert and expose your internal MCP tools as interoperable OpenAPI HTTP servers within Open WebUI for seamless, plug-n-play AI toolchain creation. Learn more: https://github.com/open-webui/mcpo - 📨 **/messages Chat API Endpoint Support**: For power users building external AI systems, new endpoints allow precise control of messages asynchronously—feed long-running external responses into Open WebUI chats without coupling with the frontend. -- 📝 **Client-Side PDF Generation**: PDF exports are now generated fully client-side for much faster performance and drastically improved output quality—perfect for saving conversations or documents. +- 📝 **Client-Side PDF Generation**: PDF exports are now generated fully client-side for drastically improved output quality—perfect for saving conversations or documents. 
- 💼 **Enforced Temporary Chats Mode**: Admins can now enforce temporary chat sessions by default to align with stringent data retention and compliance requirements. - 🌍 **Public Resource Sharing Permission Controls**: Fine-grained user group permissions now allow enabling/disabling public sharing for models, knowledge, prompts, and tools—ideal for privacy, team control, and internal deployments. - 📦 **Custom pip Options for Tools/Functions**: You can now specify custom pip installation options with "PIP_OPTIONS", "PIP_PACKAGE_INDEX_OPTIONS" environment variables—improving compatibility, support for private indexes, and better control over Python environments. From 1b7c125f009d7c431fbdd7b677769e15ae19d0d0 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Mon, 31 Mar 2025 18:47:08 -0700 Subject: [PATCH 493/623] refac --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index da1a1f10114..f6e8f7d297a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,7 +31,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - 📈 **OpenTelemetry Support for Observability**: Open WebUI now integrates with OpenTelemetry, allowing you to connect with tools like Grafana, Jaeger, or Prometheus for detailed performance insights and real-time visibility—entirely opt-in and fully self-hosted. Even if enabled, no data is ever sent to us, ensuring your privacy and ownership over all telemetry data. - 🛠 **General UI Enhancements & UX Polish**: Numerous refinements across sidebar, code blocks, modal interactions, button alignment, scrollbar visibility, and folder behavior improve overall fluidity and usability of the interface. - 🧱 **General Backend Refactoring**: Numerous backend components have been refactored to improve stability, maintainability, and performance—ensuring a more consistent and reliable system across all features. 
-- 🌍 **Internationalization Language Support Updates**: Added Estonian 🇪🇪 and Galician 🇬🇶 languages, improved Spanish 🇪🇸 (fully revised), Traditional Chinese 🇹🇼, Simplified Chinese 🇨🇳, Turkish 🇹🇷, Catalan 🇨🇦, Ukrainian 🇺🇦, and German 🇩🇪 for a more localized and inclusive interface. +- 🌍 **Internationalization Language Support Updates**: Added Estonian and Galician languages, improved Spanish (fully revised), Traditional Chinese, Simplified Chinese, Turkish, Catalan, Ukrainian, and German for a more localized and inclusive interface. ### Fixed From 1d30d8a13adfc3081204b94d058dee08af1256d3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Apr 2025 02:22:11 +0000 Subject: [PATCH 494/623] build(deps): bump python-multipart from 0.0.18 to 0.0.20 in /backend Bumps [python-multipart](https://github.com/Kludex/python-multipart) from 0.0.18 to 0.0.20. - [Release notes](https://github.com/Kludex/python-multipart/releases) - [Changelog](https://github.com/Kludex/python-multipart/blob/master/CHANGELOG.md) - [Commits](https://github.com/Kludex/python-multipart/compare/0.0.18...0.0.20) --- updated-dependencies: - dependency-name: python-multipart dependency-version: 0.0.20 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- backend/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/requirements.txt b/backend/requirements.txt index ca2ea50609d..278898a9d48 100644 --- a/backend/requirements.txt +++ b/backend/requirements.txt @@ -1,7 +1,7 @@ fastapi==0.115.7 uvicorn[standard]==0.34.0 pydantic==2.10.6 -python-multipart==0.0.18 +python-multipart==0.0.20 python-socketio==5.11.3 python-jose==3.4.0 From fc5c8dbe3f9354e59f3485482906db6e3641ad3e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Apr 2025 02:22:13 +0000 Subject: [PATCH 495/623] build(deps): bump einops from 0.8.0 to 0.8.1 in /backend Bumps [einops](https://github.com/arogozhnikov/einops) from 0.8.0 to 0.8.1. - [Release notes](https://github.com/arogozhnikov/einops/releases) - [Commits](https://github.com/arogozhnikov/einops/compare/v0.8.0...v0.8.1) --- updated-dependencies: - dependency-name: einops dependency-version: 0.8.1 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- backend/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/requirements.txt b/backend/requirements.txt index ca2ea50609d..56f5d4c39cc 100644 --- a/backend/requirements.txt +++ b/backend/requirements.txt @@ -55,7 +55,7 @@ elasticsearch==8.17.1 transformers sentence-transformers==3.3.1 colbert-ai==0.2.21 -einops==0.8.0 +einops==0.8.1 ftfy==6.2.3 From bb0e11bb970a8bb1044f039d749474f4d026dee7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Apr 2025 02:22:16 +0000 Subject: [PATCH 496/623] build(deps): bump pypandoc from 1.13 to 1.15 in /backend Bumps [pypandoc](https://github.com/JessicaTegner/pypandoc) from 1.13 to 1.15. 
- [Release notes](https://github.com/JessicaTegner/pypandoc/releases) - [Changelog](https://github.com/JessicaTegner/pypandoc/blob/master/release.md) - [Commits](https://github.com/JessicaTegner/pypandoc/compare/v1.13...v1.15) --- updated-dependencies: - dependency-name: pypandoc dependency-version: '1.15' dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- backend/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/requirements.txt b/backend/requirements.txt index ca2ea50609d..e0d041a0066 100644 --- a/backend/requirements.txt +++ b/backend/requirements.txt @@ -67,7 +67,7 @@ python-pptx==1.0.0 unstructured==0.16.17 nltk==3.9.1 Markdown==3.7 -pypandoc==1.13 +pypandoc==1.15 pandas==2.2.3 openpyxl==3.1.5 pyxlsb==1.0.10 From e0ec2cdeb021cbfcc75cf10eb6cc1fba28fe5b11 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Mon, 31 Mar 2025 20:32:12 -0700 Subject: [PATCH 497/623] refac: $user --- .../components/admin/Settings/Connections.svelte | 2 +- .../components/admin/Settings/Evaluations.svelte | 2 +- src/lib/components/admin/Settings/Images.svelte | 2 +- src/lib/components/admin/Settings/Interface.svelte | 2 +- .../admin/Settings/Models/ManageModelsModal.svelte | 2 +- src/lib/components/channel/Channel.svelte | 2 +- src/lib/components/channel/Messages.svelte | 8 ++++---- src/lib/components/channel/Messages/Message.svelte | 4 ++-- src/lib/components/channel/Navbar.svelte | 4 ++-- src/lib/components/channel/Thread.svelte | 2 +- src/lib/components/chat/Chat.svelte | 10 +++++----- src/lib/components/chat/ChatPlaceholder.svelte | 2 +- src/lib/components/chat/Controls/Controls.svelte | 2 +- .../chat/MessageInput/Commands/Prompts.svelte | 2 +- .../components/chat/MessageInput/InputMenu.svelte | 2 +- .../chat/Messages/ResponseMessage.svelte | 4 ++-- src/lib/components/chat/ModelSelector.svelte | 2 +- .../components/chat/ModelSelector/Selector.svelte | 2 +- 
src/lib/components/chat/Navbar.svelte | 6 +++--- src/lib/components/chat/Placeholder.svelte | 2 +- src/lib/components/chat/Settings/Account.svelte | 10 +++++----- src/lib/components/chat/Settings/General.svelte | 2 +- src/lib/components/chat/Settings/Interface.svelte | 2 +- src/lib/components/chat/SettingsModal.svelte | 6 +++--- src/lib/components/layout/Navbar.svelte | 4 ++-- src/lib/components/layout/Sidebar.svelte | 14 +++++++------- src/routes/(app)/+layout.svelte | 12 ++++++------ 27 files changed, 57 insertions(+), 57 deletions(-) diff --git a/src/lib/components/admin/Settings/Connections.svelte b/src/lib/components/admin/Settings/Connections.svelte index 2fcfadaec81..ac0566f225d 100644 --- a/src/lib/components/admin/Settings/Connections.svelte +++ b/src/lib/components/admin/Settings/Connections.svelte @@ -136,7 +136,7 @@ }; onMount(async () => { - if ($user.role === 'admin') { + if ($user?.role === 'admin') { let ollamaConfig = {}; let openaiConfig = {}; diff --git a/src/lib/components/admin/Settings/Evaluations.svelte b/src/lib/components/admin/Settings/Evaluations.svelte index cf003504c5f..b46669b526e 100644 --- a/src/lib/components/admin/Settings/Evaluations.svelte +++ b/src/lib/components/admin/Settings/Evaluations.svelte @@ -77,7 +77,7 @@ }; onMount(async () => { - if ($user.role === 'admin') { + if ($user?.role === 'admin') { evaluationConfig = await getConfig(localStorage.token).catch((err) => { toast.error(err); return null; diff --git a/src/lib/components/admin/Settings/Images.svelte b/src/lib/components/admin/Settings/Images.svelte index 88039a0e39d..64fa249dc35 100644 --- a/src/lib/components/admin/Settings/Images.svelte +++ b/src/lib/components/admin/Settings/Images.svelte @@ -176,7 +176,7 @@ }; onMount(async () => { - if ($user.role === 'admin') { + if ($user?.role === 'admin') { const res = await getConfig(localStorage.token).catch((error) => { toast.error(`${error}`); return null; diff --git a/src/lib/components/admin/Settings/Interface.svelte 
b/src/lib/components/admin/Settings/Interface.svelte index e3542475e6d..adb4fbdf911 100644 --- a/src/lib/components/admin/Settings/Interface.svelte +++ b/src/lib/components/admin/Settings/Interface.svelte @@ -380,7 +380,7 @@
- {#if $user.role === 'admin'} + {#if $user?.role === 'admin'}
diff --git a/src/lib/components/admin/Settings/Models/ManageModelsModal.svelte b/src/lib/components/admin/Settings/Models/ManageModelsModal.svelte index 6b53952e157..117009ddaa5 100644 --- a/src/lib/components/admin/Settings/Models/ManageModelsModal.svelte +++ b/src/lib/components/admin/Settings/Models/ManageModelsModal.svelte @@ -19,7 +19,7 @@ let ollamaConfig = null; onMount(async () => { - if ($user.role === 'admin') { + if ($user?.role === 'admin') { await Promise.all([ (async () => { ollamaConfig = await getOllamaConfig(localStorage.token); diff --git a/src/lib/components/channel/Channel.svelte b/src/lib/components/channel/Channel.svelte index 275f76d29cf..ce2aa54f1c7 100644 --- a/src/lib/components/channel/Channel.svelte +++ b/src/lib/components/channel/Channel.svelte @@ -106,7 +106,7 @@ messages[idx] = data; } } else if (type === 'typing' && event.message_id === null) { - if (event.user.id === $user.id) { + if (event.user.id === $user?.id) { return; } diff --git a/src/lib/components/channel/Messages.svelte b/src/lib/components/channel/Messages.svelte index f8ff2f229c0..e1bc326b375 100644 --- a/src/lib/components/channel/Messages.svelte +++ b/src/lib/components/channel/Messages.svelte @@ -132,7 +132,7 @@ if ( (message?.reactions ?? []) .find((reaction) => reaction.name === name) - ?.user_ids?.includes($user.id) ?? + ?.user_ids?.includes($user?.id) ?? 
false ) { messages = messages.map((m) => { @@ -140,7 +140,7 @@ const reaction = m.reactions.find((reaction) => reaction.name === name); if (reaction) { - reaction.user_ids = reaction.user_ids.filter((id) => id !== $user.id); + reaction.user_ids = reaction.user_ids.filter((id) => id !== $user?.id); reaction.count = reaction.user_ids.length; if (reaction.count === 0) { @@ -167,12 +167,12 @@ const reaction = m.reactions.find((reaction) => reaction.name === name); if (reaction) { - reaction.user_ids.push($user.id); + reaction.user_ids.push($user?.id); reaction.count = reaction.user_ids.length; } else { m.reactions.push({ name: name, - user_ids: [$user.id], + user_ids: [$user?.id], count: 1 }); } diff --git a/src/lib/components/channel/Messages/Message.svelte b/src/lib/components/channel/Messages/Message.svelte index 0736a25129f..9989388060a 100644 --- a/src/lib/components/channel/Messages/Message.svelte +++ b/src/lib/components/channel/Messages/Message.svelte @@ -106,7 +106,7 @@ {/if} - {#if message.user_id === $user.id || $user.role === 'admin'} + {#if message.user_id === $user?.id || $user?.role === 'admin'}
- {#if $user.role === 'admin' || $user?.permissions.chat?.controls} + {#if $user?.role === 'admin' || $user?.permissions.chat?.controls}
diff --git a/src/lib/components/chat/MessageInput/Commands/Prompts.svelte b/src/lib/components/chat/MessageInput/Commands/Prompts.svelte index 0e7a601e4be..26cf1d36831 100644 --- a/src/lib/components/chat/MessageInput/Commands/Prompts.svelte +++ b/src/lib/components/chat/MessageInput/Commands/Prompts.svelte @@ -86,7 +86,7 @@ if (command.content.includes('{{USER_NAME}}')) { console.log($user); - const name = $user.name || 'User'; + const name = $user?.name || 'User'; text = text.replaceAll('{{USER_NAME}}', name); } diff --git a/src/lib/components/chat/MessageInput/InputMenu.svelte b/src/lib/components/chat/MessageInput/InputMenu.svelte index 07f337dcbf5..27fe2cde298 100644 --- a/src/lib/components/chat/MessageInput/InputMenu.svelte +++ b/src/lib/components/chat/MessageInput/InputMenu.svelte @@ -39,7 +39,7 @@ } let fileUploadEnabled = true; - $: fileUploadEnabled = $user.role === 'admin' || $user?.permissions?.chat?.file_upload; + $: fileUploadEnabled = $user?.role === 'admin' || $user?.permissions?.chat?.file_upload; const init = async () => { if ($_tools === null) { diff --git a/src/lib/components/chat/Messages/ResponseMessage.svelte b/src/lib/components/chat/Messages/ResponseMessage.svelte index a8c2e7c9fad..04a454a0ef2 100644 --- a/src/lib/components/chat/Messages/ResponseMessage.svelte +++ b/src/lib/components/chat/Messages/ResponseMessage.svelte @@ -920,7 +920,7 @@ {#if message.done} {#if !readOnly} - {#if $user.role === 'user' ? ($user?.permissions?.chat?.edit ?? true) : true} + {#if $user?.role === 'user' ? ($user?.permissions?.chat?.edit ?? true) : true}
- {#if $user.role === 'admin' || $user?.permissions.chat?.controls} + {#if $user?.role === 'admin' || $user?.permissions.chat?.controls}
diff --git a/src/lib/components/chat/Settings/Interface.svelte b/src/lib/components/chat/Settings/Interface.svelte index b0a0b79706f..171cfe27ac0 100644 --- a/src/lib/components/chat/Settings/Interface.svelte +++ b/src/lib/components/chat/Settings/Interface.svelte @@ -441,7 +441,7 @@
- {#if $user.role === 'admin'} + {#if $user?.role === 'admin'}
diff --git a/src/lib/components/chat/SettingsModal.svelte b/src/lib/components/chat/SettingsModal.svelte index e3b20c2db72..15bf9c0baeb 100644 --- a/src/lib/components/chat/SettingsModal.svelte +++ b/src/lib/components/chat/SettingsModal.svelte @@ -462,7 +462,7 @@
{$i18n.t('Interface')}
{:else if tabId === 'connections'} - {#if $user.role === 'admin' || ($user.role === 'user' && $config?.features?.enable_direct_connections)} + {#if $user?.role === 'admin' || ($user?.role === 'user' && $config?.features?.enable_direct_connections)}
{:else if tabId === 'admin'} - {#if $user.role === 'admin'} + {#if $user?.role === 'admin'} {/if} diff --git a/src/routes/(app)/+layout.svelte b/src/routes/(app)/+layout.svelte index b68cc67a013..709813ef180 100644 --- a/src/routes/(app)/+layout.svelte +++ b/src/routes/(app)/+layout.svelte @@ -55,9 +55,9 @@ let version; onMount(async () => { - if ($user === undefined) { + if ($user === undefined || $user === null) { await goto('/auth'); - } else if (['user', 'admin'].includes($user.role)) { + } else if (['user', 'admin'].includes($user?.role)) { try { // Check if IndexedDB exists DB = await openDB('Chats', 1); @@ -191,7 +191,7 @@ } }); - if ($user.role === 'admin' && ($settings?.showChangelog ?? true)) { + if ($user?.role === 'admin' && ($settings?.showChangelog ?? true)) { showChangelog.set($settings?.version !== $config.version); } @@ -199,14 +199,14 @@ temporaryChatEnabled.set(true); } - console.log($user.permissions); + console.log($user?.permissions); if ($user?.permissions?.chat?.temporary_enforced) { temporaryChatEnabled.set(true); } // Check for version updates - if ($user.role === 'admin') { + if ($user?.role === 'admin') { // Check if the user has dismissed the update toast in the last 24 hours if (localStorage.dismissedUpdateToast) { const dismissedUpdateToast = new Date(Number(localStorage.dismissedUpdateToast)); @@ -255,7 +255,7 @@ class=" text-gray-700 dark:text-gray-100 bg-white dark:bg-gray-900 h-screen max-h-[100dvh] overflow-auto flex flex-row justify-end" > {#if loaded} - {#if !['user', 'admin'].includes($user.role)} + {#if !['user', 'admin'].includes($user?.role)} {:else if localDBChats.length > 0}
From abba7c128c2b38199ff1a50a9de1fb277d4c840a Mon Sep 17 00:00:00 2001 From: Erik Date: Tue, 1 Apr 2025 08:21:03 +0200 Subject: [PATCH 498/623] [FEAT]-Adjust Translations for temporary chat --- .../components/chat/ChatPlaceholder.svelte | 20 ++++++++++--------- src/lib/components/chat/Placeholder.svelte | 20 ++++++++++--------- src/lib/i18n/locales/de-DE/translation.json | 5 +++-- src/lib/i18n/locales/en-GB/translation.json | 1 + src/lib/i18n/locales/en-US/translation.json | 1 + 5 files changed, 27 insertions(+), 20 deletions(-) diff --git a/src/lib/components/chat/ChatPlaceholder.svelte b/src/lib/components/chat/ChatPlaceholder.svelte index 9804dcaca0b..99eb8cbce96 100644 --- a/src/lib/components/chat/ChatPlaceholder.svelte +++ b/src/lib/components/chat/ChatPlaceholder.svelte @@ -29,6 +29,8 @@ $: models = modelIds.map((id) => $_models.find((m) => m.id === id)); + const tempChatTooltipText = 'This chat won’t appear in history and your messages will not be saved.'; + onMount(() => { mounted = true; }); @@ -67,15 +69,15 @@
{#if $temporaryChatEnabled} - -
- Temporary Chat -
-
+ +
+ {$i18n.t('Temporary Chat')} +
+
{/if}
$_models.find((m) => m.id === id)); + const tempChatTooltipText = 'This chat won’t appear in history and your messages will not be saved.'; + onMount(() => {});
{#if $temporaryChatEnabled} - -
- Temporary Chat -
-
+ +
+ {$i18n.t('Temporary Chat')} +
+
{/if}
Date: Tue, 1 Apr 2025 08:26:21 +0200 Subject: [PATCH 499/623] fix: update Polish translations for clarity and accuracy --- src/lib/i18n/locales/pl-PL/translation.json | 72 ++++++++++----------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/src/lib/i18n/locales/pl-PL/translation.json b/src/lib/i18n/locales/pl-PL/translation.json index 0d7185a2060..7441bffc423 100644 --- a/src/lib/i18n/locales/pl-PL/translation.json +++ b/src/lib/i18n/locales/pl-PL/translation.json @@ -92,7 +92,7 @@ "Archive All Chats": "Archiwizuj wszystkie rozmowy", "Archived Chats": "Zarchiwizowane rozmowy", "archived-chat-export": "archiwizowany eksport czatu", - "Are you sure you want to clear all memories? This action cannot be undone.": "", + "Are you sure you want to clear all memories? This action cannot be undone.": "Czy na pewno chcesz wyczyścić wszystkie wspomnienia? Tej akcji nie można cofnąć.", "Are you sure you want to delete this channel?": "Czy na pewno chcesz usunąć ten kanał?", "Are you sure you want to delete this message?": "Czy na pewno chcesz usunąć tę wiadomość?", "Are you sure you want to unarchive all archived chats?": "Czy na pewno chcesz przywrócić wszystkie zapisane rozmowy?", @@ -198,7 +198,7 @@ "Code formatted successfully": "Kod został sformatowany pomyślnie.", "Code Interpreter": "Interpreter kodu", "Code Interpreter Engine": "Silnik interpretatora kodu", - "Code Interpreter Prompt Template": "Szablon promtu interpretera kodu", + "Code Interpreter Prompt Template": "Szablon promptu interpretera kodu", "Collapse": "", "Collection": "Zbiór", "Color": "Kolor", @@ -222,7 +222,7 @@ "Constrains effort on reasoning for reasoning models. 
Only applicable to reasoning models from specific providers that support reasoning effort.": "", "Contact Admin for WebUI Access": "Skontaktuj się z administratorem, aby uzyskać dostęp do WebUI.", "Content": "Treść", - "Content Extraction Engine": "", + "Content Extraction Engine": "Silnik ekstrakcji treści", "Context Length": "Długość kontekstu", "Continue Response": "Kontynuuj odpowiedź", "Continue with {{provider}}": "Kontynuuj z {{provider}}", @@ -256,7 +256,7 @@ "Created At": "Utworzono o", "Created by": "Stworzone przez", "CSV Import": "Import CSV", - "Ctrl+Enter to Send": "", + "Ctrl+Enter to Send": "Ctrl+Enter aby wysłać", "Current Model": "Aktualny model", "Current Password": "Aktualne hasło", "Custom": "Niestandardowy", @@ -289,7 +289,7 @@ "Delete function?": "Czy na pewno chcesz usunąć funkcję?", "Delete Message": "Usuń wiadomość", "Delete message?": "Usuń wiadomość?", - "Delete prompt?": "Czy chcesz usunąć podpowiedź?", + "Delete prompt?": "Czy chcesz usunąć prompt?", "delete this link": "usuń to połączenie", "Delete tool?": "Usunąć narzędzie?", "Delete User": "Usuń użytkownika", @@ -416,7 +416,7 @@ "Enter model tag (e.g. {{modelTag}})": "Wprowadź znacznik modelu (np. {{modelTag}})", "Enter Mojeek Search API Key": "Wprowadź klucz API Mojeek Search", "Enter Number of Steps (e.g. 50)": "Podaj liczbę kroków (np. 50)", - "Enter Perplexity API Key": "", + "Enter Perplexity API Key": "Klucz API Perplexity", "Enter proxy URL (e.g. https://user:password@host:port)": "Podaj adres URL proxy (np. https://user:password@host:port)", "Enter reasoning effort": "Podaj powód wysiłku", "Enter Sampler (e.g. Euler a)": "Wprowadź sampler (np. Euler a)", @@ -465,14 +465,14 @@ "Example: mail": "Przykład: mail", "Example: ou=users,dc=foo,dc=example": "Przykład: ou=users,dc=foo,dc=example", "Example: sAMAccountName or uid or userPrincipalName": "Przykład: sAMAccountName lub uid lub userPrincipalName", - "Exceeded the number of seats in your license. 
Please contact support to increase the number of seats.": "", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "Przekroczono liczbę stanowisk w licencji. Skontaktuj się z pomocą techniczną, aby zwiększyć liczbę stanowisk.", "Exclude": "Wykluczyć", "Execute code for analysis": "Wykonaj kod do analizy", "Executing `{{NAME}}`...": "", "Expand": "", "Experimental": "Eksperymentalne", - "Explain": "", - "Explain this section to me in more detail": "", + "Explain": "Wyjaśnij", + "Explain this section to me in more detail": "Wyjaśnij mi ten fragment bardziej szczegółowo", "Explore the cosmos": "Odkrywaj kosmos", "Export": "Eksport", "Export All Archived Chats": "Wyeksportuj wszystkie archiwalne rozmowy", @@ -487,7 +487,7 @@ "Export to CSV": "Eksport do CSV", "Export Tools": "Eksportuj narzędzia", "External": "", - "External Models": "Zewnętrzne modele", + "External Models": "Modele lokalne", "Failed to add file.": "Nie udało się dodać pliku.", "Failed to connect to {{URL}} OpenAPI tool server": "", "Failed to create API Key.": "Nie udało się wygenerować klucza API.", @@ -541,9 +541,9 @@ "Functions allow arbitrary code execution": "Funkcje umożliwiają wykonanie dowolnego kodu", "Functions allow arbitrary code execution.": "Funkcje umożliwiają wykonanie dowolnego kodu.", "Functions imported successfully": "Funkcje zostały pomyślnie zaimportowane", - "Gemini": "", - "Gemini API Config": "", - "Gemini API Key is required.": "", + "Gemini": "Gemini", + "Gemini API Config": "Konfiguracja API Gemini", + "Gemini API Key is required.": "Wymagany jest klucz API Gemini.", "General": "Ogólne", "Generate an image": "Wygeneruj obraz", "Generate Image": "Wygeneruj obraz", @@ -585,8 +585,8 @@ "Image Generation (Experimental)": "Generowanie obrazu (eksperymentalne)", "Image Generation Engine": "Silnik generowania obrazów", "Image Max Compression Size": "Maksymalny rozmiar kompresji obrazu", - "Image Prompt Generation": "Generowanie 
podpowiedzi obrazu", - "Image Prompt Generation Prompt": "Generowanie obrazu Prompt", + "Image Prompt Generation": "Generowanie promptu obrazu", + "Image Prompt Generation Prompt": "Prompt do generowania obrazów", "Image Settings": "Ustawienia grafiki", "Images": "Obrazy", "Import Chats": "Importuj czaty", @@ -608,7 +608,7 @@ "Integration": "", "Interface": "Interfejs", "Invalid file format.": "Nieprawidłowy format pliku.", - "Invalid JSON schema": "", + "Invalid JSON schema": "Nieprawidłowy schemat JSON", "Invalid Tag": "Nieprawidłowy tag", "is typing...": "Pisanie...", "January": "Styczeń", @@ -636,7 +636,7 @@ "Kokoro.js (Browser)": "Kokoro.js (Przeglądarka)", "Kokoro.js Dtype": "Kokoro.js Dtype", "Label": "Nazwa serwera", - "Landing Page Mode": "Tryb strony głownej", + "Landing Page Mode": "Tryb strony głównej", "Language": "Język", "Last Active": "Ostatnio aktywny", "Last Modified": "Ostatnia modyfikacja", @@ -738,7 +738,7 @@ "No HTML, CSS, or JavaScript content found.": "Nie znaleziono żadnej zawartości HTML, CSS ani JavaScript.", "No inference engine with management support found": "Nie znaleziono silnika wnioskującego z obsługą zarządzania", "No knowledge found": "Brak znalezionej wiedzy", - "No memories to clear": "", + "No memories to clear": "Brak wspomnień do wyczyszczenia", "No model IDs": "Brak identyfikatorów modeli", "No models found": "Nie znaleziono modeli", "No models selected": "Brak wybranych modeli", @@ -805,7 +805,7 @@ "Permission denied when accessing microphone": "Odmowa dostępu podczas uzyskiwania dostępu do mikrofonu", "Permission denied when accessing microphone: {{error}}": "Odmowa dostępu do mikrofonu: {{error}}", "Permissions": "Uprawnienia", - "Perplexity API Key": "", + "Perplexity API Key": "Klucz API Perplexity", "Personalization": "Personalizacja", "Pin": "Przypnij", "Pinned": "Przypięty", @@ -833,20 +833,20 @@ "Previous 7 days": "Ostatnie 7 dni", "Private": "", "Profile Image": "Zdjęcie profilowe", - "Prompt": "Wprowadź 
podpowiedź: ", + "Prompt": "Wprowadź prompt: ", "Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (np. podaj ciekawostkę o Imperium Rzymskim)", "Prompt Autocompletion": "", "Prompt Content": "Treść podpowiedzi", - "Prompt created successfully": "Podpowiedź została utworzona pomyślnie", - "Prompt suggestions": "Sugestie podpowiedzi", - "Prompt updated successfully": "Podpowiedź została zaktualizowana pomyślnie.", - "Prompts": "Podpowiedzi", - "Prompts Access": "Dostęp do podpowiedzi", - "Prompts Public Sharing": "", - "Public": "", + "Prompt created successfully": "Prompt został utworzony pomyślnie", + "Prompt suggestions": "Sugestie promptów", + "Prompt updated successfully": "Prompt został zaktualizowany pomyślnie.", + "Prompts": "Prompty", + "Prompts Access": "Dostęp do promptów", + "Prompts Public Sharing": "Publiczne udostępnianie promptów", + "Public": "Publiczne", "Pull \"{{searchValue}}\" from Ollama.com": "Pobierz \"{{searchValue}}\" z Ollama.com", "Pull a model from Ollama.com": "Pobierz model z Ollama.com", - "Query Generation Prompt": "Podpowiedź do generowania zapytań", + "Query Generation Prompt": "Prompt do generowania zapytań", "RAG Template": "Szablon RAG", "Rating": "Ocena", "Re-rank models by topic similarity": "Ponowny ranking modeli według podobieństwa tematycznego", @@ -941,7 +941,7 @@ "Send": "Wyślij", "Send a Message": "Wyślij wiadomość", "Send message": "Wyślij wiadomość", - "Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "Wysyła `stream_options: { include_usage: true }` w żądaniu. 
Wspierane dostawcy zwrócą informacje o użyciu tokena w odpowiedzi, gdy jest on ustawiony.", + "Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "Wysyła `stream_options: { include_usage: true }` w żądaniu.\nObsługiwani dostawcy zwrócą informacje o użyciu tokenów w odpowiedzi, gdy to ustawienie jest aktywne.", "September": "Wrzesień", "SerpApi API Key": "Klucz API SerpApi", "SerpApi Engine": "Silnik SerpApi", @@ -975,7 +975,7 @@ "Share": "Podziel się", "Share Chat": "Udostępnij rozmowę", "Share to Open WebUI Community": "Udostępnij w społeczności OpenWebUI", - "Sharing Permissions": "", + "Sharing Permissions": "Uprawnienia udostępniania", "Show": "Wyświetl", "Show \"What's New\" modal on login": "Wyświetl okno dialogowe \"What's New\" podczas logowania", "Show Admin Details in Account Pending Overlay": "Wyświetl szczegóły administratora w okienu informacyjnym o potrzebie zatwierdzenia przez administratora konta użytkownika", @@ -1010,7 +1010,7 @@ "System": "System", "System Instructions": "Instrukcje systemowe", "System Prompt": "Podpowiedź systemowa", - "Tags": "", + "Tags": "Tagi", "Tags Generation": "Generowanie tagów", "Tags Generation Prompt": "Podpowiedź do generowania tagów", "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", @@ -1093,7 +1093,7 @@ "Tools": "Narzędzia", "Tools Access": "Narzędzia Dostępu", "Tools are a function calling system with arbitrary code execution": "Narzędzia to system wywoływania funkcji z możliwością wykonania dowolnego kodu.", - "Tools Function Calling Prompt": "Narzędzia Funkcja Wywołania Prompta", + "Tools Function Calling Prompt": "Narzędzia Funkcja Wywołania Promptu", "Tools have a function calling system that allows arbitrary code execution": "Narzędzia mają funkcję wywoływania systemu, która umożliwia wykonywanie dowolnego kodu", "Tools have a function calling system that allows arbitrary code execution.": "Narzędzia mają funkcję wywoływania systemu, która umożliwia wykonanie dowolnego kodu.", "Tools Public Sharing": "", @@ -1143,7 +1143,7 @@ "user": "użytkownik", "User": "Użytkownik", "User location successfully retrieved.": "Lokalizacja użytkownika została pomyślnie pobrana.", - "User Webhooks": "", + "User Webhooks": "Webhooki użytkownika", "Username": "Nazwa użytkownika", "Users": "Użytkownicy", "Using the default arena model with all models. Click the plus button to add custom models.": "Korzystanie z domyślnego modelu areny ze wszystkimi modelami. 
Kliknij przycisk plus, aby dodać niestandardowe modele.", @@ -1178,7 +1178,7 @@ "WebUI URL": "Adres URL interfejsu internetowego", "WebUI will make requests to \"{{url}}/api/chat\"": "WebUI będzie wysyłać żądania do \"{{url}}/api/chat\".", "WebUI will make requests to \"{{url}}/chat/completions\"": "WebUI będzie wysyłać żądania do \"{{url}}/chat/completions\".", - "WebUI will make requests to \"{{url}}/openapi.json\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "WebUI będzie wysyłać żądania do \"{{url}}/openapi.json\"", "What are you trying to achieve?": "Do czego dążysz?", "What are you working on?": "Nad czym pracujesz?", "What’s New in": "Co nowego w", @@ -1211,6 +1211,6 @@ "Your account status is currently pending activation.": "Twoje konto oczekuje obecnie na aktywację.", "Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "Cała Twoja wpłata trafi bezpośrednio do dewelopera wtyczki; Open WebUI nie pobiera żadnej prowizji. 
Należy jednak pamiętać, że wybrana platforma finansowania może mieć własne opłaty.", "Youtube": "Youtube", - "Youtube Language": "", - "Youtube Proxy URL": "" + "Youtube Language": "Język Youtube", + "Youtube Proxy URL": "URL proxy Youtube" } From 295c7eb4c76a7b5973ad964e3d35b619446230f4 Mon Sep 17 00:00:00 2001 From: Erik Date: Tue, 1 Apr 2025 08:27:29 +0200 Subject: [PATCH 500/623] [improvement] default permission for new groups is false for enforce temp chat --- src/lib/components/admin/Users/Groups.svelte | 2 +- src/lib/components/admin/Users/Groups/Permissions.svelte | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib/components/admin/Users/Groups.svelte b/src/lib/components/admin/Users/Groups.svelte index 15497cb205b..e2375a624e9 100644 --- a/src/lib/components/admin/Users/Groups.svelte +++ b/src/lib/components/admin/Users/Groups.svelte @@ -64,7 +64,7 @@ delete: true, edit: true, temporary: true, - temporary_enforced: true + temporary_enforced: false }, features: { web_search: true, diff --git a/src/lib/components/admin/Users/Groups/Permissions.svelte b/src/lib/components/admin/Users/Groups/Permissions.svelte index e1aa73f2a25..389477166f0 100644 --- a/src/lib/components/admin/Users/Groups/Permissions.svelte +++ b/src/lib/components/admin/Users/Groups/Permissions.svelte @@ -25,7 +25,7 @@ edit: true, file_upload: true, temporary: true, - temporary_enforced: true + temporary_enforced: false }, features: { web_search: true, From b60beb6ff66180709dc7d906557a85476f9307ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20P=C4=99kala?= Date: Tue, 1 Apr 2025 08:39:36 +0200 Subject: [PATCH 501/623] fix: improve Polish translations for clarity and accuracy --- src/lib/i18n/locales/pl-PL/translation.json | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/lib/i18n/locales/pl-PL/translation.json b/src/lib/i18n/locales/pl-PL/translation.json index 7441bffc423..1329a7c5f69 100644 --- 
a/src/lib/i18n/locales/pl-PL/translation.json +++ b/src/lib/i18n/locales/pl-PL/translation.json @@ -10,8 +10,8 @@ "{{COUNT}} hidden lines": "", "{{COUNT}} Replies": "{{COUNT}} odpowiedzi", "{{user}}'s Chats": "Czaty użytkownika {{user}}", - "{{webUIName}} Backend Required": "Backend dla {{webUIName}} wymagany", - "*Prompt node ID(s) are required for image generation": "Wymagane są identyfikatory węzłów wyzwalających do generowania obrazu.", + "{{webUIName}} Backend Required": "Backend dla {{webUIName}} jest wymagany", + "*Prompt node ID(s) are required for image generation": "Wymagane są identyfikatory węzłów wyzwalających do generowania obrazów.", "A new version (v{{LATEST_VERSION}}) is now available.": "Dostępna jest nowa wersja (v{{LATEST_VERSION}}).", "A task model is used when performing tasks such as generating titles for chats and web search queries": "Model zadań jest wykorzystywany podczas realizacji zadań, takich jak generowanie tytułów rozmów i zapytań wyszukiwania internetowego.", "a user": "użytkownik", @@ -110,7 +110,7 @@ "August": "Sierpień", "Authenticate": "Zaloguj się", "Authentication": "Uwierzytelnianie", - "Auto-Copy Response to Clipboard": "Automatyczna kopiuj odpowiedz do schowka", + "Auto-Copy Response to Clipboard": "Automatyczne kopiowanie odpowiedzi do schowka", "Auto-playback response": "Automatyczna odpowiedź na powtórzenie", "Autocomplete Generation": "Generowanie autouzupełniania", "Autocomplete Generation Input Max Length": "Maksymalna długość wejścia dla generowania autouzupełniania", @@ -186,7 +186,7 @@ "Click here to upload a workflow.json file.": "Kliknij tutaj, aby przesłać plik workflow.json.", "click here.": "kliknij tutaj.", "Click on the user role button to change a user's role.": "Kliknij w nazwą roli użytkownika, aby zmienić rolę użytkownika.", - "Clipboard write permission denied. Please check your browser settings to grant the necessary access.": "Zezwolenie na zapis do schowka odmówione. 
Sprawdź ustawienia przeglądarki, aby przyznać wymagany dostęp.", + "Clipboard write permission denied. Please check your browser settings to grant the necessary access.": "Nie można było skopiować do schowka. Sprawdź ustawienia przeglądarki, aby przyznać wymagany dostęp.", "Clone": "Sklonuj", "Clone Chat": "Sklonuj czat", "Clone of {{TITLE}}": "Klon {{TITLE}}", @@ -194,7 +194,7 @@ "Code execution": "Wykonanie kodu", "Code Execution": "Wykonanie kodu", "Code Execution Engine": "Silnik wykonawczy kodu", - "Code Execution Timeout": "", + "Code Execution Timeout": "Limit czasu wykonania kodu", "Code formatted successfully": "Kod został sformatowany pomyślnie.", "Code Interpreter": "Interpreter kodu", "Code Interpreter Engine": "Silnik interpretatora kodu", @@ -331,7 +331,7 @@ "Documents": "Dokumenty", "does not make any external connections, and your data stays securely on your locally hosted server.": "nie nawiązuje żadnych zewnętrznych połączeń, a Twoje dane pozostają bezpiecznie na Twoim lokalnie hostowanym serwerze.", "Domain Filter List": "Lista filtrów domeny", - "Don't have an account?": "Czy nie masz konta?", + "Don't have an account?": "Nie masz konta?", "don't install random functions from sources you don't trust.": "Nie instaluj losowych funkcji z niezaufanych źródeł.", "don't install random tools from sources you don't trust.": "Nie instaluj przypadkowych narzędzi z niezaufanych źródeł.", "Don't like the style": "Nie przypadł mi do gustu styl", @@ -370,7 +370,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Model osadzania ustawiony na '{{embedding_model}}'", "Enable API Key": "Włącz klucz API", "Enable autocomplete generation for chat messages": "Włącz generowanie autouzupełniania dla wiadomości czatu", - "Enable Code Execution": "", + "Enable Code Execution": "Włącz wykonywanie kodu", "Enable Code Interpreter": "Włącz interpreter kodu", "Enable Community Sharing": "Włączanie udostępniania społecznościowego", "Enable Memory Locking (mlock) to 
prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Włącz blokowanie pamięci (mlock), aby zapobiec swappingowi danych modelu z RAM. Ta opcja blokuje zbiór stron roboczych modelu w RAM, co gwarantuje, że nie będą one wymieniane na dysk. Może to pomóc w utrzymaniu wydajności poprzez unikanie błędów strony i zapewnienie szybkiego dostępu do danych.", From 9f470a4ff50043821d37c57256835d9e67804203 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20P=C4=99kala?= Date: Tue, 1 Apr 2025 08:53:12 +0200 Subject: [PATCH 502/623] fix: update Polish translations to sound more natural --- src/lib/i18n/locales/pl-PL/translation.json | 28 ++++++++++----------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/src/lib/i18n/locales/pl-PL/translation.json b/src/lib/i18n/locales/pl-PL/translation.json index 1329a7c5f69..9fab9e535c2 100644 --- a/src/lib/i18n/locales/pl-PL/translation.json +++ b/src/lib/i18n/locales/pl-PL/translation.json @@ -51,7 +51,7 @@ "Admin": "Administrator", "Admin Panel": "Panel administracyjny", "Admin Settings": "Ustawienia administratora", - "Admins have access to all tools at all times; users need tools assigned per model in the workspace.": "Administratorzy mają dostęp do wszystkich narzędzi przez cały czas; użytkownicy potrzebują przypisanych narzędzi zgodnie z modelem w przestrzeni roboczej.", + "Admins have access to all tools at all times; users need tools assigned per model in the workspace.": "Administratorzy mają dostęp do wszystkich narzędzi przez cały czas; użytkownicy muszą mieć przydzielone narzędzia dla każdego modelu w przestrzeni roboczej.", "Advanced Parameters": "Zaawansowane ustawienia", "Advanced Params": "Zaawansowane ustawienia", "All": "", @@ -172,7 +172,7 @@ "Ciphers": "Szyfry", "Citation": "Cytat", "Clear memory": 
"Wyczyść pamięć", - "Clear Memory": "", + "Clear Memory": "Wyczyść pamięć", "click here": "kliknij tutaj", "Click here for filter guides.": "Kliknij tutaj, aby uzyskać podpowiedź do filtrów.", "Click here for help.": "Kliknij tutaj, aby uzyskać pomoc.", @@ -185,7 +185,7 @@ "Click here to select a py file.": "Kliknij tutaj, aby wybrać plik py.", "Click here to upload a workflow.json file.": "Kliknij tutaj, aby przesłać plik workflow.json.", "click here.": "kliknij tutaj.", - "Click on the user role button to change a user's role.": "Kliknij w nazwą roli użytkownika, aby zmienić rolę użytkownika.", + "Click on the user role button to change a user's role.": "Kliknij przycisk roli użytkownika, aby zmienić jego uprawnienia.", "Clipboard write permission denied. Please check your browser settings to grant the necessary access.": "Nie można było skopiować do schowka. Sprawdź ustawienia przeglądarki, aby przyznać wymagany dostęp.", "Clone": "Sklonuj", "Clone Chat": "Sklonuj czat", @@ -200,7 +200,7 @@ "Code Interpreter Engine": "Silnik interpretatora kodu", "Code Interpreter Prompt Template": "Szablon promptu interpretera kodu", "Collapse": "", - "Collection": "Zbiór", + "Collection": "Kolekcja", "Color": "Kolor", "ComfyUI": "ComfyUI", "ComfyUI API Key": "Klucz API ComfyUI", @@ -252,7 +252,7 @@ "Create Knowledge": "Utwórz wiedzę", "Create new key": "Utwórz nowy klucz", "Create new secret key": "Utwórz nowy secret key", - "Created at": "Stworzone o", + "Created at": "Utworzono o", "Created At": "Utworzono o", "Created by": "Stworzone przez", "CSV Import": "Import CSV", @@ -468,7 +468,7 @@ "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "Przekroczono liczbę stanowisk w licencji. 
Skontaktuj się z pomocą techniczną, aby zwiększyć liczbę stanowisk.", "Exclude": "Wykluczyć", "Execute code for analysis": "Wykonaj kod do analizy", - "Executing `{{NAME}}`...": "", + "Executing `{{NAME}}`...": "Wykonywanie `{{NAME}}`...", "Expand": "", "Experimental": "Eksperymentalne", "Explain": "Wyjaśnij", @@ -972,7 +972,7 @@ "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Ustawia sekwencje stopu do użycia. Gdy ten wzorzec zostanie napotkany, LLM przestanie generować tekst i zwróci wynik. Można skonfigurować wiele sekwencji stopu, określając kilka oddzielnych parametrów stopu w pliku modelu.", "Settings": "Ustawienia", "Settings saved successfully!": "Ustawienia zostały zapisane pomyślnie!", - "Share": "Podziel się", + "Share": "Udostępnij", "Share Chat": "Udostępnij rozmowę", "Share to Open WebUI Community": "Udostępnij w społeczności OpenWebUI", "Sharing Permissions": "Uprawnienia udostępniania", @@ -997,7 +997,7 @@ "Speech-to-Text Engine": "Silnik konwersji mowy na tekst", "Stop": "Zatrzymaj się", "Stop Sequence": "Zatrzymaj sekwencję", - "Stream Chat Response": "Odpowiedź czatu strumieniowego", + "Stream Chat Response": "Strumieniowanie odpowiedzi z czatu", "STT Model": "Model STT", "STT Settings": "Ustawienia STT", "Subtitle (e.g. about the Roman Empire)": "Podtytuł (np. o Imperium Rzymskim)", @@ -1009,10 +1009,10 @@ "Sync directory": "Sync directory", "System": "System", "System Instructions": "Instrukcje systemowe", - "System Prompt": "Podpowiedź systemowa", + "System Prompt": "Prompt systemowy", "Tags": "Tagi", "Tags Generation": "Generowanie tagów", - "Tags Generation Prompt": "Podpowiedź do generowania tagów", + "Tags Generation Prompt": "Prompt do generowania tagów", "Tail free sampling is used to reduce the impact of less probable tokens from the output. 
A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", "Talk to model": "", "Tap to interrupt": "Kliknij, aby przerwać", @@ -1025,7 +1025,7 @@ "Text Splitter": "Rozdzielacz tekstu", "Text-to-Speech Engine": "Silnik konwersji tekstu na mowę", "Tfs Z": "Tfs Z", - "Thanks for your feedback!": "Dziękuję za twoją opinię!", + "Thanks for your feedback!": "Dziękujemy za twoją opinię!", "The Application Account DN you bind with for search": "Konto techniczne w formacie DN, z którym się wiążesz w celu przeszukiwania", "The base to search for users": "Podstawa do wyszukiwania użytkowników", "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", @@ -1033,7 +1033,7 @@ "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Tablica wyników oceny opiera się na systemie rankingu Elo i jest aktualizowana w czasie rzeczywistym.", "The LDAP attribute that maps to the mail that users use to sign in.": "Atrybut LDAP, który mapuje się na adres e-mail używany przez użytkowników do logowania.", "The LDAP attribute that maps to the username that users use to sign in.": "Atrybut LDAP, który mapuje się na nazwę użytkownika, którą użytkownicy używają do logowania.", - "The leaderboard is currently in beta, and we may adjust the rating calculations as we refine the algorithm.": "Tablica wyników jest obecnie w fazie beta i możemy dostosować obliczenia oceny, gdy udoskonalamy algorytm.", + "The leaderboard is currently in beta, and we may adjust the rating calculations as we refine the algorithm.": "Tablica wyników jest w wersji beta, więc w miarę udoskonalania algorytmu możemy jeszcze modyfikować sposób obliczania ocen.", "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Maksymalny rozmiar pliku w MB. 
Jeśli rozmiar pliku przekroczy ten limit, plik nie zostanie przesłany.", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Maksymalna liczba plików, które można użyć jednocześnie w czacie. Jeśli liczba plików przekroczy ten limit, pliki nie zostaną przesłane.", "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Wynik powinien być wartością pomiędzy 0,0 (0%) a 1,0 (100%).", @@ -1054,8 +1054,8 @@ "This will delete all models including custom models and cannot be undone.": "To usunie wszystkie modele, w tym niestandardowe modele, i nie można tego cofnąć.", "This will reset the knowledge base and sync all files. Do you wish to continue?": "Czy chcesz wyzerować bazę wiedzy i zsynchronizować wszystkie pliki? Proszę potwierdź swoją decyzję.", "Thorough explanation": "Szczegółowe wyjaśnienie", - "Thought for {{DURATION}}": "Myśl na {{DURATION}}", - "Thought for {{DURATION}} seconds": "Myśl przez {{DURATION}} sekund", + "Thought for {{DURATION}}": "Myślenie przez {{DURATION}}", + "Thought for {{DURATION}} seconds": "Myślenie przez {{DURATION}} sekund", "Tika": "Tika", "Tika Server URL required.": "Wymagany jest adres URL serwera Tika.", "Tiktoken": "Tiktoken", From c8210d4043d99c470d7f49a406dcee25a4bea388 Mon Sep 17 00:00:00 2001 From: Erik Date: Tue, 1 Apr 2025 09:06:15 +0200 Subject: [PATCH 503/623] upated like in PR discussed --- src/lib/components/chat/ChatPlaceholder.svelte | 4 +--- src/lib/components/chat/Placeholder.svelte | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/src/lib/components/chat/ChatPlaceholder.svelte b/src/lib/components/chat/ChatPlaceholder.svelte index 99eb8cbce96..43fc94aeb0c 100644 --- a/src/lib/components/chat/ChatPlaceholder.svelte +++ b/src/lib/components/chat/ChatPlaceholder.svelte @@ -28,8 +28,6 @@ } $: models = modelIds.map((id) => $_models.find((m) => m.id === id)); - - const tempChatTooltipText = 'This chat won’t 
appear in history and your messages will not be saved.'; onMount(() => { mounted = true; @@ -70,7 +68,7 @@ {#if $temporaryChatEnabled} diff --git a/src/lib/components/chat/Placeholder.svelte b/src/lib/components/chat/Placeholder.svelte index db70d7602b4..be700ab009f 100644 --- a/src/lib/components/chat/Placeholder.svelte +++ b/src/lib/components/chat/Placeholder.svelte @@ -86,8 +86,6 @@ } $: models = selectedModels.map((id) => $_models.find((m) => m.id === id)); - - const tempChatTooltipText = 'This chat won’t appear in history and your messages will not be saved.'; onMount(() => {}); @@ -95,7 +93,7 @@
{#if $temporaryChatEnabled} From 29d57453355e4323f40d5b0706e3e8ed9c236255 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20P=C4=99kala?= Date: Tue, 1 Apr 2025 09:14:29 +0200 Subject: [PATCH 504/623] fix: update Polish translation for "Capture" to improve accuracy --- src/lib/i18n/locales/pl-PL/translation.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib/i18n/locales/pl-PL/translation.json b/src/lib/i18n/locales/pl-PL/translation.json index 9fab9e535c2..66793362ea9 100644 --- a/src/lib/i18n/locales/pl-PL/translation.json +++ b/src/lib/i18n/locales/pl-PL/translation.json @@ -146,7 +146,7 @@ "Camera": "Kamera", "Cancel": "Anuluj", "Capabilities": "Możliwości", - "Capture": "Uchwycić", + "Capture": "Przechwyć", "Certificate Path": "Ścieżka certyfikatu", "Change Password": "Zmień hasło", "Channel Name": "Nazwa kanału", From 85c8a9bd6886cbfd4e66a44edcb81559e605fba9 Mon Sep 17 00:00:00 2001 From: Panda Date: Tue, 1 Apr 2025 10:47:52 +0200 Subject: [PATCH 505/623] i18n: zh-cn --- src/lib/i18n/locales/zh-CN/translation.json | 38 ++++++++++----------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/src/lib/i18n/locales/zh-CN/translation.json b/src/lib/i18n/locales/zh-CN/translation.json index eeeff3d4769..106674c431e 100644 --- a/src/lib/i18n/locales/zh-CN/translation.json +++ b/src/lib/i18n/locales/zh-CN/translation.json @@ -119,7 +119,7 @@ "AUTOMATIC1111 Base URL": "AUTOMATIC1111 基础地址", "AUTOMATIC1111 Base URL is required.": "需要 AUTOMATIC1111 基础地址。", "Available list": "可用列表", - "Available Tool Servers": "", + "Available Tool Servers": "可用的工具服务器", "available!": "版本可用!", "Awful": "糟糕", "Azure AI Speech": "Azure AI 语音", @@ -344,7 +344,7 @@ "Draw": "平局", "Drop any files here to add to the conversation": "拖动文件到此处以添加到对话中", "e.g. '30s','10m'. Valid time units are 's', 'm', 'h'.": "例如 '30s','10m'。有效的时间单位是秒:'s',分:'m',时:'h'。", - "e.g. \"json\" or a JSON schema": "", + "e.g. \"json\" or a JSON schema": "例如 "json" 或 JSON 结构", "e.g. 
60": "例如 '60'", "e.g. A filter to remove profanity from text": "例如:一个用于过滤文本中不当内容的过滤器", "e.g. My Filter": "例如:我的过滤器", @@ -379,7 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "启用Mirostat采样以控制困惑度", "Enable New Sign Ups": "允许新用户注册", "Enabled": "启用", - "Enforce Temporary Chat": "", + "Enforce Temporary Chat": "强制临时聊天", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "确保您的 CSV 文件按以下顺序包含 4 列: 姓名、电子邮箱、密码、角色。", "Enter {{role}} message here": "在此处输入 {{role}} 的对话内容", "Enter a detail about yourself for your LLMs to recall": "输入一个关于你自己的详细信息,方便你的大语言模型记住这些内容", @@ -468,7 +468,7 @@ "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "已达到最大授权人数,请联系支持人员提升授权人数。", "Exclude": "排除", "Execute code for analysis": "执行代码进行分析", - "Executing `{{NAME}}`...": "", + "Executing `{{NAME}}`...": "正在执行 `{{NAME}}`...", "Expand": "展开", "Experimental": "实验性", "Explain": "解释", @@ -489,7 +489,7 @@ "External": "外部", "External Models": "外部模型", "Failed to add file.": "添加文件失败。", - "Failed to connect to {{URL}} OpenAPI tool server": "", + "Failed to connect to {{URL}} OpenAPI tool server": "无法连接到 {{URL}} OpenAPI 工具服务器", "Failed to create API Key.": "无法创建 API 密钥。", "Failed to fetch models": "无法获取模型", "Failed to read clipboard contents": "无法读取剪贴板内容", @@ -570,7 +570,7 @@ "Hex Color": "十六进制颜色代码", "Hex Color - Leave empty for default color": "十六进制颜色代码 - 留空使用默认颜色", "Hide": "隐藏", - "Hide Model": "", + "Hide Model": "隐藏模型", "Home": "主页", "Host": "主机", "How can I help you today?": "有什么我能帮您的吗?", @@ -608,7 +608,7 @@ "Integration": "集成", "Interface": "界面", "Invalid file format.": "无效文件格式。", - "Invalid JSON schema": "", + "Invalid JSON schema": "无效的 JSON 结构", "Invalid Tag": "无效标签", "is typing...": "输入中...", "January": "一月", @@ -630,7 +630,7 @@ "Knowledge Access": "访问知识库", "Knowledge created successfully.": "知识成功创建", "Knowledge deleted successfully.": "知识成功删除", - "Knowledge Public 
Sharing": "知识公开共享", "Knowledge reset successfully.": "知识成功重置", "Knowledge updated successfully": "知识成功更新", "Kokoro.js (Browser)": "Kokoro.js (Browser)", @@ -700,8 +700,8 @@ "Model {{modelId}} not found": "未找到模型 {{modelId}}", "Model {{modelName}} is not vision capable": "模型 {{modelName}} 不支持视觉能力", "Model {{name}} is now {{status}}": "模型 {{name}} 现在是 {{status}}", - "Model {{name}} is now hidden": "", - "Model {{name}} is now visible": "", + "Model {{name}} is now hidden": "模型 {{name}} 现已隐藏", + "Model {{name}} is now visible": "模型 {{name}} 现已可见", "Model accepts image inputs": "模型接受图像输入", "Model created successfully!": "模型创建成功!", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "检测到模型文件系统路径,无法继续进行。更新操作需要提供模型简称。", @@ -717,7 +717,7 @@ "Models": "模型", "Models Access": "访问模型列表", "Models configuration saved successfully": "模型配置保存成功", - "Models Public Sharing": "", + "Models Public Sharing": "模型公开分享", "Mojeek Search API Key": "Mojeek Search API 密钥", "more": "更多", "More": "更多", @@ -834,15 +834,15 @@ "Private": "私有", "Profile Image": "用户头像", "Prompt": "提示词 (Prompt)", - "Prompt (e.g. Tell me a fun fact about the Roman Empire)": "提示(例如:给我讲一个关于罗马帝国的趣事。)", - "Prompt Autocompletion": "", + "Prompt (e.g. 
Tell me a fun fact about the Roman Empire)": "提示词(例如:给我讲一个关于罗马帝国的趣事。)", + "Prompt Autocompletion": "提示词自动完成", "Prompt Content": "提示词内容", "Prompt created successfully": "提示词创建成功", "Prompt suggestions": "提示词建议", "Prompt updated successfully": "提示词更新成功", "Prompts": "提示词", "Prompts Access": "访问提示词", - "Prompts Public Sharing": "", + "Prompts Public Sharing": "提示词公开分享", "Public": "公共", "Pull \"{{searchValue}}\" from Ollama.com": "从 Ollama.com 拉取 \"{{searchValue}}\"", "Pull a model from Ollama.com": "从 Ollama.com 拉取一个模型", @@ -975,11 +975,11 @@ "Share": "分享", "Share Chat": "分享对话", "Share to Open WebUI Community": "分享到 OpenWebUI 社区", - "Sharing Permissions": "", + "Sharing Permissions": "共享权限", "Show": "显示", "Show \"What's New\" modal on login": "在登录时显示“更新内容”弹窗", "Show Admin Details in Account Pending Overlay": "在用户待激活界面中显示管理员邮箱等详细信息", - "Show Model": "", + "Show Model": "显示模型", "Show shortcuts": "显示快捷方式", "Show your support!": "表达你的支持!", "Showcased creativity": "很有创意", @@ -1096,7 +1096,7 @@ "Tools Function Calling Prompt": "工具函数调用提示词", "Tools have a function calling system that allows arbitrary code execution": "注意:工具有权执行任意代码", "Tools have a function calling system that allows arbitrary code execution.": "注意:工具有权执行任意代码。", - "Tools Public Sharing": "", + "Tools Public Sharing": "工具公开分享", "Top K": "Top K", "Top K Reranker": "Top K Reranker", "Top P": "Top P", @@ -1143,7 +1143,7 @@ "user": "用户", "User": "用户", "User location successfully retrieved.": "成功检索到用户位置。", - "User Webhooks": "", + "User Webhooks": "用户 Webhook", "Username": "用户名", "Users": "用户", "Using the default arena model with all models. 
Click the plus button to add custom models.": "竞技场模型默认使用所有模型。单击加号按钮添加自定义模型。", @@ -1158,7 +1158,7 @@ "Version": "版本", "Version {{selectedVersion}} of {{totalVersions}}": "版本 {{selectedVersion}}/{{totalVersions}}", "View Replies": "查看回复", - "View Result from `{{NAME}}`": "", + "View Result from `{{NAME}}`": "查看 `{{NAME}}` 的结果", "Visibility": "可见性", "Voice": "语音", "Voice Input": "语音输入", From 8799ff9575d69921eab1f499701f40b7dc2ae84f Mon Sep 17 00:00:00 2001 From: Panda Date: Tue, 1 Apr 2025 10:52:05 +0200 Subject: [PATCH 506/623] fix --- src/lib/i18n/locales/zh-CN/translation.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib/i18n/locales/zh-CN/translation.json b/src/lib/i18n/locales/zh-CN/translation.json index 106674c431e..cafcd152ab6 100644 --- a/src/lib/i18n/locales/zh-CN/translation.json +++ b/src/lib/i18n/locales/zh-CN/translation.json @@ -344,7 +344,7 @@ "Draw": "平局", "Drop any files here to add to the conversation": "拖动文件到此处以添加到对话中", "e.g. '30s','10m'. Valid time units are 's', 'm', 'h'.": "例如 '30s','10m'。有效的时间单位是秒:'s',分:'m',时:'h'。", - "e.g. \"json\" or a JSON schema": "例如 "json" 或 JSON 结构", + "e.g. \"json\" or a JSON schema": "例如 \"json\" 或 JSON 结构", "e.g. 60": "例如 '60'", "e.g. A filter to remove profanity from text": "例如:一个用于过滤文本中不当内容的过滤器", "e.g. 
My Filter": "例如:我的过滤器", From fa72c277db24930204686d6598f5be0f6c6a3780 Mon Sep 17 00:00:00 2001 From: Tiancong Li Date: Tue, 1 Apr 2025 17:10:32 +0800 Subject: [PATCH 507/623] i18n: update zh-TW --- src/lib/i18n/locales/zh-TW/translation.json | 24 ++++++++++----------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/lib/i18n/locales/zh-TW/translation.json b/src/lib/i18n/locales/zh-TW/translation.json index 25ff67a5d5f..b1f252cf80e 100644 --- a/src/lib/i18n/locales/zh-TW/translation.json +++ b/src/lib/i18n/locales/zh-TW/translation.json @@ -379,7 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "啟用 Mirostat 取樣以控制 perplexity。", "Enable New Sign Ups": "允許新使用者註冊", "Enabled": "已啟用", - "Enforce Temporary Chat": "", + "Enforce Temporary Chat": "強制使用臨時聊天", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "請確認您的 CSV 檔案包含以下 4 個欄位,並按照此順序排列:姓名、電子郵件、密碼、角色。", "Enter {{role}} message here": "在此輸入 {{role}} 訊息", "Enter a detail about yourself for your LLMs to recall": "輸入有關您的詳細資訊,讓您的大型語言模型可以回想起來", @@ -570,7 +570,7 @@ "Hex Color": "Hex 顔色", "Hex Color - Leave empty for default color": "Hex 顔色 —— 留空以使用預設顔色", "Hide": "隱藏", - "Hide Model": "", + "Hide Model": "隱藏模型", "Home": "首頁", "Host": "主機", "How can I help you today?": "今天我能為您做些什麼?", @@ -630,7 +630,7 @@ "Knowledge Access": "知識存取", "Knowledge created successfully.": "知識建立成功。", "Knowledge deleted successfully.": "知識刪除成功。", - "Knowledge Public Sharing": "", + "Knowledge Public Sharing": "知識庫公開分享", "Knowledge reset successfully.": "知識重設成功。", "Knowledge updated successfully": "知識更新成功", "Kokoro.js (Browser)": "Kokoro.js (Browser)", @@ -700,8 +700,8 @@ "Model {{modelId}} not found": "找不到模型 {{modelId}}", "Model {{modelName}} is not vision capable": "模型 {{modelName}} 不具備視覺能力", "Model {{name}} is now {{status}}": "模型 {{name}} 現在狀態為 {{status}}", - "Model {{name}} is now hidden": "", - "Model {{name}} is now visible": "", + "Model {{name}} is now hidden": "模型 {{name}} 
現已隱藏", + "Model {{name}} is now visible": "模型 {{name}} 現已可見", "Model accepts image inputs": "模型接受影像輸入", "Model created successfully!": "成功建立模型!", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "偵測到模型檔案系統路徑。更新需要模型簡稱,因此無法繼續。", @@ -717,7 +717,7 @@ "Models": "模型", "Models Access": "模型存取", "Models configuration saved successfully": "模型設定儲存成功", - "Models Public Sharing": "", + "Models Public Sharing": "模型公開分享", "Mojeek Search API Key": "Mojeek 搜尋 API 金鑰", "more": "更多", "More": "更多", @@ -835,14 +835,14 @@ "Profile Image": "個人檔案圖片", "Prompt": "提示詞", "Prompt (e.g. Tell me a fun fact about the Roman Empire)": "提示詞(例如:告訴我關於羅馬帝國的一些趣事)", - "Prompt Autocompletion": "", + "Prompt Autocompletion": "提示詞自動補全", "Prompt Content": "提示詞內容", "Prompt created successfully": "提示詞建立成功", "Prompt suggestions": "提示詞建議", "Prompt updated successfully": "提示詞更新成功", "Prompts": "提示詞", "Prompts Access": "提示詞存取", - "Prompts Public Sharing": "", + "Prompts Public Sharing": "提示詞公開分享", "Public": "公開", "Pull \"{{searchValue}}\" from Ollama.com": "從 Ollama.com 下載「{{searchValue}}」", "Pull a model from Ollama.com": "從 Ollama.com 下載模型", @@ -975,11 +975,11 @@ "Share": "分享", "Share Chat": "分享對話", "Share to Open WebUI Community": "分享到 OpenWebUI 社群", - "Sharing Permissions": "", + "Sharing Permissions": "分享權限設定", "Show": "顯示", "Show \"What's New\" modal on login": "登入時顯示「新功能」對話框", "Show Admin Details in Account Pending Overlay": "在帳號待審覆蓋層中顯示管理員詳細資訊", - "Show Model": "", + "Show Model": "顯示模型", "Show shortcuts": "顯示快捷鍵", "Show your support!": "表達您的支持!", "Showcased creativity": "展現創意", @@ -1096,7 +1096,7 @@ "Tools Function Calling Prompt": "工具函式呼叫提示詞", "Tools have a function calling system that allows arbitrary code execution": "工具具有允許執行任意程式碼的函式呼叫系統", "Tools have a function calling system that allows arbitrary code execution.": "工具具有允許執行任意程式碼的函式呼叫系統。", - "Tools Public Sharing": "", + "Tools Public Sharing": "工具公開分享", "Top K": "Top K", "Top K Reranker": "Top K Reranker", 
"Top P": "Top P", @@ -1143,7 +1143,7 @@ "user": "使用者", "User": "使用者", "User location successfully retrieved.": "成功取得使用者位置。", - "User Webhooks": "", + "User Webhooks": "使用者 Webhooks", "Username": "使用者名稱", "Users": "使用者", "Using the default arena model with all models. Click the plus button to add custom models.": "正在使用預設競技模型與所有模型。點選加號按鈕以新增自訂模型。", From 825becceb023ce1a0bcefe90a1db384942f6890a Mon Sep 17 00:00:00 2001 From: Said Ouhdachi Date: Tue, 1 Apr 2025 13:10:59 +0200 Subject: [PATCH 508/623] Arabic Translation --- src/lib/i18n/locales/ar/translation.json | 1178 ++++++++++++++++++++++ src/lib/i18n/locales/languages.json | 6 +- 2 files changed, 1183 insertions(+), 1 deletion(-) create mode 100644 src/lib/i18n/locales/ar/translation.json diff --git a/src/lib/i18n/locales/ar/translation.json b/src/lib/i18n/locales/ar/translation.json new file mode 100644 index 00000000000..892bd53acef --- /dev/null +++ b/src/lib/i18n/locales/ar/translation.json @@ -0,0 +1,1178 @@ +{ + "-1 for no limit, or a positive integer for a specific limit": "-1 لعدم وجود حد، أو عدد صحيح موجب لحد معين", + "'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.": "الحروف 's'، 'm'، 'h'، 'd'، 'w' أو '-1' لعدم انتهاء الصلاحية.", + "(e.g. `sh webui.sh --api --api-auth username_password`)": "(مثال: `sh webui.sh --api --api-auth اسم_المستخدم_كلمة_المرور`)", + "(e.g. 
`sh webui.sh --api`)": "(مثال: تشغيل الأمر: `sh webui.sh --api`)", + "(latest)": "(أحدث)", + "{{ models }}": "النماذج: {{ models }}", + "{{COUNT}} hidden lines": "{{COUNT}} سطر/أسطر مخفية", + "{{COUNT}} Replies": "{{COUNT}} رد/ردود", + "{{user}}'s Chats": "محادثات المستخدم {{user}}", + "{{webUIName}} Backend Required": "يتطلب الخلفية الخاصة بـ {{webUIName}}", + "*Prompt node ID(s) are required for image generation": "*معرّف/معرّفات عقدة الموجه مطلوبة لتوليد الصور", + "A new version (v{{LATEST_VERSION}}) is now available.": "يتوفر الآن إصدار جديد (v{{LATEST_VERSION}}).", + "A task model is used when performing tasks such as generating titles for chats and web search queries": "يُستخدم نموذج المهام عند تنفيذ مهام مثل توليد عناوين المحادثات واستعلامات البحث على الويب", + "a user": "مستخدم", + "About": "حول", + "Accept autocomplete generation / Jump to prompt variable": "قبول توليد الإكمال التلقائي / الانتقال إلى متغير الموجه", + "Access": "الوصول", + "Access Control": "التحكم في الوصول", + "Accessible to all users": "متاح لجميع المستخدمين", + "Account": "الحساب", + "Account Activation Pending": "انتظار تفعيل الحساب", + "Accurate information": "معلومات دقيقة", + "Actions": "الإجراءات", + "Activate": "تفعيل", + "Activate this command by typing \"/{{COMMAND}}\" to chat input.": "قم بتفعيل هذا الأمر بكتابة \"/{{COMMAND}}\" في مدخل المحادثة.", + "Active Users": "المستخدمون النشطون", + "Add": "إضافة", + "Add a model ID": "إضافة معرّف نموذج", + "Add a short description about what this model does": "أضف وصفًا قصيرًا لما يفعله هذا النموذج", + "Add a tag": "أضف وسم", + "Add Arena Model": "إضافة نموذج الساحة", + "Add Connection": "إضافة اتصال", + "Add Content": "إضافة محتوى", + "Add content here": "أضف المحتوى هنا", + "Add custom prompt": "إضافة موجه مخصص", + "Add Files": "إضافة ملفات", + "Add Group": "إضافة مجموعة", + "Add Memory": "إضافة ذاكرة", + "Add Model": "إضافة نموذج", + "Add Reaction": "إضافة تفاعل", + "Add Tag": "إضافة وسم", + "Add Tags": "إضافة وسوم", + "Add text 
content": "إضافة محتوى نصي", + "Add User": "إضافة مستخدم", + "Add User Group": "إضافة مجموعة مستخدمين", + "Adjusting these settings will apply changes universally to all users.": "تعديل هذه الإعدادات سيطبق التغييرات على جميع المستخدمين بشكل عام.", + "admin": "المسؤول", + "Admin": "المسؤول", + "Admin Panel": "لوحة المسؤول", + "Admin Settings": "إعدادات المسؤول", + "Admins have access to all tools at all times; users need tools assigned per model in the workspace.": "للمسؤولين الوصول إلى جميع الأدوات في جميع الأوقات؛ بينما يحتاج المستخدمون إلى تعيين أدوات لكل نموذج في مساحة العمل.", + "Advanced Parameters": "المعلمات المتقدمة", + "Advanced Params": "المعلمات المتقدمة", + "All": "الكل", + "All Documents": "جميع المستندات", + "All models deleted successfully": "تم حذف جميع النماذج بنجاح", + "Allow Chat Controls": "السماح بوسائل التحكم في المحادثة", + "Allow Chat Delete": "السماح بحذف المحادثة", + "Allow Chat Deletion": "السماح بحذف المحادثة", + "Allow Chat Edit": "السماح بتعديل المحادثة", + "Allow File Upload": "السماح بتحميل الملفات", + "Allow non-local voices": "السماح بالأصوات غير المحلية", + "Allow Temporary Chat": "السماح بالمحادثة المؤقتة", + "Allow User Location": "السماح بتحديد موقع المستخدم", + "Allow Voice Interruption in Call": "السماح بانقطاع الصوت أثناء المكالمة", + "Allowed Endpoints": "النقاط النهائية المسموح بها", + "Already have an account?": "هل لديك حساب بالفعل؟", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "بديل للـ top_p، ويهدف إلى ضمان توازن بين الجودة والتنوع. تمثل المعلمة p الحد الأدنى لاحتمالية اعتبار الرمز مقارنة باحتمالية الرمز الأكثر احتمالاً. 
على سبيل المثال، مع p=0.05 والرمز الأكثر احتمالاً لديه احتمال 0.9، يتم ترشيح القيم الأقل من 0.045.", + "Always": "دائمًا", + "Amazing": "رائع", + "an assistant": "مساعد", + "Analyzed": "تم التحليل", + "Analyzing...": "جارٍ التحليل...", + "and": "و", + "and {{COUNT}} more": "و{{COUNT}} المزيد", + "and create a new shared link.": "وإنشاء رابط مشترك جديد.", + "API Base URL": "الرابط الأساسي لواجهة API", + "API Key": "مفتاح واجهة برمجة التطبيقات (API)", + "API Key created.": "تم إنشاء مفتاح واجهة API.", + "API Key Endpoint Restrictions": "قيود نقاط نهاية مفتاح API", + "API keys": "مفاتيح واجهة برمجة التطبيقات (API)", + "Application DN": "DN التطبيق", + "Application DN Password": "كلمة مرور DN التطبيق", + "applies to all users with the \"user\" role": "ينطبق على جميع المستخدمين الذين لديهم دور \"مستخدم\"", + "April": "أبريل", + "Archive": "أرشيف", + "Archive All Chats": "أرشفة جميع المحادثات", + "Archived Chats": "المحادثات المؤرشفة", + "archived-chat-export": "تصدير المحادثات المؤرشفة", + "Are you sure you want to clear all memories? 
This action cannot be undone.": "هل أنت متأكد من رغبتك في مسح جميع الذكريات؟ لا يمكن التراجع عن هذا الإجراء.", + "Are you sure you want to delete this channel?": "هل أنت متأكد من رغبتك في حذف هذه القناة؟", + "Are you sure you want to delete this message?": "هل أنت متأكد من رغبتك في حذف هذه الرسالة؟", + "Are you sure you want to unarchive all archived chats?": "هل أنت متأكد من رغبتك في إلغاء أرشفة جميع المحادثات المؤرشفة؟", + "Are you sure?": "هل أنت متأكد؟", + "Arena Models": "نماذج الساحة", + "Artifacts": "القطع الأثرية", + "Ask": "اسأل", + "Ask a question": "اطرح سؤالاً", + "Assistant": "المساعد", + "Attach file from knowledge": "إرفاق ملف من المعرفة", + "Attention to detail": "الاهتمام بالتفاصيل", + "Attribute for Mail": "خاصية للبريد", + "Attribute for Username": "خاصية لاسم المستخدم", + "Audio": "الصوت", + "August": "أغسطس", + "Authenticate": "توثيق", + "Authentication": "المصادقة", + "Auto-Copy Response to Clipboard": "نسخ الرد تلقائيًا إلى الحافظة", + "Auto-playback response": "تشغيل الرد تلقائيًا", + "Autocomplete Generation": "توليد الإكمال التلقائي", + "Autocomplete Generation Input Max Length": "الحد الأقصى لطول مدخل توليد الإكمال التلقائي", + "Automatic1111": "Automatic1111 (أوتوماتيك 1111)", + "AUTOMATIC1111 Api Auth String": "سلسلة توثيق API لـ AUTOMATIC1111", + "AUTOMATIC1111 Base URL": "الرابط الأساسي لـ AUTOMATIC1111", + "AUTOMATIC1111 Base URL is required.": "الرابط الأساسي لـ AUTOMATIC1111 مطلوب.", + "Available list": "القائمة المتاحة", + "available!": "متاح!", + "Awful": "فظيع", + "Azure AI Speech": "نطق Azure AI (مايكروسوفت)", + "Azure Region": "منطقة Azure", + "Back": "عودة", + "Bad Response": "رد سيئ", + "Banners": "لافتات", + "Base Model (From)": "النموذج الأساسي (من)", + "Batch Size (num_batch)": "حجم الدفعة (num_batch)", + "before": "قبل", + "Being lazy": "كونك كسولاً", + "Beta": "بيتا", + "Bing Search V7 Endpoint": "نقطة نهاية Bing Search V7", + "Bing Search V7 Subscription Key": "مفتاح اشتراك Bing Search V7", + "Bocha Search API Key": 
"مفتاح API لـ Bocha Search", + "Boosting or penalizing specific tokens for constrained responses. Bias values will be clamped between -100 and 100 (inclusive). (Default: none)": "تعزيز أو معاقبة رموز محددة لردود مقيدة. ستتراوح قيم التحيز بين -100 و100 (شاملة). (افتراضي: لا شيء)", + "Brave Search API Key": "مفتاح API لـ Brave Search", + "By {{name}}": "بواسطة {{name}}", + "Bypass Embedding and Retrieval": "تجاوز التضمين والاسترجاع", + "Bypass SSL verification for Websites": "تجاوز التحقق من SSL للمواقع", + "Calendar": "التقويم", + "Call": "مكالمة", + "Call feature is not supported when using Web STT engine": "ميزة الاتصال غير مدعومة عند استخدام محرك Web STT", + "Camera": "الكاميرا", + "Cancel": "إلغاء", + "Capabilities": "القدرات", + "Capture": "التقاط", + "Certificate Path": "مسار الشهادة", + "Change Password": "تغيير كلمة المرور", + "Channel Name": "اسم القناة", + "Channels": "القنوات", + "Character": "الشخصية", + "Character limit for autocomplete generation input": "حد الأحرف لمدخل توليد الإكمال التلقائي", + "Chart new frontiers": "رسم آفاق جديدة", + "Chat": "محادثة", + "Chat Background Image": "صورة خلفية المحادثة", + "Chat Bubble UI": "واجهة فقاعات المحادثة", + "Chat Controls": "ضوابط المحادثة", + "Chat direction": "اتجاه المحادثة", + "Chat Overview": "نظرة عامة على المحادثة", + "Chat Permissions": "أذونات المحادثة", + "Chat Tags Auto-Generation": "الإنشاء التلقائي لوسوم المحادثة", + "Chats": "المحادثات", + "Check Again": "تحقق مرة أخرى", + "Check for updates": "تحقق من التحديثات", + "Checking for updates...": "جارٍ التحقق من التحديثات...", + "Choose a model before saving...": "اختر نموذجًا قبل الحفظ...", + "Chunk Overlap": "تداخل القطع", + "Chunk Size": "حجم القطعة", + "Ciphers": "التشفيرات", + "Citation": "اقتباس", + "Clear memory": "مسح الذاكرة", + "Clear Memory": "مسح الذاكرة", + "click here": "انقر هنا", + "Click here for filter guides.": "انقر هنا للحصول على أدلة الفلاتر.", + "Click here for help.": "انقر هنا للمساعدة.", + "Click here to": "انقر هنا لـ", + 
"Click here to download user import template file.": "انقر هنا لتنزيل ملف قالب استيراد المستخدم.", + "Click here to learn more about faster-whisper and see the available models.": "انقر هنا لمعرفة المزيد عن faster-whisper ورؤية النماذج المتاحة.", + "Click here to see available models.": "انقر هنا لرؤية النماذج المتاحة.", + "Click here to select": "انقر هنا للاختيار", + "Click here to select a csv file.": "انقر هنا لاختيار ملف CSV.", + "Click here to select a py file.": "انقر هنا لاختيار ملف PY.", + "Click here to upload a workflow.json file.": "انقر هنا لتحميل ملف workflow.json.", + "click here.": "انقر هنا.", + "Click on the user role button to change a user's role.": "انقر على زر دور المستخدم لتغيير دور المستخدم.", + "Clipboard write permission denied. Please check your browser settings to grant the necessary access.": "تم رفض إذن الكتابة إلى الحافظة. يرجى التحقق من إعدادات المتصفح لمنح الوصول اللازم.", + "Clone": "استنساخ", + "Clone Chat": "استنساخ المحادثة", + "Clone of {{TITLE}}": "استنساخ لـ {{TITLE}}", + "Close": "إغلاق", + "Code execution": "تنفيذ الشيفرة", + "Code Execution": "تنفيذ الشيفرة", + "Code Execution Engine": "محرك تنفيذ الشيفرة", + "Code Execution Timeout": "مهلة تنفيذ الشيفرة", + "Code formatted successfully": "تم تنسيق الشيفرة بنجاح", + "Code Interpreter": "مفسر الشيفرة", + "Code Interpreter Engine": "محرك مفسر الشيفرة", + "Code Interpreter Prompt Template": "قالب موجه مفسر الشيفرة", + "Collapse": "طي", + "Collection": "المجموعة", + "Color": "اللون", + "ComfyUI": "ComfyUI", + "ComfyUI API Key": "مفتاح API لـ ComfyUI", + "ComfyUI Base URL": "عنوان الأساس لـ ComfyUI", + "ComfyUI Base URL is required.": "عنوان الأساس لـ ComfyUI مطلوب.", + "ComfyUI Workflow": "سير عمل ComfyUI", + "ComfyUI Workflow Nodes": "عقد سير عمل ComfyUI", + "Command": "الأمر", + "Completions": "الإكمالات", + "Concurrent Requests": "الطلبات المتزامنة", + "Configure": "تكوين", + "Confirm": "تأكيد", + "Confirm Password": "تأكيد كلمة المرور", + "Confirm your action": "أكد 
إجراءك", + "Confirm your new password": "أكد كلمة مرورك الجديدة", + "Connect to your own OpenAI compatible API endpoints.": "اتصل بنقاط نهاية API المتوافقة مع OpenAI الخاصة بك.", + "Connections": "الاتصالات", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "يقيّد الجهد في التفكير لنماذج التفكير. ينطبق فقط على نماذج التفكير من مقدمي خدمات محددين يدعمون جهد التفكير.", + "Contact Admin for WebUI Access": "اتصل بالمسؤول للوصول إلى واجهة الويب", + "Content": "المحتوى", + "Content Extraction Engine": "محرك استخراج المحتوى", + "Context Length": "طول السياق", + "Continue Response": "متابعة الرد", + "Continue with {{provider}}": "متابعة مع {{provider}}", + "Continue with Email": "متابعة باستخدام البريد الإلكتروني", + "Continue with LDAP": "متابعة باستخدام LDAP", + "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "تحكم في كيفية تقسيم نص الرسالة لطلبات تحويل النص إلى كلام. 'علامات الترقيم' تقسم إلى جمل، 'الفقرات' تقسم إلى فقرات، و'لا شيء' يحتفظ بالرسالة كسلسلة واحدة.", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "تحكم في تكرار تسلسلات الرموز في النص المولد. قيمة أعلى (مثال: 1.5) ستعاقب التكرارات بشدة أكبر، بينما قيمة أقل (مثال: 1.1) ستكون أكثر تسامحاً. عند القيمة 1، يتم تعطيله.", + "Controls": "الضوابط", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "يتحكم في التوازن بين الترابط والتنوع في الناتج. 
قيمة أقل ستؤدي إلى نص أكثر تركيزاً وترابطاً.", + "Copied": "تم النسخ", + "Copied shared chat URL to clipboard!": "تم نسخ رابط المحادثة المشترك إلى الحافظة!", + "Copied to clipboard": "تم النسخ إلى الحافظة", + "Copy": "نسخ", + "Copy last code block": "نسخ آخر كتلة شيفرة", + "Copy last response": "نسخ آخر رد", + "Copy Link": "نسخ الرابط", + "Copy to clipboard": "نسخ إلى الحافظة", + "Copying to clipboard was successful!": "تم النسخ إلى الحافظة بنجاح!", + "CORS must be properly configured by the provider to allow requests from Open WebUI.": "يجب أن يتم تكوين CORS بشكل صحيح من قبل المزود للسماح بالطلبات من Open WebUI.", + "Create": "إنشاء", + "Create a knowledge base": "إنشاء قاعدة معرفة", + "Create a model": "إنشاء نموذج", + "Create Account": "إنشاء حساب", + "Create Admin Account": "إنشاء حساب مسؤول", + "Create Channel": "إنشاء قناة", + "Create Group": "إنشاء مجموعة", + "Create Knowledge": "إنشاء معرفة", + "Create new key": "إنشاء مفتاح جديد", + "Create new secret key": "إنشاء مفتاح سري جديد", + "Created at": "تم الإنشاء في", + "Created At": "تاريخ الإنشاء", + "Created by": "تم الإنشاء بواسطة", + "CSV Import": "استيراد CSV", + "Ctrl+Enter to Send": "اضغط Ctrl+Enter للإرسال", + "Current Model": "النموذج الحالي", + "Current Password": "كلمة المرور الحالية", + "Custom": "مخصص", + "Danger Zone": "منطقة الخطر", + "Dark": "داكن", + "Database": "قاعدة البيانات", + "December": "ديسمبر", + "Default": "افتراضي", + "Default (Open AI)": "افتراضي (Open AI)", + "Default (SentenceTransformers)": "افتراضي (SentenceTransformers)", + "Default mode works with a wider range of models by calling tools once before execution. Native mode leverages the model’s built-in tool-calling capabilities, but requires the model to inherently support this feature.": "الوضع الافتراضي يعمل مع مجموعة أوسع من النماذج من خلال استدعاء الأدوات مرة واحدة قبل التنفيذ. 
أما الوضع الأصلي فيستخدم قدرات استدعاء الأدوات المدمجة في النموذج، لكنه يتطلب دعمًا داخليًا لهذه الميزة.", + "Default Model": "النموذج الافتراضي", + "Default model updated": "الإفتراضي تحديث الموديل", + "Default Models": "النماذج الافتراضية", + "Default permissions": "الأذونات الافتراضية", + "Default permissions updated successfully": "تم تحديث الأذونات الافتراضية بنجاح", + "Default Prompt Suggestions": "الإفتراضي Prompt الاقتراحات", + "Default to 389 or 636 if TLS is enabled": "الافتراضي هو 389 أو 636 إذا تم تمكين TLS", + "Default to ALL": "الافتراضي هو الكل", + "Default User Role": "الإفتراضي صلاحيات المستخدم", + "Delete": "حذف", + "Delete a model": "حذف الموديل", + "Delete All Chats": "حذف جميع الدردشات", + "Delete All Models": "حذف جميع النماذج", + "Delete chat": "حذف المحادثه", + "Delete Chat": "حذف المحادثه.", + "Delete chat?": "هل تريد حذف المحادثة؟", + "Delete folder?": "هل تريد حذف المجلد؟", + "Delete function?": "هل تريد حذف الوظيفة؟", + "Delete Message": "حذف الرسالة", + "Delete message?": "هل تريد حذف الرسالة؟", + "Delete prompt?": "هل تريد حذف الموجه؟", + "delete this link": "أحذف هذا الرابط", + "Delete tool?": "هل تريد حذف الأداة؟", + "Delete User": "حذف المستخدم", + "Deleted {{deleteModelTag}}": "{{deleteModelTag}} حذف", + "Deleted {{name}}": "حذف {{name}}", + "Deleted User": "مستخدم محذوف", + "Describe your knowledge base and objectives": "صف قاعدة معرفتك وأهدافك", + "Description": "وصف", + "Didn't fully follow instructions": "لم أتبع التعليمات بشكل كامل", + "Direct Connections": "الاتصالات المباشرة", + "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "تتيح الاتصالات المباشرة للمستخدمين الاتصال بنقاط نهاية API متوافقة مع OpenAI الخاصة بهم.", + "Direct Connections settings updated": "تم تحديث إعدادات الاتصالات المباشرة", + "Disabled": "معطّل", + "Discover a function": "اكتشف وظيفة", + "Discover a model": "اكتشف نموذجا", + "Discover a prompt": "اكتشاف موجه", + "Discover a tool": "اكتشف أداة", + "Discover how 
to use Open WebUI and seek support from the community.": "اكتشف كيفية استخدام Open WebUI واطلب الدعم من المجتمع.", + "Discover wonders": "اكتشف العجائب", + "Discover, download, and explore custom functions": "اكتشف، حمّل، واستعرض الوظائف المخصصة", + "Discover, download, and explore custom prompts": "اكتشاف وتنزيل واستكشاف المطالبات المخصصة", + "Discover, download, and explore custom tools": "اكتشف، حمّل، واستعرض الأدوات المخصصة", + "Discover, download, and explore model presets": "اكتشاف وتنزيل واستكشاف الإعدادات المسبقة للنموذج", + "Dismissible": "يمكن تجاهله", + "Display": "العرض", + "Display Emoji in Call": "عرض الرموز التعبيرية أثناء المكالمة", + "Display the username instead of You in the Chat": "اعرض اسم المستخدم بدلاً منك في الدردشة", + "Displays citations in the response": "عرض المراجع في الرد", + "Dive into knowledge": "انغمس في المعرفة", + "Do not install functions from sources you do not fully trust.": "لا تقم بتثبيت الوظائف من مصادر لا تثق بها تمامًا.", + "Do not install tools from sources you do not fully trust.": "لا تقم بتثبيت الأدوات من مصادر لا تثق بها تمامًا.", + "Document": "المستند", + "Document Intelligence": "تحليل المستندات الذكي", + "Document Intelligence endpoint and key required.": "يتطلب نقطة نهاية ومفتاح لتحليل المستندات.", + "Documentation": "التوثيق", + "Documents": "مستندات", + "does not make any external connections, and your data stays securely on your locally hosted server.": "لا يجري أي اتصالات خارجية، وتظل بياناتك آمنة على الخادم المستضاف محليًا.", + "Domain Filter List": "قائمة تصفية النطاقات", + "Don't have an account?": "ليس لديك حساب؟", + "don't install random functions from sources you don't trust.": "لا تقم بتثبيت وظائف عشوائية من مصادر غير موثوقة.", + "don't install random tools from sources you don't trust.": "لا تقم بتثبيت أدوات عشوائية من مصادر غير موثوقة.", + "Don't like the style": "لا أحب النمط", + "Done": "تم", + "Download": "تحميل", + "Download as SVG": "تنزيل بصيغة SVG", + "Download canceled": "تم إلغاء التحميل", 
+ "Download Database": "تحميل قاعدة البيانات", + "Drag and drop a file to upload or select a file to view": "اسحب الملف وأفلته للرفع أو اختر ملفًا للعرض", + "Draw": "ارسم", + "Drop any files here to add to the conversation": "أسقط أية ملفات هنا لإضافتها إلى المحادثة", + "e.g. '30s','10m'. Valid time units are 's', 'm', 'h'.": "e.g. '30s','10m'. الوحدات الزمنية الصالحة هي 's', 'm', 'h'.", + "e.g. 60": "مثال: 60", + "e.g. A filter to remove profanity from text": "مثال: مرشح لإزالة الألفاظ النابية من النص", + "e.g. My Filter": "مثال: مرشحي", + "e.g. My Tools": "مثال: أدواتي", + "e.g. my_filter": "مثال: my_filter", + "e.g. my_tools": "مثال: my_tools", + "e.g. Tools for performing various operations": "مثال: أدوات لتنفيذ عمليات متنوعة", + "Edit": "تعديل", + "Edit Arena Model": "تعديل نموذج Arena", + "Edit Channel": "تعديل القناة", + "Edit Connection": "تعديل الاتصال", + "Edit Default Permissions": "تعديل الأذونات الافتراضية", + "Edit Memory": "تعديل الذاكرة", + "Edit User": "تعديل المستخدم", + "Edit User Group": "تعديل مجموعة المستخدمين", + "ElevenLabs": "ElevenLabs", + "Email": "البريد", + "Embark on adventures": "انطلق في مغامرات", + "Embedding": "تضمين", + "Embedding Batch Size": "حجم دفعة التضمين", + "Embedding Model": "نموذج التضمين", + "Embedding Model Engine": "تضمين محرك النموذج", + "Embedding model set to \"{{embedding_model}}\"": "تم تعيين نموذج التضمين على \"{{embedding_model}}\"", + "Enable API Key": "تفعيل مفتاح API", + "Enable autocomplete generation for chat messages": "تفعيل توليد الإكمال التلقائي لرسائل الدردشة", + "Enable Code Execution": "تفعيل تنفيذ الكود", + "Enable Code Interpreter": "تفعيل مفسر الكود", + "Enable Community Sharing": "تمكين مشاركة المجتمع", + "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. 
This can help maintain performance by avoiding page faults and ensuring fast data access.": "تفعيل قفل الذاكرة (mlock) لمنع إخراج بيانات النموذج من الذاكرة. يساعد هذا الخيار في الحفاظ على الأداء من خلال منع حدوث أخطاء في الوصول وضمان سرعة الوصول إلى البيانات.", + "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "تفعيل تعيين الذاكرة (mmap) لتحميل بيانات النموذج. يسمح هذا الخيار للنظام باستخدام التخزين كامتداد للذاكرة RAM عن طريق معاملة ملفات القرص كما لو كانت في RAM. قد يحسن أداء النموذج، لكن قد لا يعمل بشكل صحيح مع جميع الأنظمة وقد يستهلك مساحة كبيرة من القرص.", + "Enable Message Rating": "تفعيل تقييم الرسائل", + "Enable Mirostat sampling for controlling perplexity.": "تفعيل أخذ عينات Mirostat للتحكم في درجة التعقيد.", + "Enable New Sign Ups": "تفعيل عمليات التسجيل الجديدة", + "Enabled": "مفعل", + "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "تأكد من أن ملف CSV الخاص بك يتضمن 4 أعمدة بهذا الترتيب: Name, Email, Password, Role.", + "Enter {{role}} message here": "أدخل رسالة {{role}} هنا", + "Enter a detail about yourself for your LLMs to recall": "ادخل معلومات عنك تريد أن يتذكرها الموديل", + "Enter api auth string (e.g. 
username:password)": "أدخل سلسلة توثيق API (مثال: username:password)", + "Enter Application DN": "أدخل DN التطبيق", + "Enter Application DN Password": "أدخل كلمة مرور DN التطبيق", + "Enter Bing Search V7 Endpoint": "أدخل نقطة نهاية Bing Search V7", + "Enter Bing Search V7 Subscription Key": "أدخل مفتاح اشتراك Bing Search V7", + "Enter Bocha Search API Key": "أدخل مفتاح API لـ Bocha Search", + "Enter Brave Search API Key": "أدخل مفتاح واجهة برمجة تطبيقات البحث الشجاع", + "Enter certificate path": "أدخل مسار الشهادة", + "Enter CFG Scale (e.g. 7.0)": "أدخل مقياس CFG (مثال: 7.0)", + "Enter Chunk Overlap": "أدخل الChunk Overlap", + "Enter Chunk Size": "أدخل Chunk الحجم", + "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "أدخل أزواج \"الرمز:قيمة التحيز\" مفصولة بفواصل (مثال: 5432:100، 413:-100)", + "Enter description": "أدخل الوصف", + "Enter Document Intelligence Endpoint": "أدخل نقطة نهاية تحليل المستندات", + "Enter Document Intelligence Key": "أدخل مفتاح تحليل المستندات", + "Enter domains separated by commas (e.g., example.com,site.org)": "أدخل النطاقات مفصولة بفواصل (مثال: example.com,site.org)", + "Enter Exa API Key": "أدخل مفتاح API لـ Exa", + "Enter Github Raw URL": "أدخل عنوان URL ل Github Raw", + "Enter Google PSE API Key": "أدخل مفتاح واجهة برمجة تطبيقات PSE من Google", + "Enter Google PSE Engine Id": "أدخل معرف محرك PSE من Google", + "Enter Image Size (e.g. 512x512)": "(e.g. 512x512) أدخل حجم الصورة ", + "Enter Jina API Key": "أدخل مفتاح API لـ Jina", + "Enter Jupyter Password": "أدخل كلمة مرور Jupyter", + "Enter Jupyter Token": "أدخل رمز Jupyter", + "Enter Jupyter URL": "أدخل عنوان Jupyter", + "Enter Kagi Search API Key": "أدخل مفتاح API لـ Kagi Search", + "Enter Key Behavior": "أدخل سلوك المفتاح", + "Enter language codes": "أدخل كود اللغة", + "Enter Model ID": "أدخل معرف النموذج", + "Enter model tag (e.g. {{modelTag}})": "(e.g. 
{{modelTag}}) أدخل الموديل تاق", + "Enter Mojeek Search API Key": "أدخل مفتاح API لـ Mojeek Search", + "Enter Number of Steps (e.g. 50)": "(e.g. 50) أدخل عدد الخطوات", + "Enter Perplexity API Key": "أدخل مفتاح API لـ Perplexity", + "Enter proxy URL (e.g. https://user:password@host:port)": "أدخل عنوان البروكسي (مثال: https://user:password@host:port)", + "Enter reasoning effort": "أدخل مستوى الجهد في الاستدلال", + "Enter Sampler (e.g. Euler a)": "أدخل العينة (مثال: Euler a)", + "Enter Scheduler (e.g. Karras)": "أدخل المجدول (مثال: Karras)", + "Enter Score": "أدخل النتيجة", + "Enter SearchApi API Key": "أدخل مفتاح API لـ SearchApi", + "Enter SearchApi Engine": "أدخل محرك SearchApi", + "Enter Searxng Query URL": "أدخل عنوان URL لاستعلام Searxng", + "Enter Seed": "أدخل القيمة الابتدائية (Seed)", + "Enter SerpApi API Key": "أدخل مفتاح API لـ SerpApi", + "Enter SerpApi Engine": "أدخل محرك SerpApi", + "Enter Serper API Key": "أدخل مفتاح واجهة برمجة تطبيقات Serper", + "Enter Serply API Key": "أدخل مفتاح API لـ Serply", + "Enter Serpstack API Key": "أدخل مفتاح واجهة برمجة تطبيقات Serpstack", + "Enter server host": "أدخل مضيف الخادم", + "Enter server label": "أدخل تسمية الخادم", + "Enter server port": "أدخل منفذ الخادم", + "Enter stop sequence": "أدخل تسلسل التوقف", + "Enter system prompt": "أدخل موجه النظام", + "Enter Tavily API Key": "أدخل مفتاح API لـ Tavily", + "Enter the public URL of your WebUI. This URL will be used to generate links in the notifications.": "أدخل الرابط العلني لـ WebUI الخاص بك. سيتم استخدام هذا الرابط لإنشاء روابط داخل الإشعارات.", + "Enter Tika Server URL": "أدخل رابط خادم Tika", + "Enter timeout in seconds": "أدخل المهلة بالثواني", + "Enter to Send": "اضغط Enter للإرسال", + "Enter Top K": "أدخل Top K", + "Enter URL (e.g. http://127.0.0.1:7860/)": "الرابط (e.g. http://127.0.0.1:7860/)", + "Enter URL (e.g. http://localhost:11434)": "URL (e.g. 
http://localhost:11434)", + "Enter your current password": "أدخل كلمة المرور الحالية", + "Enter Your Email": "أدخل البريد الإلكتروني", + "Enter Your Full Name": "أدخل الاسم الكامل", + "Enter your message": "أدخل رسالتك", + "Enter your new password": "أدخل كلمة المرور الجديدة", + "Enter Your Password": "أدخل كلمة المرور", + "Enter Your Role": "أدخل الصلاحيات", + "Enter Your Username": "أدخل اسم المستخدم الخاص بك", + "Enter your webhook URL": "أدخل رابط Webhook الخاص بك", + "Error": "خطأ", + "ERROR": "خطأ", + "Error accessing Google Drive: {{error}}": "حدث خطأ أثناء الوصول إلى Google Drive: {{error}}", + "Error uploading file: {{error}}": "حدث خطأ أثناء تحميل الملف: {{error}}", + "Evaluations": "التقييمات", + "Exa API Key": "مفتاح API لـ Exa", + "Example: (&(objectClass=inetOrgPerson)(uid=%s))": "مثال: (&(objectClass=inetOrgPerson)(uid=%s))", + "Example: ALL": "مثال: ALL", + "Example: mail": "مثال: mail", + "Example: ou=users,dc=foo,dc=example": "مثال: ou=users,dc=foo,dc=example", + "Example: sAMAccountName or uid or userPrincipalName": "مثال: sAMAccountName أو uid أو userPrincipalName", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "تجاوزت عدد المقاعد المسموح بها في الترخيص. 
يرجى الاتصال بالدعم لزيادتها.", + "Exclude": "استبعاد", + "Execute code for analysis": "تنفيذ الكود للتحليل", + "Expand": "توسيع", + "Experimental": "تجريبي", + "Explain": "شرح", + "Explain this section to me in more detail": "اشرح لي هذا القسم بمزيد من التفصيل", + "Explore the cosmos": "استكشف الكون", + "Export": "تصدير", + "Export All Archived Chats": "تصدير جميع المحادثات المؤرشفة", + "Export All Chats (All Users)": "تصدير جميع الدردشات (جميع المستخدمين)", + "Export chat (.json)": "تصدير المحادثة (.json)", + "Export Chats": "تصدير جميع الدردشات", + "Export Config to JSON File": "تصدير الإعدادات إلى ملف JSON", + "Export Functions": "تصدير الوظائف", + "Export Models": "نماذج التصدير", + "Export Presets": "تصدير الإعدادات المسبقة", + "Export Prompts": "مطالبات التصدير", + "Export to CSV": "تصدير إلى CSV", + "Export Tools": "تصدير الأدوات", + "External Models": "نماذج خارجية", + "Failed to add file.": "فشل في إضافة الملف.", + "Failed to create API Key.": "فشل في إنشاء مفتاح API.", + "Failed to fetch models": "فشل في جلب النماذج", + "Failed to read clipboard contents": "فشل في قراءة محتويات الحافظة", + "Failed to save models configuration": "فشل في حفظ إعدادات النماذج", + "Failed to update settings": "فشل في تحديث الإعدادات", + "Failed to upload file.": "فشل في رفع الملف.", + "Features": "الميزات", + "Features Permissions": "أذونات الميزات", + "February": "فبراير", + "Feedback History": "سجل الملاحظات", + "Feedbacks": "الملاحظات", + "Feel free to add specific details": "لا تتردد في إضافة تفاصيل محددة", + "File": "ملف", + "File added successfully.": "تم إضافة الملف بنجاح.", + "File content updated successfully.": "تم تحديث محتوى الملف بنجاح.", + "File Mode": "وضع الملف", + "File not found.": "لم يتم العثور على الملف.", + "File removed successfully.": "تم حذف الملف بنجاح.", + "File size should not exceed {{maxSize}} MB.": "يجب ألا يتجاوز حجم الملف {{maxSize}} ميغابايت.", + "File uploaded successfully": "تم رفع الملف بنجاح", + "Files": "الملفات", + "Filter is now 
globally disabled": "تم الآن تعطيل الفلتر على مستوى النظام", + "Filter is now globally enabled": "تم الآن تفعيل الفلتر على مستوى النظام", + "Filters": "الفلاتر", + "Fingerprint spoofing detected: Unable to use initials as avatar. Defaulting to default profile image.": "تم اكتشاف انتحال بصمة الإصبع: غير قادر على استخدام الأحرف الأولى كصورة رمزية. الافتراضي لصورة الملف الشخصي الافتراضية.", + "Fluidly stream large external response chunks": "دفق قطع الاستجابة الخارجية الكبيرة بسلاسة", + "Focus chat input": "التركيز على إدخال الدردشة", + "Folder deleted successfully": "تم حذف المجلد بنجاح", + "Folder name cannot be empty": "لا يمكن أن يكون اسم المجلد فارغًا", + "Folder name cannot be empty.": "لا يمكن أن يكون اسم المجلد فارغًا.", + "Folder name updated successfully": "تم تحديث اسم المجلد بنجاح", + "Followed instructions perfectly": "اتبعت التعليمات على أكمل وجه", + "Forge new paths": "أنشئ مسارات جديدة", + "Form": "نموذج", + "Format your variables using brackets like this:": "نسّق متغيراتك باستخدام الأقواس بهذا الشكل:", + "Frequency Penalty": "عقوبة التردد", + "Full Context Mode": "وضع السياق الكامل", + "Function": "وظيفة", + "Function Calling": "استدعاء الوظائف", + "Function created successfully": "تم إنشاء الوظيفة بنجاح", + "Function deleted successfully": "تم حذف الوظيفة بنجاح", + "Function Description": "وصف الوظيفة", + "Function ID": "معرف الوظيفة", + "Function is now globally disabled": "تم الآن تعطيل الوظيفة على مستوى النظام", + "Function is now globally enabled": "تم الآن تفعيل الوظيفة على مستوى النظام", + "Function Name": "اسم الوظيفة", + "Function updated successfully": "تم تحديث الوظيفة بنجاح", + "Functions": "الوظائف", + "Functions allow arbitrary code execution": "الوظائف تتيح تنفيذ كود برمجي مخصص", + "Functions allow arbitrary code execution.": "الوظائف تتيح تنفيذ كود برمجي مخصص.", + "Functions imported successfully": "تم استيراد الوظائف بنجاح", + "Gemini": "Gemini", + "Gemini API Config": "إعدادات واجهة Gemini API", + "Gemini API Key is required.": 
"مفتاح Gemini API مطلوب.", + "General": "عام", + "Generate an image": "توليد صورة", + "Generate Image": "توليد صورة", + "Generate prompt pair": "توليد زوج من التعليمات", + "Generating search query": "إنشاء استعلام بحث", + "Get started": "ابدأ الآن", + "Get started with {{WEBUI_NAME}}": "ابدأ باستخدام {{WEBUI_NAME}}", + "Global": "عام", + "Good Response": "استجابة جيدة", + "Google Drive": "Google Drive", + "Google PSE API Key": "مفتاح واجهة برمجة تطبيقات PSE من Google", + "Google PSE Engine Id": "معرف محرك PSE من Google", + "Group created successfully": "تم إنشاء المجموعة بنجاح", + "Group deleted successfully": "تم حذف المجموعة بنجاح", + "Group Description": "وصف المجموعة", + "Group Name": "اسم المجموعة", + "Group updated successfully": "تم تحديث المجموعة بنجاح", + "Groups": "المجموعات", + "Haptic Feedback": "الاهتزاز اللمسي", + "has no conversations.": "ليس لديه محادثات.", + "Hello, {{name}}": " {{name}} مرحبا", + "Help": "مساعدة", + "Help us create the best community leaderboard by sharing your feedback history!": "ساعدنا في إنشاء أفضل قائمة للمتصدرين من خلال مشاركة سجل ملاحظاتك!", + "Hex Color": "لون سداسي", + "Hex Color - Leave empty for default color": "اللون السداسي - اتركه فارغًا لاستخدام اللون الافتراضي", + "Hide": "أخفاء", + "Home": "الصفحة الرئيسية", + "Host": "المضيف", + "How can I help you today?": "كيف استطيع مساعدتك اليوم؟", + "How would you rate this response?": "كيف تقيّم هذا الرد؟", + "Hybrid Search": "البحث الهجين", + "I acknowledge that I have read and I understand the implications of my action. I am aware of the risks associated with executing arbitrary code and I have verified the trustworthiness of the source.": "أقر بأنني قرأت وفهمت تبعات هذا الإجراء. 
أنا على دراية بالمخاطر المرتبطة بتنفيذ كود عشوائي وقد تحققت من موثوقية المصدر.", + "ID": "المعرّف", + "Ignite curiosity": "أشعل الفضول", + "Image": "صورة", + "Image Compression": "ضغط الصور", + "Image Generation": "توليد الصور", + "Image Generation (Experimental)": "توليد الصور (تجريبي)", + "Image Generation Engine": "محرك توليد الصور", + "Image Max Compression Size": "الحد الأقصى لضغط الصورة", + "Image Prompt Generation": "توليد التوجيه للصورة", + "Image Prompt Generation Prompt": "نص توجيه توليد الصورة", + "Image Settings": "إعدادات الصورة", + "Images": "الصور", + "Import Chats": "استيراد الدردشات", + "Import Config from JSON File": "استيراد الإعدادات من ملف JSON", + "Import Functions": "استيراد الوظائف", + "Import Models": "استيراد النماذج", + "Import Presets": "استيراد الإعدادات المسبقة", + "Import Prompts": "مطالبات الاستيراد", + "Import Tools": "استيراد الأدوات", + "Include": "تضمين", + "Include `--api-auth` flag when running stable-diffusion-webui": "أضف الخيار `--api-auth` عند تشغيل stable-diffusion-webui", + "Include `--api` flag when running stable-diffusion-webui": "قم بتضمين علامة `-api` عند تشغيل Stable-diffusion-webui", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "يؤثر على سرعة استجابة الخوارزمية للتغذية الراجعة من النص المُولد. 
معدل تعلم منخفض يؤدي إلى تعديلات أبطأ، بينما معدل أعلى يجعلها أكثر استجابة.", + "Info": "معلومات", + "Input commands": "إدخال الأوامر", + "Install from Github URL": "التثبيت من عنوان URL لجيثب", + "Instant Auto-Send After Voice Transcription": "إرسال تلقائي فوري بعد تحويل الصوت إلى نص", + "Integration": "التكامل", + "Interface": "واجهه المستخدم", + "Invalid file format.": "تنسيق ملف غير صالح.", + "Invalid Tag": "تاق غير صالحة", + "is typing...": "يكتب...", + "January": "يناير", + "Jina API Key": "مفتاح API لـ Jina", + "join our Discord for help.": "انضم إلى Discord للحصول على المساعدة.", + "JSON": "JSON", + "JSON Preview": "معاينة JSON", + "July": "يوليو", + "June": "يونيو", + "Jupyter Auth": "مصادقة Jupyter", + "Jupyter URL": "رابط Jupyter", + "JWT Expiration": "JWT تجريبي", + "JWT Token": "JWT Token", + "Kagi Search API Key": "مفتاح API لـ Kagi Search", + "Keep Alive": "Keep Alive", + "Key": "المفتاح", + "Keyboard shortcuts": "اختصارات لوحة المفاتيح", + "Knowledge": "المعرفة", + "Knowledge Access": "الوصول إلى المعرفة", + "Knowledge created successfully.": "تم إنشاء المعرفة بنجاح.", + "Knowledge deleted successfully.": "تم حذف المعرفة بنجاح.", + "Knowledge reset successfully.": "تم إعادة تعيين المعرفة بنجاح.", + "Knowledge updated successfully": "تم تحديث المعرفة بنجاح", + "Kokoro.js (Browser)": "Kokoro.js (المتصفح)", + "Kokoro.js Dtype": "نوع بيانات Kokoro.js", + "Label": "التسمية", + "Landing Page Mode": "وضع الصفحة الرئيسية", + "Language": "اللغة", + "Last Active": "آخر نشاط", + "Last Modified": "آخر تعديل", + "Last reply": "آخر رد", + "LDAP": "LDAP", + "LDAP server updated": "تم تحديث خادم LDAP", + "Leaderboard": "لوحة المتصدرين", + "Leave empty for unlimited": "اتركه فارغًا لعدم وجود حد", + "Leave empty to include all models from \"{{URL}}/api/tags\" endpoint": "اتركه فارغًا لتضمين جميع النماذج من نقطة النهاية \"{{URL}}/api/tags\"", + "Leave empty to include all models from \"{{URL}}/models\" endpoint": "اتركه فارغًا لتضمين جميع النماذج من نقطة النهاية 
\"{{URL}}/models\"", + "Leave empty to include all models or select specific models": "اتركه فارغًا لتضمين جميع النماذج أو اختر نماذج محددة", + "Leave empty to use the default prompt, or enter a custom prompt": "اتركه فارغًا لاستخدام التوجيه الافتراضي، أو أدخل توجيهًا مخصصًا", + "Leave model field empty to use the default model.": "اترك حقل النموذج فارغًا لاستخدام النموذج الافتراضي.", + "License": "الترخيص", + "Light": "فاتح", + "Listening...": "جارٍ الاستماع...", + "Llama.cpp": "Llama.cpp", + "LLMs can make mistakes. Verify important information.": "يمكن أن تصدر بعض الأخطاء. لذلك يجب التحقق من المعلومات المهمة", + "Loader": "المحمّل", + "Loading Kokoro.js...": "جارٍ تحميل Kokoro.js...", + "Local": "محلي", + "Local Models": "النماذج المحلية", + "Location access not allowed": "لا يُسمح بالوصول إلى الموقع", + "Logit Bias": "تحيّز Logit", + "Lost": "ضائع", + "LTR": "من جهة اليسار إلى اليمين", + "Made by Open WebUI Community": "OpenWebUI تم إنشاؤه بواسطة مجتمع ", + "Make sure to enclose them with": "تأكد من إرفاقها", + "Make sure to export a workflow.json file as API format from ComfyUI.": "تأكد من تصدير ملف workflow.json بصيغة API من ComfyUI.", + "Manage": "إدارة", + "Manage Direct Connections": "إدارة الاتصالات المباشرة", + "Manage Models": "إدارة النماذج", + "Manage Ollama": "إدارة Ollama", + "Manage Ollama API Connections": "إدارة اتصالات Ollama API", + "Manage OpenAI API Connections": "إدارة اتصالات OpenAI API", + "Manage Pipelines": "إدارة خطوط الأنابيب", + "March": "مارس", + "Max Tokens (num_predict)": "ماكس توكنز (num_predict)", + "Max Upload Count": "الحد الأقصى لعدد التحميلات", + "Max Upload Size": "الحد الأقصى لحجم الملف المرفوع", + "Maximum of 3 models can be downloaded simultaneously. Please try again later.": "يمكن تنزيل 3 نماذج كحد أقصى في وقت واحد. 
الرجاء معاودة المحاولة في وقت لاحق.", + "May": "مايو", + "Memories accessible by LLMs will be shown here.": "سيتم عرض الذكريات التي يمكن الوصول إليها بواسطة LLMs هنا.", + "Memory": "الذاكرة", + "Memory added successfully": "تم إضافة الذاكرة بنجاح", + "Memory cleared successfully": "تم مسح الذاكرة بنجاح", + "Memory deleted successfully": "تم حذف الذاكرة بنجاح", + "Memory updated successfully": "تم تحديث الذاكرة بنجاح", + "Merge Responses": "دمج الردود", + "Message rating should be enabled to use this feature": "يجب تفعيل تقييم الرسائل لاستخدام هذه الميزة", + "Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "لن تتم مشاركة الرسائل التي ترسلها بعد إنشاء الرابط الخاص بك. سيتمكن المستخدمون الذين لديهم عنوان URL من عرض الدردشة المشتركة", + "Min P": "الحد الأدنى P", + "Minimum Score": "الحد الأدنى من النقاط", + "Mirostat": "Mirostat", + "Mirostat Eta": "Mirostat Eta", + "Mirostat Tau": "Mirostat Tau", + "Model": "النموذج", + "Model '{{modelName}}' has been successfully downloaded.": "تم تحميل النموذج '{{modelName}}' بنجاح", + "Model '{{modelTag}}' is already in queue for downloading.": "النموذج '{{modelTag}}' موجود بالفعل في قائمة الانتظار للتحميل", + "Model {{modelId}} not found": "لم يتم العثور على النموذج {{modelId}}.", + "Model {{modelName}} is not vision capable": "نموذج {{modelName}} غير قادر على الرؤية", + "Model {{name}} is now {{status}}": "نموذج {{name}} هو الآن {{status}}", + "Model accepts image inputs": "النموذج يقبل إدخالات الصور", + "Model created successfully!": "تم إنشاء النموذج بنجاح!", + "Model filesystem path detected. Model shortname is required for update, cannot continue.": "تم اكتشاف مسار نظام الملفات النموذجي. 
الاسم المختصر للنموذج مطلوب للتحديث، ولا يمكن الاستمرار.", + "Model Filtering": "تصفية النماذج", + "Model ID": "رقم الموديل", + "Model IDs": "معرّفات النماذج", + "Model Name": "اسم النموذج", + "Model not selected": "لم تختار موديل", + "Model Params": "معلمات النموذج", + "Model Permissions": "أذونات النموذج", + "Model updated successfully": "تم تحديث النموذج بنجاح", + "Modelfile Content": "محتوى الملف النموذجي", + "Models": "الموديلات", + "Models Access": "الوصول إلى النماذج", + "Models configuration saved successfully": "تم حفظ إعدادات النماذج بنجاح", + "Mojeek Search API Key": "مفتاح API لـ Mojeek Search", + "more": "المزيد", + "More": "المزيد", + "Name": "الأسم", + "Name your knowledge base": "قم بتسمية قاعدة معرفتك", + "Native": "أصلي", + "New Chat": "دردشة جديدة", + "New Folder": "مجلد جديد", + "New Password": "كلمة المرور الجديدة", + "new-channel": "قناة جديدة", + "No content found": "لم يتم العثور على محتوى", + "No content to speak": "لا يوجد محتوى للتحدث عنه", + "No distance available": "لا توجد مسافة متاحة", + "No feedbacks found": "لم يتم العثور على ملاحظات", + "No file selected": "لم يتم تحديد ملف", + "No files found.": "لم يتم العثور على ملفات.", + "No groups with access, add a group to grant access": "لا توجد مجموعات لها حق الوصول، أضف مجموعة لمنح الوصول", + "No HTML, CSS, or JavaScript content found.": "لم يتم العثور على محتوى HTML أو CSS أو JavaScript.", + "No inference engine with management support found": "لم يتم العثور على محرك استدلال يدعم الإدارة", + "No knowledge found": "لم يتم العثور على معرفة", + "No memories to clear": "لا توجد ذاكرة لمسحها", + "No model IDs": "لا توجد معرّفات نماذج", + "No models found": "لم يتم العثور على نماذج", + "No models selected": "لم يتم اختيار نماذج", + "No results found": "لا توجد نتايج", + "No search query generated": "لم يتم إنشاء استعلام بحث", + "No source available": "لا يوجد مصدر متاح", + "No users were found.": "لم يتم العثور على مستخدمين.", + "No valves to update": "لا توجد صمامات للتحديث", + "None": "اي", 
+ "Not factually correct": "ليس صحيحا من حيث الواقع", + "Not helpful": "غير مفيد", + "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "ملاحظة: إذا قمت بتعيين الحد الأدنى من النقاط، فلن يؤدي البحث إلا إلى إرجاع المستندات التي لها نقاط أكبر من أو تساوي الحد الأدنى من النقاط.", + "Notes": "ملاحظات", + "Notification Sound": "صوت الإشعارات", + "Notification Webhook": "رابط Webhook للإشعارات", + "Notifications": "إشعارات", + "November": "نوفمبر", + "num_gpu (Ollama)": "عدد وحدات GPU (لـ Ollama)", + "num_thread (Ollama)": "num_thread (أولاما)", + "OAuth ID": "معرّف OAuth", + "October": "اكتوبر", + "Off": "أغلاق", + "Okay, Let's Go!": "حسنا دعنا نذهب!", + "OLED Dark": "OLED داكن", + "Ollama": "Ollama", + "Ollama API": "أولاما API", + "Ollama API settings updated": "تم تحديث إعدادات واجهة Ollama API", + "Ollama Version": "Ollama الاصدار", + "On": "تشغيل", + "OneDrive": "OneDrive", + "Only alphanumeric characters and hyphens are allowed": "يُسمح فقط بالحروف والأرقام والواصلات", + "Only alphanumeric characters and hyphens are allowed in the command string.": "يُسمح فقط بالأحرف الأبجدية الرقمية والواصلات في سلسلة الأمر.", + "Only collections can be edited, create a new knowledge base to edit/add documents.": "يمكن تعديل المجموعات فقط، أنشئ قاعدة معرفة جديدة لتعديل أو إضافة مستندات.", + "Only select users and groups with permission can access": "يمكن الوصول فقط من قبل المستخدمين والمجموعات المصرح لهم", + "Oops! Looks like the URL is invalid. Please double-check and try again.": "خطاء! يبدو أن عنوان URL غير صالح. يرجى التحقق مرة أخرى والمحاولة مرة أخرى.", + "Oops! There are files still uploading. Please wait for the upload to complete.": "عذرًا! لا تزال بعض الملفات قيد الرفع. يرجى الانتظار حتى يكتمل الرفع.", + "Oops! There was an error in the previous response.": "عذرًا! حدث خطأ في الرد السابق.", + "Oops! You're using an unsupported method (frontend only). 
Please serve the WebUI from the backend.": "خطاء! أنت تستخدم طريقة غير مدعومة (الواجهة الأمامية فقط). يرجى تقديم واجهة WebUI من الواجهة الخلفية.", + "Open file": "فتح الملف", + "Open in full screen": "فتح في وضع ملء الشاشة", + "Open new chat": "فتح محادثة جديده", + "Open WebUI uses faster-whisper internally.": "تستخدم واجهة WebUI أداة faster-whisper داخليًا.", + "Open WebUI uses SpeechT5 and CMU Arctic speaker embeddings.": "تستخدم WebUI نموذج SpeechT5 وتضمينات صوتية من CMU Arctic.", + "Open WebUI version (v{{OPEN_WEBUI_VERSION}}) is lower than required version (v{{REQUIRED_VERSION}})": "إصدار WebUI الحالي (v{{OPEN_WEBUI_VERSION}}) أقل من الإصدار المطلوب (v{{REQUIRED_VERSION}})", + "OpenAI": "OpenAI", + "OpenAI API": "OpenAI API", + "OpenAI API Config": "OpenAI API إعدادات", + "OpenAI API Key is required.": "OpenAI API.مطلوب مفتاح ", + "OpenAI API settings updated": "تم تحديث إعدادات OpenAI API", + "OpenAI URL/Key required.": "URL/مفتاح OpenAI.مطلوب عنوان ", + "or": "أو", + "Organize your users": "تنظيم المستخدمين الخاصين بك", + "Other": "آخر", + "OUTPUT": "الإخراج", + "Output format": "تنسيق الإخراج", + "Overview": "نظرة عامة", + "page": "صفحة", + "Password": "الباسورد", + "Paste Large Text as File": "الصق نصًا كبيرًا كملف", + "PDF document (.pdf)": "PDF ملف (.pdf)", + "PDF Extract Images (OCR)": "PDF أستخرج الصور (OCR)", + "pending": "قيد الانتظار", + "Permission denied when accessing media devices": "تم رفض الإذن عند محاولة الوصول إلى أجهزة الوسائط", + "Permission denied when accessing microphone": "تم رفض الإذن عند محاولة الوصول إلى الميكروفون", + "Permission denied when accessing microphone: {{error}}": "{{error}} تم رفض الإذن عند الوصول إلى الميكروفون ", + "Permissions": "الأذونات", + "Perplexity API Key": "مفتاح API لـ Perplexity", + "Personalization": "التخصيص", + "Pin": "تثبيت", + "Pinned": "مثبت", + "Pioneer insights": "رؤى رائدة", + "Pipeline deleted successfully": "تم حذف خط المعالجة بنجاح", + "Pipeline downloaded successfully": "تم تنزيل خط المعالجة 
بنجاح", + "Pipelines": "خطوط الانابيب", + "Pipelines Not Detected": "لم يتم الكشف عن خطوط المعالجة", + "Pipelines Valves": "صمامات خطوط الأنابيب", + "Plain text (.txt)": "نص عادي (.txt)", + "Playground": "مكان التجربة", + "Please carefully review the following warnings:": "يرجى مراجعة التحذيرات التالية بعناية:", + "Please do not close the settings page while loading the model.": "الرجاء عدم إغلاق صفحة الإعدادات أثناء تحميل النموذج.", + "Please enter a prompt": "الرجاء إدخال توجيه", + "Please fill in all fields.": "الرجاء تعبئة جميع الحقول.", + "Please select a model first.": "الرجاء اختيار نموذج أولاً.", + "Please select a model.": "الرجاء اختيار نموذج.", + "Please select a reason": "الرجاء اختيار سبب", + "Port": "المنفذ", + "Positive attitude": "موقف ايجابي", + "Prefix ID": "معرف البادئة", + "Prefix ID is used to avoid conflicts with other connections by adding a prefix to the model IDs - leave empty to disable": "يُستخدم معرف البادئة لتفادي التعارض مع الاتصالات الأخرى من خلال إضافة بادئة إلى معرفات النماذج – اتركه فارغًا لتعطيله", + "Presence Penalty": "عقوبة التكرار", + "Previous 30 days": "أخر 30 يوم", + "Previous 7 days": "أخر 7 أيام", + "Profile Image": "صورة الملف الشخصي", + "Prompt": "التوجيه", + "Prompt (e.g. 
Tell me a fun fact about the Roman Empire)": "موجه (على سبيل المثال: أخبرني بحقيقة ممتعة عن الإمبراطورية الرومانية)", + "Prompt Content": "محتوى عاجل", + "Prompt created successfully": "تم إنشاء التوجيه بنجاح", + "Prompt suggestions": "اقتراحات سريعة", + "Prompt updated successfully": "تم تحديث التوجيه بنجاح", + "Prompts": "مطالبات", + "Prompts Access": "الوصول إلى التوجيهات", + "Pull \"{{searchValue}}\" from Ollama.com": "Ollama.com \"{{searchValue}}\" أسحب من ", + "Pull a model from Ollama.com": "Ollama.com سحب الموديل من ", + "Query Generation Prompt": "توجيه إنشاء الاستعلام", + "RAG Template": "RAG تنمبلت", + "Rating": "التقييم", + "Re-rank models by topic similarity": "إعادة ترتيب النماذج حسب تشابه الموضوع", + "Read": "قراءة", + "Read Aloud": "أقراء لي", + "Reasoning Effort": "جهد الاستدلال", + "Record voice": "سجل صوت", + "Redirecting you to Open WebUI Community": "OpenWebUI إعادة توجيهك إلى مجتمع ", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "يقلل من احتمال توليد إجابات غير منطقية. 
القيم الأعلى (مثل 100) تعطي إجابات أكثر تنوعًا، بينما القيم الأدنى (مثل 10) تكون أكثر تحفظًا.", + "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "أشر إلى نفسك باسم \"المستخدم\" (مثل: \"المستخدم يتعلم الإسبانية\")", + "References from": "مراجع من", + "Refused when it shouldn't have": "رفض عندما لا ينبغي أن يكون", + "Regenerate": "تجديد", + "Release Notes": "ملاحظات الإصدار", + "Relevance": "الصلة", + "Remove": "إزالة", + "Remove Model": "حذف الموديل", + "Rename": "إعادة تسمية", + "Reorder Models": "إعادة ترتيب النماذج", + "Repeat Last N": "N كرر آخر", + "Repeat Penalty (Ollama)": "عقوبة التكرار (Ollama)", + "Reply in Thread": "الرد داخل سلسلة الرسائل", + "Request Mode": "وضع الطلب", + "Reranking Model": "إعادة تقييم النموذج", + "Reranking model disabled": "تم تعطيل نموذج إعادة الترتيب", + "Reranking model set to \"{{reranking_model}}\"": "تم ضبط نموذج إعادة الترتيب على \"{{reranking_model}}\"", + "Reset": "إعادة تعيين", + "Reset All Models": "إعادة تعيين جميع النماذج", + "Reset Upload Directory": "إعادة تعيين مجلد التحميل", + "Reset Vector Storage/Knowledge": "إعادة تعيين تخزين المتجهات/المعرفة", + "Reset view": "إعادة تعيين العرض", + "Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "لا يمكن تفعيل إشعارات الردود لأن صلاحيات الموقع مرفوضة. 
يرجى التوجه إلى إعدادات المتصفح لمنح الصلاحية اللازمة.", + "Response splitting": "تقسيم الرد", + "Result": "النتيجة", + "Retrieval": "الاسترجاع", + "Retrieval Query Generation": "توليد استعلام الاسترجاع", + "Rich Text Input for Chat": "إدخال نص منسق للمحادثة", + "RK": "RK", + "Role": "منصب", + "Rosé Pine": "Rosé Pine", + "Rosé Pine Dawn": "Rosé Pine Dawn", + "RTL": "من اليمين إلى اليسار", + "Run": "تنفيذ", + "Running": "جارٍ التنفيذ", + "Save": "حفظ", + "Save & Create": "حفظ وإنشاء", + "Save & Update": "حفظ وتحديث", + "Save As Copy": "حفظ كنسخة", + "Save Tag": "حفظ الوسم", + "Saved": "تم الحفظ", + "Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "لم يعد حفظ سجلات الدردشة مباشرة في مساحة تخزين متصفحك مدعومًا. يرجى تخصيص بعض الوقت لتنزيل وحذف سجلات الدردشة الخاصة بك عن طريق النقر على الزر أدناه. لا تقلق، يمكنك بسهولة إعادة استيراد سجلات الدردشة الخاصة بك إلى الواجهة الخلفية من خلاله", + "Scroll to bottom when switching between branches": "التمرير للأسفل عند التبديل بين الفروع", + "Search": "البحث", + "Search a model": "البحث عن موديل", + "Search Base": "قاعدة البحث", + "Search Chats": "البحث في الدردشات", + "Search Collection": "البحث في المجموعة", + "Search Filters": "مرشحات البحث", + "search for tags": "البحث عن وسوم", + "Search Functions": "البحث في الوظائف", + "Search Knowledge": "البحث في المعرفة", + "Search Models": "نماذج البحث", + "Search options": "خيارات البحث", + "Search Prompts": "أبحث حث", + "Search Result Count": "عدد نتائج البحث", + "Search the internet": "البحث في الإنترنت", + "Search Tools": "أدوات البحث", + "SearchApi API Key": "مفتاح API لـ SearchApi", + "SearchApi Engine": "محرك SearchApi", + "Searched {{count}} sites": "تم البحث في {{count}} مواقع", + "Searching \"{{searchQuery}}\"": "جارٍ البحث عن \"{{searchQuery}}\"", + "Searching Knowledge for 
\"{{searchQuery}}\"": "جارٍ البحث في المعرفة عن \"{{searchQuery}}\"", + "Searxng Query URL": "عنوان URL لاستعلام Searxng", + "See readme.md for instructions": "readme.md للحصول على التعليمات", + "See what's new": "ما الجديد", + "Seed": "Seed", + "Select a base model": "حدد نموذجا أساسيا", + "Select a engine": "اختر محركًا", + "Select a function": "اختر وظيفة", + "Select a group": "اختر مجموعة", + "Select a model": "أختار الموديل", + "Select a pipeline": "حدد مسارا", + "Select a pipeline url": "حدد عنوان URL لخط الأنابيب", + "Select a tool": "اختر أداة", + "Select an auth method": "اختر طريقة التوثيق", + "Select an Ollama instance": "اختر نسخة Ollama", + "Select Engine": "اختر المحرك", + "Select Knowledge": "اختر المعرفة", + "Select only one model to call": "اختر نموذجًا واحدًا فقط للاستدعاء", + "Selected model(s) do not support image inputs": "النموذج (النماذج) المحددة لا تدعم مدخلات الصور", + "Semantic distance to query": "المسافة الدلالية إلى الاستعلام", + "Send": "إرسال", + "Send a Message": "يُرجى إدخال طلبك هنا", + "Send message": "إرسال الرسالة", + "Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "يرسل `stream_options: { include_usage: true }` في الطلب.\nالمزودون المدعومون سيُرجعون معلومات استخدام الرموز في الاستجابة عند التفعيل.", + "September": "سبتمبر", + "SerpApi API Key": "مفتاح API لـ SerpApi", + "SerpApi Engine": "محرك SerpApi", + "Serper API Key": "مفتاح واجهة برمجة تطبيقات سيربر", + "Serply API Key": "مفتاح API لـ Serply", + "Serpstack API Key": "مفتاح واجهة برمجة تطبيقات Serpstack", + "Server connection verified": "تم التحقق من اتصال الخادم", + "Set as default": "الافتراضي", + "Set CFG Scale": "ضبط مقياس CFG", + "Set Default Model": "تحديد الموديل الافتراضي", + "Set embedding model": "تعيين نموذج التضمين", + "Set embedding model (e.g.
{{model}})": "ضبط نموذج المتجهات (على سبيل المثال: {{model}})", + "Set Image Size": "حجم الصورة", + "Set reranking model (e.g. {{model}})": "ضبط نموذج إعادة الترتيب (على سبيل المثال: {{model}})", + "Set Sampler": "تعيين العينة", + "Set Scheduler": "تعيين المجدول", + "Set Steps": "ضبط الخطوات", + "Set Task Model": "تعيين نموذج المهمة", + "Set the number of layers, which will be off-loaded to GPU. Increasing this value can significantly improve performance for models that are optimized for GPU acceleration but may also consume more power and GPU resources.": "تعيين عدد الطبقات التي سيتم تفريغها إلى وحدة معالجة الرسومات (GPU). زيادتها قد تحسن الأداء بشكل كبير، لكنها تستهلك طاقة وموارد GPU أكثر.", + "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "تحديد عدد سلاسل المعالجة المستخدمة في الحساب. هذا الخيار يتحكم في عدد السلاسل لمعالجة الطلبات بالتوازي. زيادته يحسن الأداء تحت الضغط العالي لكنه يستهلك موارد المعالج.", + "Set Voice": "ضبط الصوت", + "Set whisper model": "تعيين نموذج Whisper", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "تعيين انحياز ثابت ضد الرموز التي ظهرت مرة واحدة على الأقل. القيم الأعلى (مثل 1.5) تعاقب التكرار بقوة، والأقل (مثل 0.9) تكون أكثر تساهلًا. عند 0 يتم تعطيله.", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "يحدد انحيازًا متدرجًا ضد الرموز لمعاقبة التكرار حسب عدد مرات الظهور. القيم الأعلى (1.5) تعاقب أكثر، والأقل (0.9) تكون أكثر تساهلًا. 
عند 0 يتم تعطيله.", + "Sets how far back for the model to look back to prevent repetition.": "يحدد مدى رجوع النموذج إلى الوراء لتجنب التكرار.", + "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "يحدد البذرة العشوائية لاستخدامها في التوليد. تعيين قيمة معينة يجعل النموذج ينتج نفس النص لنفس التوجيه.", + "Sets the size of the context window used to generate the next token.": "يحدد حجم نافذة السياق المستخدمة لتوليد الرمز التالي.", + "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "يحدد تسلسلات الإيقاف. عند مواجهتها، سيتوقف النموذج عن التوليد ويُرجع النتيجة. يمكن تحديد أنماط توقف متعددة داخل ملف النموذج.", + "Settings": "الاعدادات", + "Settings saved successfully!": "تم حفظ الاعدادات بنجاح", + "Share": "كشاركة", + "Share Chat": "مشاركة الدردشة", + "Share to Open WebUI Community": "OpenWebUI شارك في مجتمع", + "Show": "عرض", + "Show \"What's New\" modal on login": "عرض نافذة \"ما الجديد\" عند تسجيل الدخول", + "Show Admin Details in Account Pending Overlay": "عرض تفاصيل المشرف في نافذة \"الحساب قيد الانتظار\"", + "Show shortcuts": "إظهار الاختصارات", + "Show your support!": "أظهر دعمك!", + "Showcased creativity": "أظهر الإبداع", + "Sign in": "تسجيل الدخول", + "Sign in to {{WEBUI_NAME}}": "سجّل الدخول إلى {{WEBUI_NAME}}", + "Sign in to {{WEBUI_NAME}} with LDAP": "سجّل الدخول إلى {{WEBUI_NAME}} باستخدام LDAP", + "Sign Out": "تسجيل الخروج", + "Sign up": "تسجيل", + "Sign up to {{WEBUI_NAME}}": "سجّل في {{WEBUI_NAME}}", + "Signing in to {{WEBUI_NAME}}": "جارٍ تسجيل الدخول إلى {{WEBUI_NAME}}", + "sk-1234": "sk-1234", + "Source": "المصدر", + "Speech Playback Speed": "سرعة تشغيل الصوت", + "Speech recognition error: {{error}}": "{{error}} خطأ في التعرف على الكلام", + "Speech-to-Text Engine": "محرك تحويل الكلام إلى نص", 
+ "Stop": "إيقاف", + "Stop Sequence": "وقف التسلسل", + "Stream Chat Response": "بث استجابة الدردشة", + "STT Model": "نموذج تحويل الصوت إلى نص (STT)", + "STT Settings": "STT اعدادات", + "Subtitle (e.g. about the Roman Empire)": "(e.g. about the Roman Empire) الترجمة", + "Success": "نجاح", + "Successfully updated.": "تم التحديث بنجاح", + "Suggested": "مقترحات", + "Support": "الدعم", + "Support this plugin:": "دعم هذا المكون الإضافي:", + "Sync directory": "مزامنة المجلد", + "System": "النظام", + "System Instructions": "تعليمات النظام", + "System Prompt": "محادثة النظام", + "Tags Generation": "إنشاء الوسوم", + "Tags Generation Prompt": "توجيه إنشاء الوسوم", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "يتم استخدام أخذ العينات بدون ذيل لتقليل تأثير الرموز الأقل احتمالًا. القيمة الأعلى (مثل 2.0) تقلل التأثير أكثر، والقيمة 1.0 تعطل هذا الإعداد.", + "Talk to model": "تحدث إلى النموذج", + "Tap to interrupt": "اضغط للمقاطعة", + "Tasks": "المهام", + "Tavily API Key": "مفتاح API لـ Tavily", + "Tell us more:": "أخبرنا المزيد:", + "Temperature": "درجة حرارة", + "Template": "نموذج", + "Temporary Chat": "محادثة مؤقتة", + "Text Splitter": "تقسيم النص", + "Text-to-Speech Engine": "محرك تحويل النص إلى كلام", + "Tfs Z": "Tfs Z", + "Thanks for your feedback!": "شكرا لملاحظاتك!", + "The Application Account DN you bind with for search": "DN لحساب التطبيق الذي تستخدمه للبحث", + "The base to search for users": "الأساس الذي يُستخدم للبحث عن المستخدمين", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "يحدد حجم الدفعة عدد طلبات النصوص التي تتم معالجتها معًا. 
الحجم الأكبر يمكن أن يزيد الأداء والسرعة، ولكنه يحتاج أيضًا إلى ذاكرة أكبر.", + "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "المطورون خلف هذا المكون الإضافي هم متطوعون شغوفون من المجتمع. إذا وجدت هذا المكون مفيدًا، فكر في المساهمة في تطويره.", + "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "قائمة التقييم تعتمد على نظام Elo ويتم تحديثها في الوقت الفعلي.", + "The LDAP attribute that maps to the mail that users use to sign in.": "السمة LDAP التي تتوافق مع البريد الإلكتروني الذي يستخدمه المستخدمون لتسجيل الدخول.", + "The LDAP attribute that maps to the username that users use to sign in.": "السمة LDAP التي تتوافق مع اسم المستخدم الذي يستخدمه المستخدمون لتسجيل الدخول.", + "The leaderboard is currently in beta, and we may adjust the rating calculations as we refine the algorithm.": "لوحة المتصدرين حالياً في وضع تجريبي، وقد نقوم بتعديل حسابات التصنيف أثناء تحسين الخوارزمية.", + "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "الحد الأقصى لحجم الملف بالميغابايت. إذا تجاوز الملف هذا الحد، فلن يتم رفعه.", + "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "الحد الأقصى لعدد الملفات التي يمكن استخدامها في المحادثة دفعة واحدة. إذا تجاوز العدد هذا الحد، فلن يتم رفع الملفات.", + "The score should be a value between 0.0 (0%) and 1.0 (100%).": "يجب أن تكون النتيجة قيمة تتراوح بين 0.0 (0%) و1.0 (100%).", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "درجة حرارة النموذج. زيادتها تجعل الإجابات أكثر إبداعًا.", + "Theme": "الثيم", + "Thinking...": "جارٍ التفكير...", + "This action cannot be undone. Do you wish to continue?": "لا يمكن التراجع عن هذا الإجراء. 
هل ترغب في المتابعة؟", + "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "وهذا يضمن حفظ محادثاتك القيمة بشكل آمن في قاعدة بياناتك الخلفية. شكرًا لك!", + "This is an experimental feature, it may not function as expected and is subject to change at any time.": "هذه ميزة تجريبية، وقد لا تعمل كما هو متوقع وقد تتغير في أي وقت.", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "هذا الخيار يحدد عدد الرموز التي يتم الاحتفاظ بها عند تحديث السياق. مثلاً، إذا تم ضبطه على 2، سيتم الاحتفاظ بآخر رمزين من السياق. الحفاظ على السياق يساعد في استمرارية المحادثة، لكنه قد يحد من التفاعل مع مواضيع جديدة.", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "يحدد هذا الخيار الحد الأقصى لعدد الرموز التي يمكن للنموذج توليدها في الرد. 
زيادته تتيح للنموذج تقديم إجابات أطول، لكنها قد تزيد من احتمالية توليد محتوى غير مفيد أو غير ذي صلة.", + "This option will delete all existing files in the collection and replace them with newly uploaded files.": "سيؤدي هذا الخيار إلى حذف جميع الملفات الحالية في المجموعة واستبدالها بالملفات التي تم تحميلها حديثًا.", + "This response was generated by \"{{model}}\"": "تم توليد هذا الرد بواسطة \"{{model}}\"", + "This will delete": "هذا سيقوم بالحذف", + "This will delete {{NAME}} and all its contents.": "هذا سيحذف {{NAME}} وكل محتوياته.", + "This will delete all models including custom models": "هذا سيحذف جميع النماذج بما في ذلك النماذج المخصصة", + "This will delete all models including custom models and cannot be undone.": "هذا سيحذف جميع النماذج بما في ذلك المخصصة ولا يمكن التراجع عن هذا الإجراء.", + "This will reset the knowledge base and sync all files. Do you wish to continue?": "هذا سيؤدي إلى إعادة تعيين قاعدة المعرفة ومزامنة جميع الملفات. هل ترغب في المتابعة؟", + "Thorough explanation": "شرح شامل", + "Thought for {{DURATION}}": "فكّر لمدة {{DURATION}}", + "Thought for {{DURATION}} seconds": "فكّر لمدة {{DURATION}} ثانية", + "Tika": "Tika", + "Tika Server URL required.": "عنوان خادم Tika مطلوب.", + "Tiktoken": "Tiktoken", + "Tip: Update multiple variable slots consecutively by pressing the tab key in the chat input after each replacement.": "ملاحضة: قم بتحديث عدة فتحات متغيرة على التوالي عن طريق الضغط على مفتاح tab في مدخلات الدردشة بعد كل استبدال.", + "Title": "العنوان", + "Title (e.g. Tell me a fun fact)": "(e.g. 
Tell me a fun fact) العنوان", + "Title Auto-Generation": "توليد تلقائي للعنوان", + "Title cannot be an empty string.": "العنوان مطلوب", + "Title Generation": "توليد العنوان", + "Title Generation Prompt": "موجه إنشاء العنوان", + "TLS": "TLS", + "To access the available model names for downloading,": "للوصول إلى أسماء الموديلات المتاحة للتنزيل،", + "To access the GGUF models available for downloading,": "للوصول إلى الموديلات GGUF المتاحة للتنزيل،", + "To access the WebUI, please reach out to the administrator. Admins can manage user statuses from the Admin Panel.": "للوصول إلى واجهة WebUI، يُرجى التواصل مع المسؤول. يمكن للمسؤولين إدارة حالة المستخدمين من لوحة الإدارة.", + "To attach knowledge base here, add them to the \"Knowledge\" workspace first.": "لإرفاق قاعدة المعرفة هنا، أضفها أولاً إلى مساحة العمل \"المعرفة\".", + "To learn more about available endpoints, visit our documentation.": "لمعرفة المزيد حول نقاط النهاية المتاحة، قم بزيارة الوثائق الخاصة بنا.", + "To protect your privacy, only ratings, model IDs, tags, and metadata are shared from your feedback—your chat logs remain private and are not included.": "لحماية خصوصيتك، يتم مشاركة التقييمات ومعرّفات النماذج والوسوم والبيانات الوصفية فقط من ملاحظاتك—سجلات الدردشة تظل خاصة ولا تُدرج.", + "To select actions here, add them to the \"Functions\" workspace first.": "لاختيار الإجراءات هنا، أضفها أولاً إلى مساحة العمل \"الوظائف\".", + "To select filters here, add them to the \"Functions\" workspace first.": "لاختيار الفلاتر هنا، أضفها أولاً إلى مساحة العمل \"الوظائف\".", + "To select toolkits here, add them to the \"Tools\" workspace first.": "لاختيار الأدوات هنا، أضفها أولاً إلى مساحة العمل \"الأدوات\".", + "Toast notifications for new updates": "إشعارات منبثقة للتحديثات الجديدة", + "Today": "اليوم", + "Toggle settings": "فتح وأغلاق الاعدادات", + "Toggle sidebar": "فتح وأغلاق الشريط الجانبي", + "Token": "رمز", + "Tokens To Keep On Context Refresh (num_keep)": "الرموز المحفوظة عند تحديث السياق (num_keep)", + "Too
verbose": "مفرط في التفاصيل", + "Tool created successfully": "تم إنشاء الأداة بنجاح", + "Tool deleted successfully": "تم حذف الأداة بنجاح", + "Tool Description": "وصف الأداة", + "Tool ID": "معرف الأداة", + "Tool imported successfully": "تم استيراد الأداة بنجاح", + "Tool Name": "اسم الأداة", + "Tool updated successfully": "تم تحديث الأداة بنجاح", + "Tools": "الأدوات", + "Tools Access": "الوصول إلى الأدوات", + "Tools are a function calling system with arbitrary code execution": "الأدوات عبارة عن نظام لاستدعاء الوظائف يسمح بتنفيذ كود برمجي مخصص", + "Tools Function Calling Prompt": "توجيه استدعاء وظائف الأدوات", + "Tools have a function calling system that allows arbitrary code execution": "تحتوي الأدوات على نظام لاستدعاء الوظائف يتيح تنفيذ كود برمجي مخصص", + "Tools have a function calling system that allows arbitrary code execution.": "تحتوي الأدوات على نظام لاستدعاء الوظائف يتيح تنفيذ كود برمجي مخصص.", + "Top K": "Top K", + "Top P": "Top P", + "Transformers": "Transformers", + "Trouble accessing Ollama?": "هل تواجه مشكلة في الوصول", + "Trust Proxy Environment": "بيئة البروكسي الموثوقة", + "TTS Model": "نموذج تحويل النص إلى كلام (TTS)", + "TTS Settings": "TTS اعدادات", + "TTS Voice": "صوت TTS", + "Type": "نوع", + "Type Hugging Face Resolve (Download) URL": "اكتب عنوان URL لحل مشكلة الوجه (تنزيل).", + "Uh-oh! There was an issue with the response.": "أوه! 
حدثت مشكلة في الرد.", + "UI": "واجهة المستخدم", + "Unarchive All": "إلغاء أرشفة الكل", + "Unarchive All Archived Chats": "إلغاء أرشفة جميع المحادثات المؤرشفة", + "Unarchive Chat": "إلغاء أرشفة المحادثة", + "Unlock mysteries": "اكشف الأسرار", + "Unpin": "إزالة التثبيت", + "Unravel secrets": "فكّ الأسرار", + "Untagged": "بدون وسوم", + "Update": "تحديث", + "Update and Copy Link": "تحديث ونسخ الرابط", + "Update for the latest features and improvements.": "حدّث للحصول على أحدث الميزات والتحسينات.", + "Update password": "تحديث كلمة المرور", + "Updated": "تم التحديث", + "Updated at": "تم التحديث في", + "Updated At": "تم التحديث في", + "Upgrade to a licensed plan for enhanced capabilities, including custom theming and branding, and dedicated support.": "قم بالترقية إلى خطة مرخصة للحصول على ميزات إضافية، مثل التخصيص والدعم المخصص.", + "Upload": "رفع", + "Upload a GGUF model": "GGUF رفع موديل نوع", + "Upload directory": "رفع مجلد", + "Upload files": "رفع ملفات", + "Upload Files": "تحميل الملفات", + "Upload Pipeline": "رفع خط المعالجة", + "Upload Progress": "جاري التحميل", + "URL": "الرابط", + "URL Mode": "رابط الموديل", + "Use '#' in the prompt input to load and include your knowledge.": "استخدم الرمز '#' في خانة التوجيه لتحميل وإدراج المعرفة الخاصة بك.", + "Use Gravatar": "Gravatar أستخدم", + "Use groups to group your users and assign permissions.": "استخدم المجموعات لتجميع المستخدمين وتحديد الصلاحيات.", + "Use Initials": "Initials أستخدم", + "use_mlock (Ollama)": "use_mlock (أولاما)", + "use_mmap (Ollama)": "use_mmap (أولاما)", + "user": "مستخدم", + "User": "مستخدم", + "User location successfully retrieved.": "تم استرجاع موقع المستخدم بنجاح.", + "Username": "اسم المستخدم", + "Users": "المستخدمين", + "Using the default arena model with all models. Click the plus button to add custom models.": "يتم استخدام نموذج الساحة الافتراضي مع جميع النماذج. 
اضغط على زر + لإضافة نماذج مخصصة.", + "Utilize": "يستخدم", + "Valid time units:": "وحدات زمنية صالحة:", + "Valves": "الصمامات", + "Valves updated": "تم تحديث الصمامات", + "Valves updated successfully": "تم تحديث الصمامات بنجاح", + "variable": "المتغير", + "variable to have them replaced with clipboard content.": "متغير لاستبدالها بمحتوى الحافظة.", + "Version": "إصدار", + "Version {{selectedVersion}} of {{totalVersions}}": "الإصدار {{selectedVersion}} من {{totalVersions}}", + "View Replies": "عرض الردود", + "Visibility": "مستوى الظهور", + "Voice": "الصوت", + "Voice Input": "إدخال صوتي", + "Warning": "تحذير", + "Warning:": "تحذير:", + "Warning: Enabling this will allow users to upload arbitrary code on the server.": "تحذير: تفعيل هذا الخيار سيسمح للمستخدمين برفع كود عشوائي على الخادم.", + "Warning: If you update or change your embedding model, you will need to re-import all documents.": "تحذير: إذا قمت بتحديث أو تغيير نموذج التضمين الخاص بك، فستحتاج إلى إعادة استيراد كافة المستندات.", + "Warning: Jupyter execution enables arbitrary code execution, posing severe security risks—proceed with extreme caution.": "تحذير: تنفيذ كود Jupyter يتيح تنفيذ كود عشوائي مما يشكل مخاطر أمنية جسيمة—تابع بحذر شديد.", + "Web": "Web", + "Web API": "واجهة برمجة التطبيقات (API)", + "Web Search": "بحث الويب", + "Web Search Engine": "محرك بحث الويب", + "Web Search in Chat": "بحث ويب داخل المحادثة", + "Web Search Query Generation": "توليد استعلام بحث الويب", + "Webhook URL": "Webhook الرابط", + "WebUI Settings": "WebUI اعدادات", + "WebUI URL": "رابط WebUI", + "WebUI will make requests to \"{{url}}/api/chat\"": "ستقوم WebUI بإرسال الطلبات إلى \"{{url}}/api/chat\"", + "WebUI will make requests to \"{{url}}/chat/completions\"": "ستقوم WebUI بإرسال الطلبات إلى \"{{url}}/chat/completions\"", + "What are you trying to achieve?": "ما الذي تحاول تحقيقه؟", + "What are you working on?": "على ماذا تعمل؟", + "What’s New in": "ما هو الجديد", + "When enabled, the model will respond to each chat message in 
real-time, generating a response as soon as the user sends a message. This mode is useful for live chat applications, but may impact performance on slower hardware.": "عند التفعيل، سيستجيب النموذج لكل رسالة في المحادثة بشكل فوري، مولدًا الرد بمجرد إرسال المستخدم لرسالته. هذا الوضع مفيد لتطبيقات الدردشة الحية، لكنه قد يؤثر على الأداء في الأجهزة الأبطأ.", + "wherever you are": "أينما كنت", + "Whisper (Local)": "Whisper (محلي)", + "Why?": "لماذا؟", + "Widescreen Mode": "وضع الشاشة العريضة", + "Won": "فاز", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "يعمل جنبًا إلى جنب مع top-k. القيمة الأعلى (مثلاً 0.95) تنتج نصًا أكثر تنوعًا، بينما القيمة الأقل (مثلاً 0.5) تنتج نصًا أكثر تركيزًا وتحفظًا.", + "Workspace": "مساحة العمل", + "Workspace Permissions": "صلاحيات مساحة العمل", + "Write": "كتابة", + "Write a prompt suggestion (e.g. Who are you?)": "اكتب اقتراحًا سريعًا (على سبيل المثال، من أنت؟)", + "Write a summary in 50 words that summarizes [topic or keyword].": "اكتب ملخصًا في 50 كلمة يلخص [الموضوع أو الكلمة الرئيسية]", + "Write something...": "اكتب شيئًا...", + "Write your model template content here": "اكتب هنا محتوى قالب النموذج الخاص بك", + "Yesterday": "أمس", + "You": "انت", + "You are currently using a trial license. Please contact support to upgrade your license.": "أنت تستخدم حالياً ترخيصًا تجريبيًا. 
يُرجى التواصل مع الدعم للترقية.", + "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "يمكنك الدردشة مع {{maxCount}} ملف(ات) كحد أقصى في نفس الوقت.", + "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "يمكنك تخصيص تفاعلك مع النماذج اللغوية عن طريق إضافة الذكريات باستخدام زر \"إدارة\" أدناه، مما يجعلها أكثر فائدة وتناسبًا لك.", + "You cannot upload an empty file.": "لا يمكنك رفع ملف فارغ.", + "You do not have permission to access this feature.": "ليس لديك صلاحية للوصول إلى هذه الميزة.", + "You do not have permission to upload files": "ليس لديك صلاحية لرفع الملفات", + "You do not have permission to upload files.": "ليس لديك صلاحية لرفع الملفات.", + "You have no archived conversations.": "لا تملك محادثات محفوظه", + "You have shared this chat": "تم مشاركة هذه المحادثة", + "You're a helpful assistant.": "مساعدك المفيد هنا", + "You're now logged in.": "لقد قمت الآن بتسجيل الدخول.", + "Your account status is currently pending activation.": "حالة حسابك حالياً بانتظار التفعيل.", + "Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "سيتم توجيه كامل مساهمتك مباشرة إلى مطور المكون الإضافي؛ لا تأخذ Open WebUI أي نسبة. 
ومع ذلك، قد تفرض منصة التمويل المختارة رسومًا خاصة بها.", + "Youtube": "Youtube", + "Youtube Language": "لغة YouTube", + "Youtube Proxy URL": "رابط بروكسي YouTube" + } \ No newline at end of file diff --git a/src/lib/i18n/locales/languages.json b/src/lib/i18n/locales/languages.json index 5672e45929f..3ad0b246ca0 100644 --- a/src/lib/i18n/locales/languages.json +++ b/src/lib/i18n/locales/languages.json @@ -1,4 +1,8 @@ [ + { + "code": "ar", + "title": "Arabic (العربية)" + }, { "code": "en-US", "title": "English (US)" @@ -9,7 +13,7 @@ }, { "code": "ar-BH", - "title": "Arabic (عربي)" + "title": "Arabic (Bahrain)" }, { "code": "bn-BD", From 1ac6879268e3526bf71006b46b6cc45d281eda87 Mon Sep 17 00:00:00 2001 From: Patrick Wachter Date: Sat, 22 Mar 2025 13:44:50 +0100 Subject: [PATCH 509/623] Add Mistral OCR integration and configuration support --- backend/open_webui/config.py | 5 ++ backend/open_webui/main.py | 2 + backend/open_webui/retrieval/loaders/main.py | 59 +++++++++++++++++++ backend/open_webui/routers/retrieval.py | 16 +++++ backend/requirements.txt | 1 + .../admin/Settings/Documents.svelte | 27 +++++++-- 6 files changed, 105 insertions(+), 5 deletions(-) diff --git a/backend/open_webui/config.py b/backend/open_webui/config.py index 0ac92bd23bd..02f61696ecc 100644 --- a/backend/open_webui/config.py +++ b/backend/open_webui/config.py @@ -1727,6 +1727,11 @@ class BannerModel(BaseModel): os.getenv("DOCUMENT_INTELLIGENCE_KEY", ""), ) +MISTRAL_OCR_API_KEY = PersistentConfig( + "MISTRAL_OCR_API_KEY", + "rag.mistral_ocr_api_key", + os.getenv("MISTRAL_OCR_API_KEY", ""), +) BYPASS_EMBEDDING_AND_RETRIEVAL = PersistentConfig( "BYPASS_EMBEDDING_AND_RETRIEVAL", diff --git a/backend/open_webui/main.py b/backend/open_webui/main.py index bb78d900346..383523174c5 100644 --- a/backend/open_webui/main.py +++ b/backend/open_webui/main.py @@ -191,6 +191,7 @@ DOCLING_SERVER_URL, DOCUMENT_INTELLIGENCE_ENDPOINT, DOCUMENT_INTELLIGENCE_KEY, + MISTRAL_OCR_API_KEY, RAG_TOP_K, 
RAG_TOP_K_RERANKER, RAG_TEXT_SPLITTER, @@ -582,6 +583,7 @@ async def lifespan(app: FastAPI): app.state.config.DOCLING_SERVER_URL = DOCLING_SERVER_URL app.state.config.DOCUMENT_INTELLIGENCE_ENDPOINT = DOCUMENT_INTELLIGENCE_ENDPOINT app.state.config.DOCUMENT_INTELLIGENCE_KEY = DOCUMENT_INTELLIGENCE_KEY +app.state.config.MISTRAL_OCR_API_KEY = MISTRAL_OCR_API_KEY app.state.config.TEXT_SPLITTER = RAG_TEXT_SPLITTER app.state.config.TIKTOKEN_ENCODING_NAME = TIKTOKEN_ENCODING_NAME diff --git a/backend/open_webui/retrieval/loaders/main.py b/backend/open_webui/retrieval/loaders/main.py index 295d0414a75..e75c69682d2 100644 --- a/backend/open_webui/retrieval/loaders/main.py +++ b/backend/open_webui/retrieval/loaders/main.py @@ -20,6 +20,9 @@ YoutubeLoader, ) from langchain_core.documents import Document + +from mistralai import Mistral + from open_webui.env import SRC_LOG_LEVELS, GLOBAL_LOG_LEVEL logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL) @@ -163,6 +166,53 @@ def load(self) -> list[Document]: raise Exception(f"Error calling Docling: {error_msg}") +class MistralLoader: + def __init__(self, api_key: str, file_path: str): + self.api_key = api_key + self.file_path = file_path + self.client = Mistral(api_key=api_key) + + def load(self) -> list[Document]: + log.info("Uploading file to Mistral OCR") + uploaded_pdf = self.client.files.upload( + file={ + "file_name": self.file_path.split("/")[-1], + "content": open(self.file_path, "rb"), + }, + purpose="ocr", + ) + log.info("File uploaded to Mistral OCR, getting signed URL") + signed_url = self.client.files.get_signed_url(file_id=uploaded_pdf.id) + log.info("Signed URL received, processing OCR") + ocr_response = self.client.ocr.process( + model="mistral-ocr-latest", + document={ + "type": "document_url", + "document_url": signed_url.url, + }, + ) + log.info("OCR processing done, deleting uploaded file") + deleted_pdf = self.client.files.delete(file_id=uploaded_pdf.id) + log.info("Uploaded file deleted") + 
log.debug("OCR response: %s", ocr_response) + if not hasattr(ocr_response, "pages") or not ocr_response.pages: + log.error("No pages found in OCR response") + return [Document(page_content="No text content found", metadata={})] + + return [ + Document( + page_content=page.markdown, + metadata={ + "page": page.index, + "page_label": page.index + 1, + "total_pages": len(ocr_response.pages), + }, + ) + for page in ocr_response.pages + if hasattr(page, "markdown") and hasattr(page, "index") + ] + + class Loader: def __init__(self, engine: str = "", **kwargs): self.engine = engine @@ -222,6 +272,15 @@ def _get_loader(self, filename: str, file_content_type: str, file_path: str): api_endpoint=self.kwargs.get("DOCUMENT_INTELLIGENCE_ENDPOINT"), api_key=self.kwargs.get("DOCUMENT_INTELLIGENCE_KEY"), ) + elif ( + self.engine == "mistral_ocr" + and self.kwargs.get("MISTRAL_OCR_API_KEY") != "" + and file_ext + in ["pdf"] # Mistral OCR currently only supports PDF and images + ): + loader = MistralLoader( + api_key=self.kwargs.get("MISTRAL_OCR_API_KEY"), file_path=file_path + ) else: if file_ext == "pdf": loader = PyPDFLoader( diff --git a/backend/open_webui/routers/retrieval.py b/backend/open_webui/routers/retrieval.py index 2bd908606ac..979cd2c70c2 100644 --- a/backend/open_webui/routers/retrieval.py +++ b/backend/open_webui/routers/retrieval.py @@ -364,6 +364,9 @@ async def get_rag_config(request: Request, user=Depends(get_admin_user)): "endpoint": request.app.state.config.DOCUMENT_INTELLIGENCE_ENDPOINT, "key": request.app.state.config.DOCUMENT_INTELLIGENCE_KEY, }, + "mistral_ocr_config": { + "api_key": request.app.state.config.MISTRAL_OCR_API_KEY, + }, }, "chunk": { "text_splitter": request.app.state.config.TEXT_SPLITTER, @@ -427,11 +430,16 @@ class DocumentIntelligenceConfigForm(BaseModel): key: str +class MistralOCRConfigForm(BaseModel): + api_key: str + + class ContentExtractionConfig(BaseModel): engine: str = "" tika_server_url: Optional[str] = None docling_server_url: 
Optional[str] = None document_intelligence_config: Optional[DocumentIntelligenceConfigForm] = None + mistral_ocr_config: Optional[MistralOCRConfigForm] = None class ChunkParamUpdateForm(BaseModel): @@ -553,6 +561,10 @@ async def update_rag_config( request.app.state.config.DOCUMENT_INTELLIGENCE_KEY = ( form_data.content_extraction.document_intelligence_config.key ) + if form_data.content_extraction.mistral_ocr_config is not None: + request.app.state.config.MISTRAL_OCR_API_KEY = ( + form_data.content_extraction.mistral_ocr_config.api_key + ) if form_data.chunk is not None: request.app.state.config.TEXT_SPLITTER = form_data.chunk.text_splitter @@ -659,6 +671,9 @@ async def update_rag_config( "endpoint": request.app.state.config.DOCUMENT_INTELLIGENCE_ENDPOINT, "key": request.app.state.config.DOCUMENT_INTELLIGENCE_KEY, }, + "mistral_ocr_config": { + "api_key": request.app.state.config.MISTRAL_OCR_API_KEY, + }, }, "chunk": { "text_splitter": request.app.state.config.TEXT_SPLITTER, @@ -1007,6 +1022,7 @@ def process_file( PDF_EXTRACT_IMAGES=request.app.state.config.PDF_EXTRACT_IMAGES, DOCUMENT_INTELLIGENCE_ENDPOINT=request.app.state.config.DOCUMENT_INTELLIGENCE_ENDPOINT, DOCUMENT_INTELLIGENCE_KEY=request.app.state.config.DOCUMENT_INTELLIGENCE_KEY, + MISTRAL_OCR_API_KEY=request.app.state.config.MISTRAL_OCR_API_KEY, ) docs = loader.load( file.filename, file.meta.get("content_type"), file_path diff --git a/backend/requirements.txt b/backend/requirements.txt index ca2ea50609d..078d8d2d34e 100644 --- a/backend/requirements.txt +++ b/backend/requirements.txt @@ -77,6 +77,7 @@ psutil sentencepiece soundfile==0.13.1 azure-ai-documentintelligence==1.0.0 +mistralai==1.6.0 pillow==11.1.0 opencv-python-headless==4.11.0.86 diff --git a/src/lib/components/admin/Settings/Documents.svelte b/src/lib/components/admin/Settings/Documents.svelte index 5ea12c9c084..b105ebdb9f7 100644 --- a/src/lib/components/admin/Settings/Documents.svelte +++ 
b/src/lib/components/admin/Settings/Documents.svelte @@ -54,6 +54,8 @@ let documentIntelligenceEndpoint = ''; let documentIntelligenceKey = ''; let showDocumentIntelligenceConfig = false; + let mistralApiKey = ''; + let showMistralOcrConfig = false; let textSplitter = ''; let chunkSize = 0; @@ -189,6 +191,10 @@ toast.error($i18n.t('Document Intelligence endpoint and key required.')); return; } + if (contentExtractionEngine === 'mistral_ocr' && mistralApiKey === '') { + toast.error($i18n.t('Mistral OCR API Key required.')); + return; + } if (!BYPASS_EMBEDDING_AND_RETRIEVAL) { await embeddingModelUpdateHandler(); @@ -220,6 +226,9 @@ document_intelligence_config: { key: documentIntelligenceKey, endpoint: documentIntelligenceEndpoint + }, + mistral_ocr_config: { + api_key: mistralApiKey } } }); @@ -284,6 +293,8 @@ documentIntelligenceEndpoint = res.content_extraction.document_intelligence_config.endpoint; documentIntelligenceKey = res.content_extraction.document_intelligence_config.key; showDocumentIntelligenceConfig = contentExtractionEngine === 'document_intelligence'; + mistralApiKey = res.content_extraction.mistral_ocr_config.api_key; + showMistralOcrConfig = contentExtractionEngine === 'mistral_ocr'; fileMaxSize = res?.file.max_size ?? ''; fileMaxCount = res?.file.max_count ?? ''; @@ -335,21 +346,21 @@
-
+
-
+
{$i18n.t('Content Extraction Engine')}
-
@@ -378,12 +389,18 @@ placeholder={$i18n.t('Enter Document Intelligence Endpoint')} bind:value={documentIntelligenceEndpoint} /> -
+ {:else if contentExtractionEngine === 'mistral_ocr'} +
+ +
{/if}
From b652b8e5f35cd948cffda27b77e2a6c7940fae6d Mon Sep 17 00:00:00 2001 From: Alex <70099710+Xelaph@users.noreply.github.com> Date: Tue, 1 Apr 2025 14:25:04 +0200 Subject: [PATCH 510/623] Update translation.json --- src/lib/i18n/locales/nl-NL/translation.json | 370 ++++++++++---------- 1 file changed, 185 insertions(+), 185 deletions(-) diff --git a/src/lib/i18n/locales/nl-NL/translation.json b/src/lib/i18n/locales/nl-NL/translation.json index 342aac6a28b..54b3722f922 100644 --- a/src/lib/i18n/locales/nl-NL/translation.json +++ b/src/lib/i18n/locales/nl-NL/translation.json @@ -1,22 +1,22 @@ { - "-1 for no limit, or a positive integer for a specific limit": "", + "-1 for no limit, or a positive integer for a specific limit": "-1 voor geen limiet, of een positief getal voor een specifiek limiet", "'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.": "'s', 'm', 'h', 'd', 'w', of '-1' for geen vervaldatum.", "(e.g. `sh webui.sh --api --api-auth username_password`)": "(bv. `sh webui.sh --api --api-auth gebruikersnaam_wachtwoord`)", "(e.g. `sh webui.sh --api`)": "(bv. 
`sh webui.sh --api`)", "(latest)": "(nieuwste)", - "(Ollama)": "", + "(Ollama)": "(Ollama)", "{{ models }}": "{{ modellen }}", - "{{COUNT}} Available Tool Servers": "", - "{{COUNT}} hidden lines": "", - "{{COUNT}} Replies": "", + "{{COUNT}} Available Tool Servers": "{{COUNT}} beschikbare gereedschapservers", + "{{COUNT}} hidden lines": "{{COUNT}} verborgen regels", + "{{COUNT}} Replies": "{{COUNT}} antwoorden", "{{user}}'s Chats": "{{user}}'s chats", - "{{webUIName}} Backend Required": "{{webUIName}} Backend Verplicht", + "{{webUIName}} Backend Required": "{{webUIName}} Backend verplicht", "*Prompt node ID(s) are required for image generation": "*Prompt node ID('s) zijn vereist voor het genereren van afbeeldingen", "A new version (v{{LATEST_VERSION}}) is now available.": "Een nieuwe versie (v{{LATEST_VERSION}}) is nu beschikbaar", "A task model is used when performing tasks such as generating titles for chats and web search queries": "Een taakmodel wordt gebruikt bij het uitvoeren van taken zoals het genereren van titels voor chats en zoekopdrachten op het internet", "a user": "een gebruiker", "About": "Over", - "Accept autocomplete generation / Jump to prompt variable": "", + "Accept autocomplete generation / Jump to prompt variable": "Accepteer het genereren van automatisch aanvullen / Spring naar promptvariabele", "Access": "Toegang", "Access Control": "Toegangsbeheer", "Accessible to all users": "Toegankelijk voor alle gebruikers", @@ -24,7 +24,7 @@ "Account Activation Pending": "Accountactivatie in afwachting", "Accurate information": "Accurate informatie", "Actions": "Acties", - "Activate": "", + "Activate": "Activeren", "Activate this command by typing \"/{{COMMAND}}\" to chat input.": "Activeer dit commando door \"/{{COMMAND}}\" in de chat te typen", "Active Users": "Actieve gebruikers", "Add": "Toevoegen", @@ -40,7 +40,7 @@ "Add Group": "Voeg groep toe", "Add Memory": "Voeg geheugen toe", "Add Model": "Voeg model toe", - "Add Reaction": "", + "Add 
Reaction": "Voeg reactie toe", "Add Tag": "Voeg tag toe", "Add Tags": "Voeg tags toe", "Add text content": "Voeg tekstinhoud toe", @@ -54,35 +54,35 @@ "Admins have access to all tools at all times; users need tools assigned per model in the workspace.": "Beheerders hebben altijd toegang tot alle gereedschappen; gebruikers moeten gereedschap toegewezen krijgen per model in de werkruimte.", "Advanced Parameters": "Geavanceerde parameters", "Advanced Params": "Geavanceerde params", - "All": "", + "All": "Alle", "All Documents": "Alle documenten", "All models deleted successfully": "Alle modellen zijn succesvol verwijderd", - "Allow Chat Controls": "", - "Allow Chat Delete": "Sta chatverwijdering toe", - "Allow Chat Deletion": "Sta chatverwijdering toe", - "Allow Chat Edit": "Sta chatwijziging toe", - "Allow File Upload": "Sta bestandenupload toe", + "Allow Chat Controls": "Chatbesturing toestaan", + "Allow Chat Delete": "Chatverwijdering toestaan", + "Allow Chat Deletion": "Chatverwijdering toestaan", + "Allow Chat Edit": "Chatwijziging toestaan", + "Allow File Upload": "Bestandenupload toestaan", "Allow non-local voices": "Niet-lokale stemmen toestaan", "Allow Temporary Chat": "Tijdelijke chat toestaan", "Allow User Location": "Gebruikerslocatie toestaan", "Allow Voice Interruption in Call": "Stemonderbreking tijdens gesprek toestaan", - "Allowed Endpoints": "", + "Allowed Endpoints": "Endpoints toestaan", "Already have an account?": "Heb je al een account?", - "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. 
For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "", - "Always": "", - "Always Collapse Code Blocks": "", - "Always Expand Details": "", + "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "Alternatief voor top_p, en streeft naar een evenwicht tussen kwaliteit en variatie. De parameter p vertegenwoordigt de minimumwaarschijnlijkheid dat een token in aanmerking wordt genomen, in verhouding tot de waarschijnlijkheid van het meest waarschijnlijke token. Bijvoorbeeld, met p=0,05 en het meest waarschijnlijke token met een waarschijnlijkheid van 0,9, worden logits met een waarde kleiner dan 0,045 uitgefilterd.", + "Always": "Altijd", + "Always Collapse Code Blocks": "Codeblokken altijd inklappen", + "Always Expand Details": "Details altijd uitklappen", "Amazing": "Geweldig", "an assistant": "een assistent", - "Analyzed": "", - "Analyzing...": "", + "Analyzed": "Geanalyseerd", + "Analyzing...": "Aan het analyseren...", "and": "en", "and {{COUNT}} more": "en {{COUNT}} meer", "and create a new shared link.": "en maak een nieuwe gedeelde link.", "API Base URL": "API Base URL", "API Key": "API-sleutel", "API Key created.": "API-sleutel aangemaakt.", - "API Key Endpoint Restrictions": "", + "API Key Endpoint Restrictions": "API-sleutel endpoint-beperkingen", "API keys": "API-sleutels", "Application DN": "Applicatie DN", "Application DN Password": "Applicatie", @@ -92,34 +92,34 @@ "Archive All Chats": "Archiveer alle chats", "Archived Chats": "Chatrecord", "archived-chat-export": "gearchiveerde-chat-export", - "Are you sure you want to clear all memories? 
This action cannot be undone.": "", - "Are you sure you want to delete this channel?": "", - "Are you sure you want to delete this message?": "", + "Are you sure you want to clear all memories? This action cannot be undone.": "Weet je zeker dat je alle herinneringen wil verwijderen? Deze actie kan niet ongedaan worden gemaakt.", + "Are you sure you want to delete this channel?": "Weet je zeker dat je dit kanaal wil verwijderen?", + "Are you sure you want to delete this message?": "Weet je zeker dat je dit bericht wil verwijderen?", "Are you sure you want to unarchive all archived chats?": "Weet je zeker dat je alle gearchiveerde chats wil onarchiveren?", "Are you sure?": "Weet je het zeker?", "Arena Models": "Arenamodellen", "Artifacts": "Artefacten", - "Ask": "", + "Ask": "Vraag", "Ask a question": "Stel een vraag", "Assistant": "Assistent", - "Attach file from knowledge": "", + "Attach file from knowledge": "Voeg bestand uit kennis toe", "Attention to detail": "Attention to detail", - "Attribute for Mail": "", + "Attribute for Mail": "Attribuut voor mail", "Attribute for Username": "Attribuut voor gebruikersnaam", "Audio": "Audio", "August": "Augustus", "Authenticate": "Authenticeer", - "Authentication": "", + "Authentication": "Authenticatie", "Auto-Copy Response to Clipboard": "Antwoord automatisch kopiëren naar klembord", "Auto-playback response": "Automatisch afspelen van antwoord", - "Autocomplete Generation": "", - "Autocomplete Generation Input Max Length": "", + "Autocomplete Generation": "Automatische aanvullingsgeneratie", + "Autocomplete Generation Input Max Length": "Maximale invoerlengte voor automatische aanvullingsgeneratie", "Automatic1111": "Automatic1111", "AUTOMATIC1111 Api Auth String": "Automatic1111 Api Auth String", "AUTOMATIC1111 Base URL": "AUTOMATIC1111 Basis-URL", "AUTOMATIC1111 Base URL is required.": "AUTOMATIC1111 Basis-URL is verplicht", "Available list": "Beschikbare lijst", - "Available Tool Servers": "", + "Available Tool 
Servers": "Beschikbare gereedschapservers", "available!": "beschikbaar!", "Awful": "Verschrikkelijk", "Azure AI Speech": "Azure AI-spraak", @@ -131,28 +131,28 @@ "Batch Size (num_batch)": "Batchgrootte (num_batch)", "before": "voor", "Being lazy": "Lui zijn", - "Beta": "", + "Beta": "Beta", "Bing Search V7 Endpoint": "Bing Search V7 Endpoint", "Bing Search V7 Subscription Key": "Bing Search V7 Subscription Key", - "Bocha Search API Key": "", - "Boosting or penalizing specific tokens for constrained responses. Bias values will be clamped between -100 and 100 (inclusive). (Default: none)": "", + "Bocha Search API Key": "Bocha Search API-sleutel", + "Boosting or penalizing specific tokens for constrained responses. Bias values will be clamped between -100 and 100 (inclusive). (Default: none)": "Versterken of bestraffen van specifieke tokens voor beperkte reacties. Biaswaarden worden geklemd tussen -100 en 100 (inclusief). (Standaard: none)", "Brave Search API Key": "Brave Search API-sleutel", "By {{name}}": "Op {{name}}", - "Bypass Embedding and Retrieval": "", + "Bypass Embedding and Retrieval": "Embedding en ophalen omzeilen", "Bypass SSL verification for Websites": "SSL-verificatie omzeilen voor websites", - "Calendar": "", + "Calendar": "Agenda", "Call": "Oproep", "Call feature is not supported when using Web STT engine": "Belfunctie wordt niet ondersteund bij gebruik van de Web STT engine", "Camera": "Camera", "Cancel": "Annuleren", "Capabilities": "Mogelijkheden", - "Capture": "", + "Capture": "Vangen", "Certificate Path": "Certificaatpad", "Change Password": "Wijzig Wachtwoord", - "Channel Name": "", - "Channels": "", + "Channel Name": "Kanaalnaam", + "Channels": "Kanalen", "Character": "Karakter", - "Character limit for autocomplete generation input": "", + "Character limit for autocomplete generation input": "Karakterlimiet voor automatische generatieinvoer", "Chart new frontiers": "Verken nieuwe grenzen", "Chat": "Chat", "Chat Background Image": 
"Chatachtergrond", @@ -172,14 +172,14 @@ "Ciphers": "Versleutelingen", "Citation": "Citaat", "Clear memory": "Geheugen wissen", - "Clear Memory": "", + "Clear Memory": "Geheugen wissen", "click here": "klik hier", "Click here for filter guides.": "Klik hier voor filterhulp", "Click here for help.": "Klik hier voor hulp.", "Click here to": "Klik hier om", "Click here to download user import template file.": "Klik hier om het sjabloonbestand voor gebruikersimport te downloaden.", "Click here to learn more about faster-whisper and see the available models.": "Klik hier om meer te leren over faster-whisper en de beschikbare modellen te bekijken.", - "Click here to see available models.": "", + "Click here to see available models.": "Klik hier om beschikbare modellen te zien", "Click here to select": "Klik hier om te selecteren", "Click here to select a csv file.": "Klik hier om een csv file te selecteren.", "Click here to select a py file.": "Klik hier om een py-bestand te selecteren.", @@ -188,22 +188,22 @@ "Click on the user role button to change a user's role.": "Klik op de gebruikersrol knop om de rol van een gebruiker te wijzigen.", "Clipboard write permission denied. Please check your browser settings to grant the necessary access.": "Klembord schrijftoestemming geweigerd. 
Kijk je browserinstellingen na om de benodigde toestemming te geven.", "Clone": "Kloon", - "Clone Chat": "", - "Clone of {{TITLE}}": "", + "Clone Chat": "Kloon chat", + "Clone of {{TITLE}}": "Kloon van {{TITLE}}", "Close": "Sluiten", "Code execution": "Code uitvoeren", - "Code Execution": "", - "Code Execution Engine": "", - "Code Execution Timeout": "", + "Code Execution": "Code-uitvoer", + "Code Execution Engine": "Code-uitvoer engine", + "Code Execution Timeout": "Code-uitvoer time-out", "Code formatted successfully": "Code succesvol geformateerd", - "Code Interpreter": "", - "Code Interpreter Engine": "", - "Code Interpreter Prompt Template": "", - "Collapse": "", + "Code Interpreter": "Code-interpretatie", + "Code Interpreter Engine": "Code-interpretatie engine", + "Code Interpreter Prompt Template": "Code-interpretatie promptsjabloon", + "Collapse": "Inklappen", "Collection": "Verzameling", "Color": "Kleur", "ComfyUI": "ComfyUI", - "ComfyUI API Key": "", + "ComfyUI API Key": "ComfyUI API-sleutel", "ComfyUI Base URL": "ComfyUI Base URL", "ComfyUI Base URL is required.": "ComfyUI Base URL is required.", "ComfyUI Workflow": "ComfyUI workflow", @@ -214,24 +214,24 @@ "Configure": "Configureer", "Confirm": "Bevestigen", "Confirm Password": "Bevestig wachtwoord", - "Confirm your action": "Bevestig uw actie", - "Confirm your new password": "", - "Connect to your own OpenAI compatible API endpoints.": "", - "Connect to your own OpenAPI compatible external tool servers.": "", + "Confirm your action": "Bevestig je actie", + "Confirm your new password": "Bevestig je nieuwe wachtwoord", + "Connect to your own OpenAI compatible API endpoints.": "Verbind met je eigen OpenAI-compatibele API-endpoints", + "Connect to your own OpenAPI compatible external tool servers.": "Verbind met je eigen OpenAPI-compatibele externe gereedschapservers", "Connections": "Verbindingen", - "Constrains effort on reasoning for reasoning models. 
Only applicable to reasoning models from specific providers that support reasoning effort.": "", + "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "Beperkt de redeneerinspanning voor redeneermodellen. Alleen van toepassing op redeneermodellen van specifieke providers die redeneerinspanning ondersteunen.", "Contact Admin for WebUI Access": "Neem contact op met de beheerder voor WebUI-toegang", "Content": "Inhoud", - "Content Extraction Engine": "", + "Content Extraction Engine": "Inhoudsextractie engine", "Context Length": "Contextlengte", "Continue Response": "Doorgaan met antwoord", "Continue with {{provider}}": "Ga verder met {{provider}}", "Continue with Email": "Ga door met E-mail", "Continue with LDAP": "Ga door met LDAP", "Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Bepaal hoe berichttekst wordt opgesplitst voor TTS-verzoeken. 'Leestekens' splitst op in zinnen, 'alinea's' splitst op in paragrafen en 'geen' houdt het bericht als een enkele string.", - "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "", + "Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "Controleer de herhaling van tokenreeksen in de gegenereerde tekst. Een hogere waarde (bijv. 1,5) zal herhalingen sterker bestraffen, terwijl een lagere waarde (bijv. 1,1) milder zal zijn. Bij 1 is het uitgeschakeld.", "Controls": "Besturingselementen", - "Controls the balance between coherence and diversity of the output. 
A lower value will result in more focused and coherent text.": "", + "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "Regelt de balans tussen coherentie en diversiteit van de uitvoer. Een lagere waarde resulteert in een meer gerichte en coherente tekst.", "Copied": "Gekopieerd", "Copied shared chat URL to clipboard!": "URL van gedeelde gesprekspagina gekopieerd naar klembord!", "Copied to clipboard": "Gekopieerd naar klembord", @@ -241,13 +241,13 @@ "Copy Link": "Kopieer link", "Copy to clipboard": "Kopier naar klembord", "Copying to clipboard was successful!": "Kopiëren naar klembord was succesvol!", - "CORS must be properly configured by the provider to allow requests from Open WebUI.": "", + "CORS must be properly configured by the provider to allow requests from Open WebUI.": "CORS moet goed geconfigureerd zijn bij de provider om verzoeken van Open WebUI toe te staan", "Create": "Aanmaken", "Create a knowledge base": "Maak een kennisbasis aan", "Create a model": "Een model maken", "Create Account": "Maak account", "Create Admin Account": "Maak admin-account", - "Create Channel": "", + "Create Channel": "Maak kanaal", "Create Group": "Maak groep", "Create Knowledge": "Creër kennis", "Create new key": "Maak nieuwe sleutel", @@ -256,18 +256,18 @@ "Created At": "Gemaakt op", "Created by": "Gemaakt door", "CSV Import": "CSV import", - "Ctrl+Enter to Send": "", + "Ctrl+Enter to Send": "Ctrl+Enter om te sturen", "Current Model": "Huidig model", "Current Password": "Huidig wachtwoord", "Custom": "Aangepast", - "Danger Zone": "", + "Danger Zone": "Gevarenzone", "Dark": "Donker", "Database": "Database", "December": "December", "Default": "Standaard", "Default (Open AI)": "Standaard (Open AI)", "Default (SentenceTransformers)": "Standaard (SentenceTransformers)", - "Default mode works with a wider range of models by calling tools once 
before execution. Native mode leverages the model’s built-in tool-calling capabilities, but requires the model to inherently support this feature.": "", + "Default mode works with a wider range of models by calling tools once before execution. Native mode leverages the model’s built-in tool-calling capabilities, but requires the model to inherently support this feature.": "De standaardmodus werkt met een breder scala aan modellen door gereedschappen één keer aan te roepen voordat ze worden uitgevoerd. De native modus maakt gebruik van de ingebouwde mogelijkheden van het model om gereedschappen aan te roepen, maar vereist dat het model deze functie inherent ondersteunt.", "Default Model": "Standaardmodel", "Default model updated": "Standaardmodel bijgewerkt", "Default Models": "Standaardmodellen", @@ -275,8 +275,8 @@ "Default permissions updated successfully": "Standaardrechten succesvol bijgewerkt", "Default Prompt Suggestions": "Standaard Prompt Suggesties", "Default to 389 or 636 if TLS is enabled": "Standaard 389 of 636 als TLS is ingeschakeld", - "Default to ALL": "Standaar op ALL", - "Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "", + "Default to ALL": "Standaard op ALL", + "Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.": "Standaard gesegmenteerd ophalen voor gerichte en relevante inhoudsextractie, dit wordt aanbevolen voor de meeste gevallen.", "Default User Role": "Standaard gebruikersrol", "Delete": "Verwijderen", "Delete a model": "Verwijder een model", @@ -287,8 +287,8 @@ "Delete chat?": "Verwijder chat?", "Delete folder?": "Verwijder map?", "Delete function?": "Verwijder functie?", - "Delete Message": "", - "Delete message?": "", + "Delete Message": "Verwijder bericht", + "Delete message?": "Verwijder bericht", "Delete prompt?": "Verwijder prompt?", "delete this link": "verwijder deze link", "Delete tool?": "Verwijder 
tool?", @@ -299,16 +299,16 @@ "Describe your knowledge base and objectives": "Beschrijf je kennisbasis en doelstellingen", "Description": "Beschrijving", "Didn't fully follow instructions": "Heeft niet alle instructies gevolgt", - "Direct": "", - "Direct Connections": "", - "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "", - "Direct Connections settings updated": "", + "Direct": "Direct", + "Direct Connections": "Directe verbindingen", + "Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "Directe verbindingen stellen gebruikers in staat om met hun eigen OpenAI compatibele API-endpoints te verbinden.", + "Direct Connections settings updated": "Directe verbindingsopties bijgewerkt", "Disabled": "Uitgeschakeld", "Discover a function": "Ontdek een functie", "Discover a model": "Ontdek een model", "Discover a prompt": "Ontdek een prompt", "Discover a tool": "Ontdek een tool", - "Discover how to use Open WebUI and seek support from the community.": "", + "Discover how to use Open WebUI and seek support from the community.": "Ontdek hoe je Open WebUI gebruikt en zoek ondersteuning van de community.", "Discover wonders": "Ontdek wonderen", "Discover, download, and explore custom functions": "Ontdek, download en verken aangepaste functies", "Discover, download, and explore custom prompts": "Ontdek, download en verken aangepaste prompts", @@ -322,15 +322,15 @@ "Dive into knowledge": "Duik in kennis", "Do not install functions from sources you do not fully trust.": "Installeer geen functies vanuit bronnen die je niet volledig vertrouwt", "Do not install tools from sources you do not fully trust.": "Installeer geen tools vanuit bronnen die je niet volledig vertrouwt.", - "Docling": "", - "Docling Server URL required.": "", + "Docling": "Docling", + "Docling Server URL required.": "Docling server-URL benodigd", "Document": "Document", - "Document Intelligence": "", - "Document Intelligence 
endpoint and key required.": "", + "Document Intelligence": "Document Intelligence", + "Document Intelligence endpoint and key required.": "Document Intelligence-endpoint en -sleutel benodigd", "Documentation": "Documentatie", "Documents": "Documenten", "does not make any external connections, and your data stays securely on your locally hosted server.": "maakt geen externe verbindingen, en je gegevens blijven veilig op je lokaal gehoste server.", - "Domain Filter List": "", + "Domain Filter List": "Domein-filterlijst", "Don't have an account?": "Heb je geen account?", "don't install random functions from sources you don't trust.": "installeer geen willekeurige functies van bronnen die je niet vertrouwd", "don't install random tools from sources you don't trust.": "installeer geen willekeurige gereedschappen van bronnen die je niet vertrouwd", @@ -344,8 +344,8 @@ "Draw": "Teken", "Drop any files here to add to the conversation": "Sleep hier bestanden om toe te voegen aan het gesprek", "e.g. '30s','10m'. Valid time units are 's', 'm', 'h'.": "bijv. '30s', '10m'. Geldige tijdseenheden zijn 's', 'm', 'h'.", - "e.g. \"json\" or a JSON schema": "", - "e.g. 60": "", + "e.g. \"json\" or a JSON schema": "bijv. \"json\" of een JSON-schema", + "e.g. 60": "bijv. 60", "e.g. A filter to remove profanity from text": "bijv. Een filter om gevloek uit tekst te verwijderen", "e.g. My Filter": "bijv. Mijn filter", "e.g. My Tools": "bijv. Mijn gereedschappen", @@ -354,32 +354,32 @@ "e.g. 
Tools for performing various operations": "Gereedschappen om verschillende bewerkingen uit te voeren", "Edit": "Wijzig", "Edit Arena Model": "Bewerk arenamodel", - "Edit Channel": "", + "Edit Channel": "Bewerk kanaal", "Edit Connection": "Bewerk connectie", - "Edit Default Permissions": "Standaardrechten bewerken", + "Edit Default Permissions": "Bewerk standaardrechten", "Edit Memory": "Bewerk geheugen", "Edit User": "Wijzig gebruiker", "Edit User Group": "Bewerk gebruikergroep", "ElevenLabs": "ElevenLabs", "Email": "E-mail", "Embark on adventures": "Ga op avonturen", - "Embedding": "", + "Embedding": "Embedding", "Embedding Batch Size": "Embedding batchgrootte", "Embedding Model": "Embedding Model", "Embedding Model Engine": "Embedding Model Engine", "Embedding model set to \"{{embedding_model}}\"": "Embedding model ingesteld op \"{{embedding_model}}\"", - "Enable API Key": "", - "Enable autocomplete generation for chat messages": "", - "Enable Code Execution": "", - "Enable Code Interpreter": "", + "Enable API Key": "API-sleutel inschakelen", + "Enable autocomplete generation for chat messages": "Automatische aanvullingsgeneratie voor chatberichten inschakelen", + "Enable Code Execution": "Code-uitvoer inschakelen", + "Enable Code Interpreter": "Code-interpretatie inschakelen", "Enable Community Sharing": "Delen via de community inschakelen", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Schakel Memory Locking (mlock) in om te voorkomen dat modelgegevens uit het RAM worden verwisseld. Deze optie vergrendelt de werkset pagina's van het model in het RAM, zodat ze niet naar de schijf worden uitgewisseld. 
Dit kan helpen om de prestaties op peil te houden door paginafouten te voorkomen en snelle gegevenstoegang te garanderen.", "Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Schakel Memory Mapping (mmap) in om modelgegevens te laden. Deze optie laat het systeem schijfopslag gebruiken als een uitbreiding van RAM door schijfbestanden te behandelen alsof ze in RAM zitten. Dit kan de prestaties van het model verbeteren door snellere gegevenstoegang mogelijk te maken. Het is echter mogelijk dat deze optie niet op alle systemen correct werkt en een aanzienlijke hoeveelheid schijfruimte in beslag kan nemen.", "Enable Message Rating": "Schakel berichtbeoordeling in", - "Enable Mirostat sampling for controlling perplexity.": "", + "Enable Mirostat sampling for controlling perplexity.": "Mirostat-sampling inschakelen om perplexiteit te controleren.", "Enable New Sign Ups": "Schakel nieuwe registraties in", "Enabled": "Ingeschakeld", - "Enforce Temporary Chat": "", + "Enforce Temporary Chat": "Tijdelijke chat afdwingen", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Zorg ervoor dat uw CSV-bestand de volgende vier kolommen in deze volgorde bevat: Naam, E-mail, Wachtwoord, Rol.", "Enter {{role}} message here": "Voeg {{role}} bericht hier toe", "Enter a detail about yourself for your LLMs to recall": "Voer een detail over jezelf in zodat LLM's het kunnen onthouden", @@ -388,37 +388,37 @@ "Enter Application DN Password": "Voer applicatie-DN wachtwoord in", "Enter Bing Search V7 Endpoint": "Voer Bing Search V7 Endpoint in", "Enter Bing Search V7 Subscription Key": "Voer Bing Search V7 abonnementscode in", - "Enter Bocha Search API Key": "", + 
"Enter Bocha Search API Key": "Voer Bocha Search API-sleutel in", "Enter Brave Search API Key": "Voer de Brave Search API-sleutel in", "Enter certificate path": "Voer certificaatpad in", "Enter CFG Scale (e.g. 7.0)": "Voer CFG schaal in (bv. 7.0)", "Enter Chunk Overlap": "Voeg Chunk Overlap toe", "Enter Chunk Size": "Voeg Chunk Size toe", - "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "", + "Enter comma-seperated \"token:bias_value\" pairs (example: 5432:100, 413:-100)": "Voer kommagescheiden \"token:bias_waarde\" paren in (bijv. 5432:100, 413:-100)", "Enter description": "Voer beschrijving in", - "Enter Docling Server URL": "", - "Enter Document Intelligence Endpoint": "", - "Enter Document Intelligence Key": "", - "Enter domains separated by commas (e.g., example.com,site.org)": "", - "Enter Exa API Key": "", + "Enter Docling Server URL": "Voer Docling Server-URL in", + "Enter Document Intelligence Endpoint": "Voer Document Intelligence endpoint in", + "Enter Document Intelligence Key": "Voer Document Intelligence sleutel in", + "Enter domains separated by commas (e.g., example.com,site.org)": "Voer domeinen in gescheiden met komma's (bijv., voorbeeld.com,site.org)", + "Enter Exa API Key": "Voer Exa API-sleutel in", "Enter Github Raw URL": "Voer de Github Raw-URL in", "Enter Google PSE API Key": "Voer de Google PSE API-sleutel in", "Enter Google PSE Engine Id": "Voer Google PSE Engine-ID in", "Enter Image Size (e.g. 512x512)": "Voeg afbeelding formaat toe (Bijv. 
512x512)", "Enter Jina API Key": "Voer Jina API-sleutel in", - "Enter Jupyter Password": "", - "Enter Jupyter Token": "", - "Enter Jupyter URL": "", - "Enter Kagi Search API Key": "", - "Enter Key Behavior": "", - "Enter language codes": "Voeg taal codes toe", + "Enter Jupyter Password": "Voer Jupyter-wachtwoord in", + "Enter Jupyter Token": "Voer Jupyter-token in", + "Enter Jupyter URL": "Voer Jupyter-URL in", + "Enter Kagi Search API Key": "Voer Kagi Search API-sleutel in", + "Enter Key Behavior": "Voer sleutelgedrag in", + "Enter language codes": "Voeg taalcodes toe", "Enter Model ID": "Voer model-ID in", "Enter model tag (e.g. {{modelTag}})": "Voeg model-tag toe (Bijv. {{modelTag}})", "Enter Mojeek Search API Key": "Voer Mojeek Search API-sleutel in", "Enter Number of Steps (e.g. 50)": "Voeg aantal stappen toe (Bijv. 50)", - "Enter Perplexity API Key": "", - "Enter proxy URL (e.g. https://user:password@host:port)": "", - "Enter reasoning effort": "", + "Enter Perplexity API Key": "Voer Perplexity API-sleutel in", + "Enter proxy URL (e.g. https://user:password@host:port)": "Voer proxy-URL in (bijv. https://gebruiker:wachtwoord@host:port)", + "Enter reasoning effort": "Voer redeneerinspanning in", "Enter Sampler (e.g. Euler a)": "Voer Sampler in (bv. Euler a)", "Enter Scheduler (e.g. Karras)": "Voer Scheduler in (bv. 
Karras)", "Enter Score": "Voeg score toe", @@ -426,8 +426,8 @@ "Enter SearchApi Engine": "Voer SearchApi-Engine in", "Enter Searxng Query URL": "Voer de URL van de Searxng-query in", "Enter Seed": "Voer Seed in", - "Enter SerpApi API Key": "", - "Enter SerpApi Engine": "", + "Enter SerpApi API Key": "Voer SerpApi API-sleutel in", + "Enter SerpApi Engine": "Voer SerpApi-engine in", "Enter Serper API Key": "Voer de Serper API-sleutel in", "Enter Serply API Key": "Voer Serply API-sleutel in", "Enter Serpstack API Key": "Voer de Serpstack API-sleutel in", @@ -437,42 +437,42 @@ "Enter stop sequence": "Voer stopsequentie in", "Enter system prompt": "Voer systeem prompt in", "Enter Tavily API Key": "Voer Tavily API-sleutel in", - "Enter the public URL of your WebUI. This URL will be used to generate links in the notifications.": "", + "Enter the public URL of your WebUI. This URL will be used to generate links in the notifications.": "Voer de publieke URL van je WebUI in. Deze URL wordt gebruikt om links in de notificaties te maken.", "Enter Tika Server URL": "Voer Tika Server URL in", - "Enter timeout in seconds": "", - "Enter to Send": "", + "Enter timeout in seconds": "Voer time-out in seconden in", + "Enter to Send": "Enter om te sturen", "Enter Top K": "Voeg Top K toe", - "Enter Top K Reranker": "", + "Enter Top K Reranker": "Voer Top K reranker in", "Enter URL (e.g. http://127.0.0.1:7860/)": "Voer URL in (Bijv. http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "Voer URL in (Bijv. 
http://localhost:11434)", - "Enter your current password": "", + "Enter your current password": "Voer je huidige wachtwoord in", "Enter Your Email": "Voer je Email in", "Enter Your Full Name": "Voer je Volledige Naam in", "Enter your message": "Voer je bericht in", - "Enter your new password": "", - "Enter Your Password": "Voer je Wachtwoord in", - "Enter Your Role": "Voer je Rol in", + "Enter your new password": "Voer je nieuwe wachtwoord in", + "Enter Your Password": "Voer je wachtwoord in", + "Enter Your Role": "Voer je rol in", "Enter Your Username": "Voer je gebruikersnaam in", - "Enter your webhook URL": "", + "Enter your webhook URL": "Voer je webhook-URL in", "Error": "Fout", "ERROR": "ERROR", - "Error accessing Google Drive: {{error}}": "", - "Error uploading file: {{error}}": "", + "Error accessing Google Drive: {{error}}": "Fout bij het benaderen van Google Drive: {{error}}", + "Error uploading file: {{error}}": "Fout bij het uploaden van bestand: {{error}}", "Evaluations": "Beoordelingen", - "Exa API Key": "", + "Exa API Key": "Exa API-sleutel", "Example: (&(objectClass=inetOrgPerson)(uid=%s))": "Voorbeeld: (&(objectClass=inetOrgPerson)(uid=%s))", "Example: ALL": "Voorbeeld: ALL", - "Example: mail": "", + "Example: mail": "Voorbeeld: mail", "Example: ou=users,dc=foo,dc=example": "Voorbeeld: ou=users,dc=foo,dc=example", "Example: sAMAccountName or uid or userPrincipalName": "Voorbeeld: sAMAccountName or uid or userPrincipalName", - "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "", + "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "Het aantal seats in uw licentie is overschreden. 
Neem contact op met support om het aantal seats te verhogen.", "Exclude": "Sluit uit", - "Execute code for analysis": "", - "Executing `{{NAME}}`...": "", - "Expand": "", + "Execute code for analysis": "Voer code uit voor analyse", + "Executing `{{NAME}}`...": "`{{NAME}}`... aan het uitvoeren", + "Expand": "Uitbreiden", "Experimental": "Experimenteel", - "Explain": "", - "Explain this section to me in more detail": "", + "Explain": "Leg uit", + "Explain this section to me in more detail": "Leg me dit gedeelte in meer detail uit", "Explore the cosmos": "Ontdek de kosmos", "Export": "Exporteren", "Export All Archived Chats": "Exporteer alle gearchiveerde chats", @@ -486,18 +486,18 @@ "Export Prompts": "Exporteer Prompts", "Export to CSV": "Exporteer naar CSV", "Export Tools": "Exporteer gereedschappen", - "External": "", + "External": "Extern", "External Models": "Externe modules", "Failed to add file.": "Het is niet gelukt om het bestand toe te voegen.", - "Failed to connect to {{URL}} OpenAPI tool server": "", + "Failed to connect to {{URL}} OpenAPI tool server": "Kan geen verbinding maken met {{URL}} OpenAPI gereedschapserver", "Failed to create API Key.": "Kan API Key niet aanmaken.", - "Failed to fetch models": "", + "Failed to fetch models": "Kan modellen niet ophalen", "Failed to read clipboard contents": "Kan klembord inhoud niet lezen", "Failed to save models configuration": "Het is niet gelukt om de modelconfiguratie op te slaan", "Failed to update settings": "Instellingen konden niet worden bijgewerkt.", "Failed to upload file.": "Bestand kon niet worden geüpload.", - "Features": "", - "Features Permissions": "", + "Features": "Functies", + "Features Permissions": "Functietoestemmingen", "February": "Februari", "Feedback History": "Feedback geschiedenis", "Feedbacks": "Feedback", @@ -509,7 +509,7 @@ "File not found.": "Bestand niet gevonden.", "File removed successfully.": "Bestand succesvol verwijderd.", "File size should not exceed {{maxSize}} MB.": 
"Bestandsgrootte mag niet groter zijn dan {{maxSize}} MB.", - "File uploaded successfully": "", + "File uploaded successfully": "Bestand succesvol geüpload", "Files": "Bestanden", "Filter is now globally disabled": "Filter is nu globaal uitgeschakeld", "Filter is now globally enabled": "Filter is nu globaal ingeschakeld", @@ -541,19 +541,19 @@ "Functions allow arbitrary code execution": "Functies staan willekeurige code-uitvoering toe", "Functions allow arbitrary code execution.": "Functies staan willekeurige code-uitvoering toe", "Functions imported successfully": "Functies succesvol geïmporteerd", - "Gemini": "", - "Gemini API Config": "", - "Gemini API Key is required.": "", + "Gemini": "Gemini", + "Gemini API Config": "Gemini API-configuratie", + "Gemini API Key is required.": "Gemini API-sleutel is vereist.", "General": "Algemeen", - "Generate an image": "", + "Generate an image": "Genereer een afbeelding", "Generate Image": "Genereer afbeelding", - "Generate prompt pair": "", + "Generate prompt pair": "Genereer promptpaar", "Generating search query": "Zoekopdracht genereren", "Get started": "Begin", "Get started with {{WEBUI_NAME}}": "Begin met {{WEBUI_NAME}}", "Global": "Globaal", "Good Response": "Goed antwoord", - "Google Drive": "", + "Google Drive": "Google Drive", "Google PSE API Key": "Google PSE API-sleutel", "Google PSE Engine Id": "Google PSE-engine-ID", "Group created successfully": "Groep succesvol aangemaakt", @@ -570,8 +570,8 @@ "Hex Color": "Hex-kleur", "Hex Color - Leave empty for default color": "Hex-kleur - laat leeg voor standaardkleur", "Hide": "Verberg", - "Hide Model": "", - "Home": "", + "Hide Model": "Verberg model", + "Home": "Thuis", "Host": "Host", "How can I help you today?": "Hoe kan ik je vandaag helpen?", "How would you rate this response?": "Hoe zou je dit antwoord beoordelen?", @@ -579,14 +579,14 @@ "I acknowledge that I have read and I understand the implications of my action. 
I am aware of the risks associated with executing arbitrary code and I have verified the trustworthiness of the source.": "Ik bevestig dat ik de implicaties van mijn actie heb gelezen en begrepen. Ik ben me bewust van de risico's die gepaard gaan met het uitvoeren van willekeurige code en ik heb de betrouwbaarheid van de bron gecontroleerd.", "ID": "ID", "Ignite curiosity": "Wakker nieuwsgierigheid aan", - "Image": "", - "Image Compression": "", - "Image Generation": "", + "Image": "Afbeelding", + "Image Compression": "Afbeeldingscompressie", + "Image Generation": "Afbeeldingsgeneratie", "Image Generation (Experimental)": "Afbeeldingsgeneratie (Experimenteel)", "Image Generation Engine": "Afbeeldingsgeneratie Engine", - "Image Max Compression Size": "", - "Image Prompt Generation": "", - "Image Prompt Generation Prompt": "", + "Image Max Compression Size": "Maximale afbeeldingscompressiegrootte", + "Image Prompt Generation": "Afbeeldingspromptgeneratie", + "Image Prompt Generation Prompt": "Afbeeldingspromptgeneratie prompt", "Image Settings": "Afbeeldingsinstellingen", "Images": "Afbeeldingen", "Import Chats": "Importeer Chats", @@ -599,18 +599,18 @@ "Include": "Voeg toe", "Include `--api-auth` flag when running stable-diffusion-webui": "Voeg '--api-auth` toe bij het uitvoeren van stable-diffusion-webui", "Include `--api` flag when running stable-diffusion-webui": "Voeg `--api` vlag toe bij het uitvoeren van stable-diffusion-webui", - "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "", + "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "Beïnvloedt hoe snel het algoritme reageert op feedback van de gegenereerde tekst. 
Een lagere leersnelheid resulteert in langzamere aanpassingen, terwijl een hogere leersnelheid het algoritme responsiever maakt.", "Info": "Info", - "Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "", + "Inject the entire content as context for comprehensive processing, this is recommended for complex queries.": "Injecteer de volledige inhoud als context voor uitgebreide verwerking, dit wordt aanbevolen voor complexe query's.", "Input commands": "Voer commando's in", "Install from Github URL": "Installeren vanaf Github-URL", "Instant Auto-Send After Voice Transcription": "Direct automatisch verzenden na spraaktranscriptie", - "Integration": "", + "Integration": "Integratie", "Interface": "Interface", "Invalid file format.": "Ongeldig bestandsformaat", - "Invalid JSON schema": "", + "Invalid JSON schema": "Ongeldig JSON-schema", "Invalid Tag": "Ongeldige Tag", - "is typing...": "", + "is typing...": "is aan het schrijven...", "January": "Januari", "Jina API Key": "Jina API-sleutel", "join our Discord for help.": "join onze Discord voor hulp.", @@ -618,11 +618,11 @@ "JSON Preview": "JSON-voorbeeld", "July": "Juli", "June": "Juni", - "Jupyter Auth": "", - "Jupyter URL": "", + "Jupyter Auth": "Jupyter Auth", + "Jupyter URL": "Jupyter URL", "JWT Expiration": "JWT Expiration", "JWT Token": "JWT Token", - "Kagi Search API Key": "", + "Kagi Search API Key": "Kagi Search API-sleutel", "Keep Alive": "Houd Actief", "Key": "Sleutel", "Keyboard shortcuts": "Toetsenbord snelkoppelingen", @@ -630,50 +630,50 @@ "Knowledge Access": "Kennistoegang", "Knowledge created successfully.": "Kennis succesvol aangemaakt", "Knowledge deleted successfully.": "Kennis succesvol verwijderd", - "Knowledge Public Sharing": "", + "Knowledge Public Sharing": "Publieke kennisdeling", "Knowledge reset successfully.": "Kennis succesvol gereset", "Knowledge updated successfully": "Kennis succesvol bijgewerkt", - "Kokoro.js (Browser)": "", - 
"Kokoro.js Dtype": "", + "Kokoro.js (Browser)": "Kokoro.js (Browser)", + "Kokoro.js Dtype": "Kokoro.js Dtype", "Label": "Label", "Landing Page Mode": "Landingspaginamodus", "Language": "Taal", "Last Active": "Laatst Actief", "Last Modified": "Laatst aangepast", - "Last reply": "", + "Last reply": "Laatste antwoord", "LDAP": "LDAP", "LDAP server updated": "LDAP-server bijgewerkt", "Leaderboard": "Klassement", "Leave empty for unlimited": "Laat leeg voor ongelimiteerd", - "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{url}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "Laat leeg om alle modellen van het \"{{url}}/api/tags\"-endpoint mee te nemen", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "Laat leeg om alle modellen van \"{{url}}/models\"-endpoint mee te nemen", "Leave empty to include all models or select specific models": "Laat leeg om alle modellen mee te nemen, of selecteer specifieke modellen", "Leave empty to use the default prompt, or enter a custom prompt": "Laat leeg om de standaard prompt te gebruiken, of selecteer een aangepaste prompt", - "Leave model field empty to use the default model.": "", - "License": "", + "Leave model field empty to use the default model.": "Laat modelveld leeg om het standaardmodel te gebruiken.", + "License": "Licentie", "Light": "Licht", "Listening...": "Aan het luisteren...", - "Llama.cpp": "", + "Llama.cpp": "Llama.cpp", "LLMs can make mistakes. Verify important information.": "LLMs kunnen fouten maken. 
Verifieer belangrijke informatie.", - "Loader": "", - "Loading Kokoro.js...": "", + "Loader": "Lader", + "Loading Kokoro.js...": "Kokoro.js aan het laden...", "Local": "Lokaal", "Local Models": "Lokale modellen", - "Location access not allowed": "", - "Logit Bias": "", + "Location access not allowed": "Locatietoegang niet toegestaan", + "Logit Bias": "Logit bias", "Lost": "Verloren", "LTR": "LNR", "Made by Open WebUI Community": "Gemaakt door OpenWebUI Community", "Make sure to enclose them with": "Zorg ervoor dat je ze omringt met", "Make sure to export a workflow.json file as API format from ComfyUI.": "Zorg ervoor dat je een workflow.json-bestand als API-formaat exporteert vanuit ComfyUI.", "Manage": "Beheren", - "Manage Direct Connections": "", - "Manage Models": "", + "Manage Direct Connections": "Beheer directe verbindingen", + "Manage Models": "Beheer modellen", "Manage Ollama": "Beheer Ollama", "Manage Ollama API Connections": "Beheer Ollama API-verbindingen", "Manage OpenAI API Connections": "Beheer OpenAI API-verbindingen", "Manage Pipelines": "Pijplijnen beheren", - "Manage Tool Servers": "", + "Manage Tool Servers": "Beheer gereedschapservers", "March": "Maart", "Max Tokens (num_predict)": "Max Tokens (num_predict)", "Max Upload Count": "Maximale Uploadhoeveelheid", @@ -700,8 +700,8 @@ "Model {{modelId}} not found": "Model {{modelId}} niet gevonden", "Model {{modelName}} is not vision capable": "Model {{modelName}} is niet geschikt voor visie", "Model {{name}} is now {{status}}": "Model {{name}} is nu {{status}}", - "Model {{name}} is now hidden": "", - "Model {{name}} is now visible": "", + "Model {{name}} is now hidden": "Model {{name}} is nu verborgen", + "Model {{name}} is now visible": "Model {{name}} is nu zichtbaar", "Model accepts image inputs": "Model accepteert afbeeldingsinvoer", "Model created successfully!": "Model succesvol gecreëerd", "Model filesystem path detected. 
Model shortname is required for update, cannot continue.": "Model filesystem path gedetecteerd. Model shortname is vereist voor update, kan niet doorgaan.", @@ -713,21 +713,21 @@ "Model Params": "Modelparams", "Model Permissions": "Modeltoestemmingen", "Model updated successfully": "Model succesvol bijgewerkt", - "Modelfile Content": "Modelfile Inhoud", + "Modelfile Content": "Modelfile-inhoud", "Models": "Modellen", "Models Access": "Modellentoegang", - "Models configuration saved successfully": "Modellenconfiguratie succeslvol opgeslagen", - "Models Public Sharing": "", + "Models configuration saved successfully": "Modellenconfiguratie succesvol opgeslagen", + "Models Public Sharing": "Modellen publiek delen", "Mojeek Search API Key": "Mojeek Search API-sleutel", "more": "Meer", "More": "Meer", "Name": "Naam", "Name your knowledge base": "Geef je kennisbasis een naam", - "Native": "", + "Native": "Native", "New Chat": "Nieuwe Chat", - "New Folder": "", + "New Folder": "Nieuwe map", "New Password": "Nieuw Wachtwoord", - "new-channel": "", + "new-channel": "nieuw-kanaal", "No content found": "Geen content gevonden", "No content to speak": "Geen inhoud om over te spreken", "No distance available": "Geen afstand beschikbaar", @@ -738,7 +738,7 @@ "No HTML, CSS, or JavaScript content found.": "Geen HTML, CSS, of JavaScript inhoud gevonden", "No inference engine with management support found": "", "No knowledge found": "Geen kennis gevonden", - "No memories to clear": "", + "No memories to clear": "Geen herinneringen om op te ruimen", "No model IDs": "Geen model-ID's", "No models found": "Geen modellen gevonden", "No models selected": "Geen modellen geselecteerd", @@ -752,8 +752,8 @@ "Not helpful": "Niet nuttig", "Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "Opmerking: Als je een minimumscore instelt, levert de zoekopdracht alleen documenten op met een score groter dan of gelijk aan 
de minimumscore.", "Notes": "Aantekeningen", - "Notification Sound": "", - "Notification Webhook": "", + "Notification Sound": "Notificatiegeluid", + "Notification Webhook": "Notificatie-webhook", "Notifications": "Notificaties", "November": "November", "num_gpu (Ollama)": "num_gpu (Ollama)", @@ -768,7 +768,7 @@ "Ollama API settings updated": "Ollama API-instellingen bijgewerkt", "Ollama Version": "Ollama Versie", "On": "Aan", - "OneDrive": "", + "OneDrive": "OneDrive", "Only alphanumeric characters and hyphens are allowed": "Alleen alfanumerieke tekens en koppeltekens zijn toegestaan", "Only alphanumeric characters and hyphens are allowed in the command string.": "Alleen alfanumerieke karakters en streepjes zijn toegestaan in de commando string.", "Only collections can be edited, create a new knowledge base to edit/add documents.": "Alleen verzamelinge kunnen gewijzigd worden, maak een nieuwe kennisbank aan om bestanden aan te passen/toe te voegen", From 88b93241dbbcf96f0b22ca10fc3880cc8ba26b3a Mon Sep 17 00:00:00 2001 From: Alex <70099710+Xelaph@users.noreply.github.com> Date: Tue, 1 Apr 2025 15:20:04 +0200 Subject: [PATCH 511/623] Update translation.json --- src/lib/i18n/locales/nl-NL/translation.json | 102 ++++++++++---------- 1 file changed, 51 insertions(+), 51 deletions(-) diff --git a/src/lib/i18n/locales/nl-NL/translation.json b/src/lib/i18n/locales/nl-NL/translation.json index 54b3722f922..6ce9e1fd46b 100644 --- a/src/lib/i18n/locales/nl-NL/translation.json +++ b/src/lib/i18n/locales/nl-NL/translation.json @@ -802,10 +802,10 @@ "PDF Extract Images (OCR)": "PDF extraheer afbeeldingen (OCR)", "pending": "wachtend", "Permission denied when accessing media devices": "Toegang geweigerd bij het toegang krijgen tot media-apparaten", - "Permission denied when accessing microphone": "Toegang geweigerd bij het toegang krijgen tot de microfoon", + "Permission denied when accessing microphone": "Toegang geweigerd bij toegang tot de microfoon", "Permission denied when 
accessing microphone: {{error}}": "Toestemming geweigerd bij toegang tot microfoon: {{error}}", "Permissions": "Toestemmingen", - "Perplexity API Key": "", + "Perplexity API Key": "Perplexity API-sleutel", "Personalization": "Personalisatie", "Pin": "Zet vast", "Pinned": "Vastgezet", @@ -818,44 +818,44 @@ "Plain text (.txt)": "Platte tekst (.txt)", "Playground": "Speeltuin", "Please carefully review the following warnings:": "Beoordeel de volgende waarschuwingen nauwkeurig:", - "Please do not close the settings page while loading the model.": "", + "Please do not close the settings page while loading the model.": "Sluit de instellingenpagina niet terwijl het model geladen wordt.", "Please enter a prompt": "Voer een prompt in", "Please fill in all fields.": "Voer alle velden in", - "Please select a model first.": "", - "Please select a model.": "", + "Please select a model first.": "Selecteer eerst een model", + "Please select a model.": "Selecteer een model", "Please select a reason": "Voer een reden in", "Port": "Poort", "Positive attitude": "Positieve positie", "Prefix ID": "Voorvoegsel-ID", "Prefix ID is used to avoid conflicts with other connections by adding a prefix to the model IDs - leave empty to disable": "Voorvoegsel-ID wordt gebruikt om conflicten met andere verbindingen te vermijden door een voorvoegsel aan het model-ID toe te voegen - laat leeg om uit te schakelen", - "Presence Penalty": "", + "Presence Penalty": "Aanwezigheidsstraf", "Previous 30 days": "Afgelopen 30 dagen", "Previous 7 days": "Afgelopen 7 dagen", - "Private": "", + "Private": "Privé", "Profile Image": "Profielafbeelding", - "Prompt": "", + "Prompt": "Prompt", "Prompt (e.g. Tell me a fun fact about the Roman Empire)": "Prompt (bv. 
Vertel me een leuke gebeurtenis over het Romeinse Rijk)", - "Prompt Autocompletion": "", + "Prompt Autocompletion": "Automatische promptaanvulling", "Prompt Content": "Promptinhoud", "Prompt created successfully": "Prompt succesvol aangemaakt", "Prompt suggestions": "Promptsuggesties", "Prompt updated successfully": "Prompt succesvol bijgewerkt", "Prompts": "Prompts", "Prompts Access": "Prompttoegang", - "Prompts Public Sharing": "", - "Public": "", + "Prompts Public Sharing": "Publiek prompts delen", + "Public": "Publiek", "Pull \"{{searchValue}}\" from Ollama.com": "Haal \"{{searchValue}}\" uit Ollama.com", "Pull a model from Ollama.com": "Haal een model van Ollama.com", "Query Generation Prompt": "Vraaggeneratieprompt", "RAG Template": "RAG-sjabloon", "Rating": "Beoordeling", "Re-rank models by topic similarity": "Herrangschik modellen op basis van onderwerpsovereenkomst", - "Read": "", + "Read": "Voorlezen", "Read Aloud": "Voorlezen", - "Reasoning Effort": "", + "Reasoning Effort": "Redeneerinspanning", "Record voice": "Neem stem op", "Redirecting you to Open WebUI Community": "Je wordt doorgestuurd naar OpenWebUI Community", - "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "", + "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "Vermindert de kans op het genereren van onzin. Een hogere waarde (bijv. 100) zal meer diverse antwoorden geven, terwijl een lagere waarde (bijv. 10) conservatiever zal zijn.", "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Refereer naar jezelf als \"user\" (bv. 
\"User is Spaans aan het leren\"", "References from": "Referenties van", "Refused when it shouldn't have": "Geweigerd terwijl het niet had moeten", @@ -867,8 +867,8 @@ "Rename": "Hernoemen", "Reorder Models": "Herschik modellen", "Repeat Last N": "Herhaal Laatste N", - "Repeat Penalty (Ollama)": "", - "Reply in Thread": "", + "Repeat Penalty (Ollama)": "Herhalingsstraf (Ollama)", + "Reply in Thread": "Antwoord in draad", "Request Mode": "Request Modus", "Reranking Model": "Reranking Model", "Reranking model disabled": "Reranking model uitgeschakeld", @@ -877,12 +877,12 @@ "Reset All Models": "Herstel alle modellen", "Reset Upload Directory": "Herstel Uploadmap", "Reset Vector Storage/Knowledge": "Herstel Vectoropslag/-kennis", - "Reset view": "", + "Reset view": "Herstel zicht", "Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Antwoordmeldingen kunnen niet worden geactiveerd omdat de rechten voor de website zijn geweigerd. 
Ga naar de instellingen van uw browser om de benodigde toegang te verlenen.", "Response splitting": "Antwoord splitsing", "Result": "Resultaat", - "Retrieval": "", - "Retrieval Query Generation": "", + "Retrieval": "Ophalen", + "Retrieval Query Generation": "Ophaalqueriegeneratie", "Rich Text Input for Chat": "Rijke tekstinvoer voor chatten", "RK": "RK", "Role": "Rol", @@ -912,11 +912,11 @@ "Search options": "Opties zoeken", "Search Prompts": "Prompts zoeken", "Search Result Count": "Aantal zoekresultaten", - "Search the internet": "", + "Search the internet": "Zoek op het internet", "Search Tools": "Zoek gereedschappen", "SearchApi API Key": "SearchApi API-sleutel", "SearchApi Engine": "SearchApi Engine", - "Searched {{count}} sites": "", + "Searched {{count}} sites": "Zocht op {{count}} sites", "Searching \"{{searchQuery}}\"": "\"{{searchQuery}}\" aan het zoeken.", "Searching Knowledge for \"{{searchQuery}}\"": "Zoek kennis bij \"{{searchQuery}}\"", "Searxng Query URL": "Searxng Query URL", @@ -931,8 +931,8 @@ "Select a pipeline": "Selecteer een pijplijn", "Select a pipeline url": "Selecteer een pijplijn-URL", "Select a tool": "Selecteer een tool", - "Select an auth method": "", - "Select an Ollama instance": "", + "Select an auth method": "Selecteer een authenticatiemethode", + "Select an Ollama instance": "Selecteer een Ollama-instantie", "Select Engine": "Selecteer Engine", "Select Knowledge": "Selecteer kennis", "Select only one model to call": "Selecteer maar één model om aan te roepen", @@ -943,8 +943,8 @@ "Send message": "Stuur bericht", "Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "Stuurt `stream_options: { include_usage: true }` in het verzoek. 
\nOndersteunde providers zullen informatie over tokengebruik in het antwoord terugsturen als dit aan staat.", "September": "September", - "SerpApi API Key": "", - "SerpApi Engine": "", + "SerpApi API Key": "SerpApi API-sleutel", + "SerpApi Engine": "SerpApi-engine", "Serper API Key": "Serper API-sleutel", "Serply API Key": "Serply API-sleutel", "Serpstack API Key": "Serpstack API-sleutel", @@ -960,26 +960,26 @@ "Set Scheduler": "Stel planner in", "Set Steps": "Stel stappen in", "Set Task Model": "Taakmodel instellen", - "Set the number of layers, which will be off-loaded to GPU. Increasing this value can significantly improve performance for models that are optimized for GPU acceleration but may also consume more power and GPU resources.": "", + "Set the number of layers, which will be off-loaded to GPU. Increasing this value can significantly improve performance for models that are optimized for GPU acceleration but may also consume more power and GPU resources.": "Stel het aantal lagen in dat wordt overgeheveld naar de GPU. Het verhogen van deze waarde kan de prestaties aanzienlijk verbeteren voor modellen die geoptimaliseerd zijn voor GPU-versnelling, maar kan ook meer stroom en GPU-bronnen verbruiken.", "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Stel het aantal threads in dat wordt gebruikt voor berekeningen. Deze optie bepaalt hoeveel threads worden gebruikt om gelijktijdig binnenkomende verzoeken te verwerken. Het verhogen van deze waarde kan de prestaties verbeteren onder hoge concurrency werklasten, maar kan ook meer CPU-bronnen verbruiken.", "Set Voice": "Stel stem in", "Set whisper model": "Stel Whisper-model in", - "Sets a flat bias against tokens that have appeared at least once. 
A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", - "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "", - "Sets how far back for the model to look back to prevent repetition.": "", - "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "", - "Sets the size of the context window used to generate the next token.": "", + "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "Stelt een vlakke bias in tegen tokens die minstens één keer zijn voorgekomen. Een hogere waarde (bijv. 1,5) straft herhalingen sterker af, terwijl een lagere waarde (bijv. 0,9) toegeeflijker is. Bij 0 is het uitgeschakeld.", + "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "Stelt een schaalvooroordeel in tegen tokens om herhalingen te bestraffen, gebaseerd op hoe vaak ze zijn voorgekomen. Een hogere waarde (bijv. 1,5) straft herhalingen sterker af, terwijl een lagere waarde (bijv. 0,9) toegeeflijker is. Bij 0 is het uitgeschakeld.", + "Sets how far back for the model to look back to prevent repetition.": "Stelt in hoe ver het model terug moet kijken om herhaling te voorkomen.", + "Sets the random number seed to use for generation. 
Setting this to a specific number will make the model generate the same text for the same prompt.": "Stelt de willekeurigheid in om te gebruiken voor het genereren. Als je dit op een specifiek getal instelt, genereert het model dezelfde tekst voor dezelfde prompt.", + "Sets the size of the context window used to generate the next token.": "Stelt de grootte van het contextvenster in dat gebruikt wordt om het volgende token te genereren.", "Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Stelt de te gebruiken stopsequentie in. Als dit patroon wordt gevonden, stopt de LLM met het genereren van tekst en keert terug. Er kunnen meerdere stoppatronen worden ingesteld door meerdere afzonderlijke stopparameters op te geven in een modelbestand.", "Settings": "Instellingen", "Settings saved successfully!": "Instellingen succesvol opgeslagen!", "Share": "Delen", "Share Chat": "Deel chat", "Share to Open WebUI Community": "Deel naar OpenWebUI-community", - "Sharing Permissions": "", + "Sharing Permissions": "Deeltoestemmingen", "Show": "Toon", "Show \"What's New\" modal on login": "Toon \"Wat is nieuw\" bij inloggen", "Show Admin Details in Account Pending Overlay": "Admin-details weergeven in overlay in afwachting van account", - "Show Model": "", + "Show Model": "Toon model", "Show shortcuts": "Toon snelkoppelingen", "Show your support!": "Toon je steun", "Showcased creativity": "Toonde creativiteit", @@ -990,7 +990,7 @@ "Sign up": "Registreren", "Sign up to {{WEBUI_NAME}}": "Meld je aan bij {{WEBUI_NAME}}", "Signing in to {{WEBUI_NAME}}": "Aan het inloggen bij {{WEBUI_NAME}}", - "sk-1234": "", + "sk-1234": "sk-1234", "Source": "Bron", "Speech Playback Speed": "Afspeelsnelheid spraak", "Speech recognition error: {{error}}": "Spraakherkenning fout: {{error}}", @@ -1010,13 +1010,13 @@ "System": "Systeem", "System 
Instructions": "Systeem instructies", "System Prompt": "Systeem prompt", - "Tags": "", - "Tags Generation": "", + "Tags": "Tags", + "Tags Generation": "Taggeneratie", "Tags Generation Prompt": "Prompt voor taggeneratie", - "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "", - "Talk to model": "", + "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "Tail free sampling wordt gebruikt om de impact van minder waarschijnlijke tokens uit de uitvoer te verminderen. Een hogere waarde (bijvoorbeeld 2,0) zal de impact meer verminderen, terwijl een waarde van 1,0 deze instelling uitschakelt.", + "Talk to model": "Praat met model", "Tap to interrupt": "Tik om te onderbreken", - "Tasks": "", + "Tasks": "Taken", "Tavily API Key": "Tavily API-sleutel", "Tell us more:": "Vertel ons meer:", "Temperature": "Temperatuur", @@ -1028,24 +1028,24 @@ "Thanks for your feedback!": "Bedankt voor je feedback!", "The Application Account DN you bind with for search": "Het applicatieaccount DN waarmee je zoekt", "The base to search for users": "De basis om gebruikers te zoeken", - "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "", + "The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "De batchgrootte bepaalt hoeveel tekstverzoeken tegelijk worden verwerkt. 
Een hogere batchgrootte kan de prestaties en snelheid van het model verhogen, maar vereist ook meer geheugen.", "The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "De ontwikkelaars achter deze plugin zijn gepassioneerde vrijwilligers uit de gemeenschap. Als je deze plugin nuttig vindt, overweeg dan om bij te dragen aan de ontwikkeling ervan.", "The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Het beoordelingsklassement is gebaseerd op het Elo-classificatiesysteem en wordt in realtime bijgewerkt.", - "The LDAP attribute that maps to the mail that users use to sign in.": "", - "The LDAP attribute that maps to the username that users use to sign in.": "Het LDAP-attribuut dat de gebruikersnaam koppelt die gebruikers gebruiken om in te loggen.", + "The LDAP attribute that maps to the mail that users use to sign in.": "Het LDAP-attribuut dat verwijst naar de e-mail waarmee gebruikers zich aanmelden.", + "The LDAP attribute that maps to the username that users use to sign in.": "Het LDAP-attribuut dat verwijst naar de gebruikersnaam die gebruikers gebruiken om in te loggen.", "The leaderboard is currently in beta, and we may adjust the rating calculations as we refine the algorithm.": "Het leaderboard is momenteel in bèta en we kunnen de ratingberekeningen aanpassen naarmate we het algoritme verfijnen.", "The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "De maximale bestandsgrootte in MB. Als het bestand groter is dan deze limiet, wordt het bestand niet geüpload.", "The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Het maximum aantal bestanden dat in één keer kan worden gebruikt in de chat. 
Als het aantal bestanden deze limiet overschrijdt, worden de bestanden niet geüpload.", - "The score should be a value between 0.0 (0%) and 1.0 (100%).": "Het score moet een waarde zijn tussen 0.0 (0%) en 1.0 (100%).", - "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "", + "The score should be a value between 0.0 (0%) and 1.0 (100%).": "De score moet een waarde zijn tussen 0.0 (0%) en 1.0 (100%).", + "The temperature of the model. Increasing the temperature will make the model answer more creatively.": "De temperatuur van het model. De temperatuur groter maken zal het model creatiever laten antwoorden.", "Theme": "Thema", "Thinking...": "Aan het denken...", "This action cannot be undone. Do you wish to continue?": "Deze actie kan niet ongedaan worden gemaakt. Wilt u doorgaan?", - "This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "", + "This channel was created on {{createdAt}}. This is the very beginning of the {{channelName}} channel.": "Dit kanaal was gecreëerd op {{createdAt}}. Dit is het begin van het {{channelName}} kanaal.", "This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Dit zorgt ervoor dat je waardevolle gesprekken veilig worden opgeslagen in je backend database. Dank je wel!", "This is an experimental feature, it may not function as expected and is subject to change at any time.": "Dit is een experimentele functie, het kan functioneren zoals verwacht en kan op elk moment veranderen.", - "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "", - "This option sets the maximum number of tokens the model can generate in its response. 
Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "", + "This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "Deze optie bepaalt hoeveel tokens bewaard blijven bij het verversen van de context. Als deze bijvoorbeeld op 2 staat, worden de laatste 2 tekens van de context van het gesprek bewaard. Het behouden van de context kan helpen om de continuïteit van een gesprek te behouden, maar het kan de mogelijkheid om te reageren op nieuwe onderwerpen verminderen.", + "This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "Deze optie stelt het maximum aantal tokens in dat het model kan genereren in zijn antwoord. Door dit limiet te verhogen, kan het model langere antwoorden geven, maar het kan ook de kans vergroten dat er onbehulpzame of irrelevante inhoud wordt gegenereerd.", "This option will delete all existing files in the collection and replace them with newly uploaded files.": "Deze optie verwijdert alle bestaande bestanden in de collectie en vervangt ze door nieuw geüploade bestanden.", "This response was generated by \"{{model}}\"": "Dit antwoord is gegenereerd door \"{{model}}\"", "This will delete": "Dit zal verwijderen", @@ -1054,8 +1054,8 @@ "This will delete all models including custom models and cannot be undone.": "Dit zal alle modellen, ook aangepaste modellen, verwijderen en kan niet ontdaan worden", "This will reset the knowledge base and sync all files. 
Do you wish to continue?": "Dit zal de kennisdatabase resetten en alle bestanden synchroniseren. Wilt u doorgaan?", "Thorough explanation": "Gevorderde uitleg", - "Thought for {{DURATION}}": "", - "Thought for {{DURATION}} seconds": "", + "Thought for {{DURATION}}": "Dacht {{DURATION}}", + "Thought for {{DURATION}} seconds": "Dacht {{DURATION}} seconden", "Tika": "Tika", "Tika Server URL required.": "Tika Server-URL vereist", "Tiktoken": "Tiktoken", @@ -1064,14 +1064,14 @@ "Title (e.g. Tell me a fun fact)": "Titel (bv. Vertel me een leuke gebeurtenis)", "Title Auto-Generation": "Titel Auto-Generatie", "Title cannot be an empty string.": "Titel kan niet leeg zijn.", - "Title Generation": "", + "Title Generation": "Titelgeneratie", "Title Generation Prompt": "Titel Generatie Prompt", "TLS": "TLS", "To access the available model names for downloading,": "Om de beschikbare modelnamen voor downloaden te openen,", "To access the GGUF models available for downloading,": "Om toegang te krijgen tot de GGUF-modellen die beschikbaar zijn voor downloaden,", "To access the WebUI, please reach out to the administrator. Admins can manage user statuses from the Admin Panel.": "Om toegang te krijgen tot de WebUI, neem contact op met de administrator. 
Beheerders kunnen de gebruikersstatussen beheren vanuit het Beheerderspaneel.", "To attach knowledge base here, add them to the \"Knowledge\" workspace first.": "Om hier een kennisbron bij te voegen, voeg ze eerst aan de \"Kennis\" werkplaats toe.", - "To learn more about available endpoints, visit our documentation.": "", + "To learn more about available endpoints, visit our documentation.": "Om meer over beschikbare endpoints te leren, bezoek onze documentatie.", "To protect your privacy, only ratings, model IDs, tags, and metadata are shared from your feedback—your chat logs remain private and are not included.": "Om je privacy te beschermen, worden alleen beoordelingen, model-ID's, tags en metadata van je feedback gedeeld - je chatlogs blijven privé en worden niet opgenomen.", "To select actions here, add them to the \"Functions\" workspace first.": "Om hier acties te selecteren, voeg ze eerst aan de \"Functies\" Werkplaats toe.", "To select filters here, add them to the \"Functions\" workspace first.": "Om hier filters te selecteren, voeg ze eerst aan de \"Functies\" Werkplaats toe.", From 82e5a6410c11c0445489e6666553a9c2f77b6777 Mon Sep 17 00:00:00 2001 From: Alex <70099710+Xelaph@users.noreply.github.com> Date: Tue, 1 Apr 2025 16:09:17 +0200 Subject: [PATCH 512/623] Update translation.json --- src/lib/i18n/locales/nl-NL/translation.json | 56 ++++++++++----------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/src/lib/i18n/locales/nl-NL/translation.json b/src/lib/i18n/locales/nl-NL/translation.json index 6ce9e1fd46b..e71dd4bed97 100644 --- a/src/lib/i18n/locales/nl-NL/translation.json +++ b/src/lib/i18n/locales/nl-NL/translation.json @@ -337,7 +337,7 @@ "Don't like the style": "Vind je de stijl niet mooi?", "Done": "Voltooid", "Download": "Download", - "Download as SVG": "", + "Download as SVG": "Download als SVG", "Download canceled": "Download geannuleerd", "Download Database": "Download database", "Drag and drop a file to upload or select a 
file to view": "Sleep een bestand om te uploaden of selecteer een bestand om te bekijken", @@ -526,9 +526,9 @@ "Form": "Formulier", "Format your variables using brackets like this:": "Formateer je variabelen met haken zoals dit:", "Frequency Penalty": "Frequentiestraf", - "Full Context Mode": "", + "Full Context Mode": "Volledige contextmodus", "Function": "Functie", - "Function Calling": "", + "Function Calling": "Functieaanroep", "Function created successfully": "Functie succesvol aangemaakt", "Function deleted successfully": "Functie succesvol verwijderd", "Function Description": "Functiebeschrijving", @@ -736,7 +736,7 @@ "No files found.": "Geen bestanden gevonden", "No groups with access, add a group to grant access": "Geen groepen met toegang, voeg een groep toe om toegang te geven", "No HTML, CSS, or JavaScript content found.": "Geen HTML, CSS, of JavaScript inhoud gevonden", - "No inference engine with management support found": "", + "No inference engine with management support found": "Geen inferentie-engine met beheerondersteuning gevonden", "No knowledge found": "Geen kennis gevonden", "No memories to clear": "Geen herinneringen om op te ruimen", "No model IDs": "Geen model-ID's", @@ -1093,22 +1093,22 @@ "Tools": "Gereedschappen", "Tools Access": "Gereedschaptoegang", "Tools are a function calling system with arbitrary code execution": "Gereedschappen zijn een systeem voor het aanroepen van functies met willekeurige code-uitvoering", - "Tools Function Calling Prompt": "", + "Tools Function Calling Prompt": "Gereedschapsfunctie aanroepprompt", "Tools have a function calling system that allows arbitrary code execution": "Gereedschappen hebben een systeem voor het aanroepen van functies waarmee willekeurige code kan worden uitgevoerd", "Tools have a function calling system that allows arbitrary code execution.": "Gereedschappen hebben een systeem voor het aanroepen van functies waarmee willekeurige code kan worden uitgevoerd", - "Tools Public Sharing": "", 
+ "Tools Public Sharing": "Gereedschappen publiek delen", "Top K": "Top K", - "Top K Reranker": "", + "Top K Reranker": "Top K herranker", "Top P": "Top P", - "Transformers": "", + "Transformers": "Transformers", "Trouble accessing Ollama?": "Problemen met toegang tot Ollama?", - "Trust Proxy Environment": "", + "Trust Proxy Environment": "Vertrouw proxyomgeving", "TTS Model": "TTS Model", "TTS Settings": "TTS instellingen", "TTS Voice": "TTS Stem", "Type": "Type", "Type Hugging Face Resolve (Download) URL": "Type Hugging Face Resolve (Download) URL", - "Uh-oh! There was an issue with the response.": "", + "Uh-oh! There was an issue with the response.": "Oh-oh! Er was een probleem met het antwoord.", "UI": "UI", "Unarchive All": "Onarchiveer alles", "Unarchive All Archived Chats": "Onarchiveer alle gearchiveerde chats", @@ -1124,7 +1124,7 @@ "Updated": "Bijgewerkt", "Updated at": "Bijgewerkt om", "Updated At": "Bijgewerkt om", - "Upgrade to a licensed plan for enhanced capabilities, including custom theming and branding, and dedicated support.": "", + "Upgrade to a licensed plan for enhanced capabilities, including custom theming and branding, and dedicated support.": "Upgrade naar een licentie voor meer mogelijkheden, waaronder aangepaste thematisering en branding, en speciale ondersteuning.", "Upload": "Uploaden", "Upload a GGUF model": "Upload een GGUF-model", "Upload directory": "Upload map", @@ -1140,10 +1140,10 @@ "Use Initials": "Gebruik initialen", "use_mlock (Ollama)": "use_mlock (Ollama)", "use_mmap (Ollama)": "use_mmap (Ollama)", - "user": "user", - "User": "User", + "user": "gebruiker", + "User": "Gebruiker", "User location successfully retrieved.": "Gebruikerslocatie succesvol opgehaald", - "User Webhooks": "", + "User Webhooks": "Gebruiker-webhooks", "Username": "Gebruikersnaam", "Users": "Gebruikers", "Using the default arena model with all models. 
Click the plus button to add custom models.": "Het standaard arena-model gebruiken met alle modellen. Klik op de plusknop om aangepaste modellen toe te voegen.", @@ -1154,11 +1154,11 @@ "Valves updated successfully": "Kleppen succesvol bijgewerkt", "variable": "variabele", "variable to have them replaced with clipboard content.": "variabele om ze te laten vervangen door klembord inhoud.", - "Verify Connection": "", + "Verify Connection": "Controleer verbinding", "Version": "Versie", "Version {{selectedVersion}} of {{totalVersions}}": "Versie {{selectedVersion}} van {{totalVersions}}", - "View Replies": "", - "View Result from `{{NAME}}`": "", + "View Replies": "Bekijk antwoorden", + "View Result from `{{NAME}}`": "Bekijk resultaat van `{{NAME}}`", "Visibility": "Zichtbaarheid", "Voice": "Stem", "Voice Input": "Steminvoer", @@ -1166,19 +1166,19 @@ "Warning:": "Waarschuwing", "Warning: Enabling this will allow users to upload arbitrary code on the server.": "Waarschuwing: Door dit in te schakelen kunnen gebruikers willekeurige code uploaden naar de server.", "Warning: If you update or change your embedding model, you will need to re-import all documents.": "Warning: Als je de embedding model bijwerkt of wijzigt, moet je alle documenten opnieuw importeren.", - "Warning: Jupyter execution enables arbitrary code execution, posing severe security risks—proceed with extreme caution.": "", + "Warning: Jupyter execution enables arbitrary code execution, posing severe security risks—proceed with extreme caution.": "Waarschuwing: Jupyter kan willekeurige code uitvoeren, wat ernstige veiligheidsrisico's met zich meebrengt - ga uiterst voorzichtig te werk. 
", "Web": "Web", "Web API": "Web-API", "Web Search": "Zoeken op het web", "Web Search Engine": "Zoekmachine op het web", - "Web Search in Chat": "", - "Web Search Query Generation": "", + "Web Search in Chat": "Zoekopdracht in chat", + "Web Search Query Generation": "Zoekopdracht generatie", "Webhook URL": "Webhook URL", "WebUI Settings": "WebUI Instellingen", - "WebUI URL": "", + "WebUI URL": "WebUI-URL", "WebUI will make requests to \"{{url}}/api/chat\"": "WebUI zal verzoeken doen aan \"{{url}}/api/chat\"", "WebUI will make requests to \"{{url}}/chat/completions\"": "WebUI zal verzoeken doen aan \"{{url}}/chat/completions\"", - "WebUI will make requests to \"{{url}}/openapi.json\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "WebUI zal verzoeken doen aan \"{{url}}/openapi.json\"", "What are you trying to achieve?": "Wat probeer je te bereiken?", "What are you working on?": "Waar werk je aan?", "What’s New in": "Wat is nieuw in", @@ -1188,21 +1188,21 @@ "Why?": "Waarom?", "Widescreen Mode": "Breedschermmodus", "Won": "Gewonnen", - "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "", + "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "Werkt samen met top-k. Een hogere waarde (bijv. 0,95) leidt tot meer diverse tekst, terwijl een lagere waarde (bijv. 0,5) meer gerichte en conservatieve tekst genereert.", "Workspace": "Werkruimte", "Workspace Permissions": "Werkruimtemachtigingen", - "Write": "", + "Write": "Schrijf", "Write a prompt suggestion (e.g. Who are you?)": "Schrijf een prompt suggestie (bijv. 
Wie ben je?)", "Write a summary in 50 words that summarizes [topic or keyword].": "Schrijf een samenvatting in 50 woorden die [onderwerp of trefwoord] samenvat.", "Write something...": "Schrijf iets...", "Write your model template content here": "Schrijf je modelsjablooninhoud hier", "Yesterday": "Gisteren", "You": "Jij", - "You are currently using a trial license. Please contact support to upgrade your license.": "", + "You are currently using a trial license. Please contact support to upgrade your license.": "Je gebruikt momenteel een proeflicentie. Neem contact op met de ondersteuning om je licentie te upgraden.", "You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Je kunt slechts met maximaal {{maxCount}} bestand(en) tegelijk chatten", "You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Je kunt je interacties met LLM's personaliseren door herinneringen toe te voegen via de 'Beheer'-knop hieronder, waardoor ze nuttiger en voor jou op maat gemaakt worden.", "You cannot upload an empty file.": "Je kunt een leeg bestand niet uploaden.", - "You do not have permission to upload files": "", + "You do not have permission to upload files": "Je hebt geen toestemming om bestanden up te loaden", "You do not have permission to upload files.": "Je hebt geen toestemming om bestanden up te loaden", "You have no archived conversations.": "Je hebt geen gearchiveerde gesprekken.", "You have shared this chat": "Je hebt dit gesprek gedeeld", @@ -1211,6 +1211,6 @@ "Your account status is currently pending activation.": "Je accountstatus wacht nu op activatie", "Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "Je volledige bijdrage gaat direct naar de ontwikkelaar van de plugin; Open WebUI neemt hier geen deel van. 
Het gekozen financieringsplatform kan echter wel zijn eigen kosten hebben.", "Youtube": "Youtube", - "Youtube Language": "", - "Youtube Proxy URL": "" + "Youtube Language": "Youtube-taal", + "Youtube Proxy URL": "Youtube-proxy-URL" } From 883ad55f5c14218d90b320520ff8153a999ecb52 Mon Sep 17 00:00:00 2001 From: Alex <70099710+Xelaph@users.noreply.github.com> Date: Tue, 1 Apr 2025 16:12:31 +0200 Subject: [PATCH 513/623] Update translation.json --- src/lib/i18n/locales/nl-NL/translation.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib/i18n/locales/nl-NL/translation.json b/src/lib/i18n/locales/nl-NL/translation.json index e71dd4bed97..b8be535495a 100644 --- a/src/lib/i18n/locales/nl-NL/translation.json +++ b/src/lib/i18n/locales/nl-NL/translation.json @@ -134,7 +134,7 @@ "Beta": "Beta", "Bing Search V7 Endpoint": "Bing Search V7 Endpoint", "Bing Search V7 Subscription Key": "Bing Search V7 Subscription Key", - "Bocha Search API Key": ""Bocha Search API-sleutel, + "Bocha Search API Key": "Bocha Search API-sleutel", "Boosting or penalizing specific tokens for constrained responses. Bias values will be clamped between -100 and 100 (inclusive). (Default: none)": "Versterken of bestraffen van specifieke tokens voor beperkte reacties. Biaswaarden worden geklemd tussen -100 en 100 (inclusief). (Standaard: none)", "Brave Search API Key": "Brave Search API-sleutel", "By {{name}}": "Op {{name}}", @@ -856,7 +856,7 @@ "Record voice": "Neem stem op", "Redirecting you to Open WebUI Community": "Je wordt doorgestuurd naar OpenWebUI Community", "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "Vermindert de kans op het genereren van onzin. Een hogere waarde (bijv. 100) zal meer diverse antwoorden geven, terwijl een lagere waarde (bijv. 
10) conservatiever zal zijn.", - "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Refereer naar jezelf als \"user\" (bv. \"User is Spaans aan het leren\"", + "Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Refereer naar jezelf als \"user\" (bv. \"User is Spaans aan het leren\")", "References from": "Referenties van", "Refused when it shouldn't have": "Geweigerd terwijl het niet had moeten", "Regenerate": "Regenereren", From 8f8c34471e7b42ed2485bdbffd4bd9f36f69ef16 Mon Sep 17 00:00:00 2001 From: Daniel Nowak <13685818+lowlyocean@users.noreply.github.com> Date: Tue, 1 Apr 2025 12:03:09 -0400 Subject: [PATCH 514/623] Pin onnxruntime to 1.20.1 to address SIGILL on certain arm64 hosts --- backend/requirements.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/backend/requirements.txt b/backend/requirements.txt index 87b5a4d123d..dd7c859329f 100644 --- a/backend/requirements.txt +++ b/backend/requirements.txt @@ -83,6 +83,8 @@ opencv-python-headless==4.11.0.86 rapidocr-onnxruntime==1.3.24 rank-bm25==0.2.2 +onnxruntime==1.20.1 + faster-whisper==1.1.1 PyJWT[crypto]==2.10.1 From 0447d909515c9abc77ecbf8aabafb182e92fbf69 Mon Sep 17 00:00:00 2001 From: Simon Date: Tue, 1 Apr 2025 20:01:08 +0200 Subject: [PATCH 515/623] Update translation.json --- src/lib/i18n/locales/uk-UA/translation.json | 52 ++++++++++----------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/src/lib/i18n/locales/uk-UA/translation.json b/src/lib/i18n/locales/uk-UA/translation.json index bb2be72ba3e..d1ec07feabf 100644 --- a/src/lib/i18n/locales/uk-UA/translation.json +++ b/src/lib/i18n/locales/uk-UA/translation.json @@ -6,7 +6,7 @@ "(latest)": "(остання)", "(Ollama)": "(Ollama)", "{{ models }}": "{{ models }}", - "{{COUNT}} Available Tool Servers": "", + "{{COUNT}} Available Tool Servers": "{{COUNT}} Доступні інструменти на серверах", "{{COUNT}} hidden lines": "{{COUNT}} прихованих рядків", "{{COUNT}} Replies": "{{COUNT}} Відповіді", 
"{{user}}'s Chats": "Чати {{user}}а", @@ -119,7 +119,7 @@ "AUTOMATIC1111 Base URL": "URL-адреса AUTOMATIC1111", "AUTOMATIC1111 Base URL is required.": "Необхідна URL-адреса AUTOMATIC1111.", "Available list": "Список доступності", - "Available Tool Servers": "", + "Available Tool Servers": "Доступні сервери інструментів", "available!": "доступно!", "Awful": "Жахливо", "Azure AI Speech": "Мовлення Azure AI", @@ -217,7 +217,7 @@ "Confirm your action": "Підтвердіть свою дію", "Confirm your new password": "Підтвердіть свій новий пароль", "Connect to your own OpenAI compatible API endpoints.": "Підключіться до своїх власних API-ендпоінтів, сумісних з OpenAI.", - "Connect to your own OpenAPI compatible external tool servers.": "", + "Connect to your own OpenAPI compatible external tool servers.": "Підключіться до своїх власних зовнішніх серверів інструментів, сумісних з OpenAPI.", "Connections": "З'єднання", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "Обмежує зусилля на міркування для моделей міркування. Діє лише для моделей міркування від конкретних постачальників, які підтримують зусилля міркування.", "Contact Admin for WebUI Access": "Зверніться до адміна для отримання доступу до WebUI", @@ -344,7 +344,7 @@ "Draw": "Малювати", "Drop any files here to add to the conversation": "Перетягніть сюди файли, щоб додати до розмови", "e.g. '30s','10m'. Valid time units are 's', 'm', 'h'.": "напр., '30s','10m'. Дійсні одиниці часу: 'с', 'хв', 'г'.", - "e.g. \"json\" or a JSON schema": "", + "e.g. \"json\" or a JSON schema": "напр., \"json\" або схема JSON", "e.g. 60": "напр. 60", "e.g. A filter to remove profanity from text": "напр., фільтр для видалення нецензурної лексики з тексту", "e.g. 
My Filter": "напр., Мій фільтр", @@ -379,7 +379,7 @@ "Enable Mirostat sampling for controlling perplexity.": "Увімкнути вибірку Mirostat для контролю перплексії.", "Enable New Sign Ups": "Дозволити нові реєстрації", "Enabled": "Увімкнено", - "Enforce Temporary Chat": "", + "Enforce Temporary Chat": "Застосувати тимчасовий чат", "Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Переконайтеся, що ваш CSV-файл містить 4 колонки в такому порядку: Ім'я, Email, Пароль, Роль.", "Enter {{role}} message here": "Введіть повідомлення {{role}} тут", "Enter a detail about yourself for your LLMs to recall": "Введіть відомості про себе для запам'ятовування вашими LLM.", @@ -442,7 +442,7 @@ "Enter timeout in seconds": "Введіть тайм-аут у секундах", "Enter to Send": "Введіть для відправки", "Enter Top K": "Введіть Top K", - "Enter Top K Reranker": "", + "Enter Top K Reranker": "Введіть Top K Реранкер", "Enter URL (e.g. http://127.0.0.1:7860/)": "Введіть URL-адресу (напр., http://127.0.0.1:7860/)", "Enter URL (e.g. http://localhost:11434)": "Введіть URL-адресу (напр., http://localhost:11434)", "Enter your current password": "Введіть ваш поточний пароль", @@ -468,7 +468,7 @@ "Exceeded the number of seats in your license. Please contact support to increase the number of seats.": "Перевищено кількість місць у вашій ліцензії. 
Будь ласка, зверніться до підтримки для збільшення кількості місць.", "Exclude": "Виключити", "Execute code for analysis": "Виконати код для аналізу", - "Executing `{{NAME}}`...": "", + "Executing `{{NAME}}`...": "Виконання `{{NAME}}`...", "Expand": "Розгорнути", "Experimental": "Експериментальне", "Explain": "Пояснити", @@ -489,7 +489,7 @@ "External": "Зовнішній", "External Models": "Зовнішні моделі", "Failed to add file.": "Не вдалося додати файл.", - "Failed to connect to {{URL}} OpenAPI tool server": "", + "Failed to connect to {{URL}} OpenAPI tool server": "Не вдалося підключитися до серверу інструментів OpenAPI {{URL}}", "Failed to create API Key.": "Не вдалося створити API ключ.", "Failed to fetch models": "Не вдалося отримати моделі", "Failed to read clipboard contents": "Не вдалося прочитати вміст буфера обміну", @@ -570,7 +570,7 @@ "Hex Color": "Шістнадцятковий колір", "Hex Color - Leave empty for default color": "Шістнадцятковий колір — залиште порожнім для кольору за замовчуванням", "Hide": "Приховати", - "Hide Model": "", + "Hide Model": "Приховати модель", "Home": "Головна", "Host": "Хост", "How can I help you today?": "Чим я можу допомогти вам сьогодні?", @@ -608,7 +608,7 @@ "Integration": "Інтеграція", "Interface": "Інтерфейс", "Invalid file format.": "Неправильний формат файлу.", - "Invalid JSON schema": "", + "Invalid JSON schema": "Невірна схема JSON", "Invalid Tag": "Недійсний тег", "is typing...": "друкує...", "January": "Січень", @@ -630,7 +630,7 @@ "Knowledge Access": "Доступ до знань", "Knowledge created successfully.": "Знання успішно створено.", "Knowledge deleted successfully.": "Знання успішно видалено.", - "Knowledge Public Sharing": "", + "Knowledge Public Sharing": "Публічний обмін знаннями", "Knowledge reset successfully.": "Знання успішно скинуто.", "Knowledge updated successfully": "Знання успішно оновлено", "Kokoro.js (Browser)": "Kokoro.js (Браузер)", @@ -645,8 +645,8 @@ "LDAP server updated": "Сервер LDAP оновлено", 
"Leaderboard": "Таблиця лідерів", "Leave empty for unlimited": "Залиште порожнім для необмеженого розміру", - "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "", - "Leave empty to include all models from \"{{url}}/models\" endpoint": "", + "Leave empty to include all models from \"{{url}}/api/tags\" endpoint": "Залиште порожнім, щоб включити всі моделі з кінцевої точки \"{{url}}/api/tags\"", + "Leave empty to include all models from \"{{url}}/models\" endpoint": "Залиште порожнім, щоб включити всі моделі з кінцевої точки \"{{url}}/models\"", "Leave empty to include all models or select specific models": "Залиште порожнім, щоб включити усі моделі, або виберіть конкретні моделі.", "Leave empty to use the default prompt, or enter a custom prompt": "Залиште порожнім для використання стандартного запиту, або введіть власний запит", "Leave model field empty to use the default model.": "Залиште поле моделі порожнім, щоб використовувати модель за замовчуванням.", @@ -673,7 +673,7 @@ "Manage Ollama API Connections": "Керувати з'єднаннями Ollama API", "Manage OpenAI API Connections": "Керувати з'єднаннями OpenAI API", "Manage Pipelines": "Керування конвеєрами", - "Manage Tool Servers": "", + "Manage Tool Servers": "Керувати серверами інструментів", "March": "Березень", "Max Tokens (num_predict)": "Макс токенів (num_predict)", "Max Upload Count": "Макс. 
кількість завантажень", @@ -700,8 +700,8 @@ "Model {{modelId}} not found": "Модель {{modelId}} не знайдено", "Model {{modelName}} is not vision capable": "Модель {{modelName}} не здатна бачити", "Model {{name}} is now {{status}}": "Модель {{name}} тепер має {{status}}", - "Model {{name}} is now hidden": "", - "Model {{name}} is now visible": "", + "Model {{name}} is now hidden": "Модель {{name}} тепер схована", + "Model {{name}} is now visible": "Модель {{name}} тепер видима", "Model accepts image inputs": "Модель приймає зображеня", "Model created successfully!": "Модель створено успішно!", "Model filesystem path detected. Model shortname is required for update, cannot continue.": "Виявлено шлях до файлової системи моделі. Для оновлення потрібно вказати коротке ім'я моделі, не вдасться продовжити.", @@ -717,7 +717,7 @@ "Models": "Моделі", "Models Access": "Доступ до моделей", "Models configuration saved successfully": "Конфігурацію моделей успішно збережено", - "Models Public Sharing": "", + "Models Public Sharing": "Публічний обмін моделями", "Mojeek Search API Key": "API ключ для пошуку Mojeek", "more": "більше", "More": "Більше", @@ -835,14 +835,14 @@ "Profile Image": "Зображення профілю", "Prompt": "Підказка", "Prompt (e.g. 
Tell me a fun fact about the Roman Empire)": "Підказка (напр., розкажіть мені цікавий факт про Римську імперію)", - "Prompt Autocompletion": "", + "Prompt Autocompletion": "Автозавершення підказок", "Prompt Content": "Зміст промту", "Prompt created successfully": "Підказку успішно створено", "Prompt suggestions": "Швидкі промти", "Prompt updated successfully": "Підказку успішно оновлено", "Prompts": "Промти", "Prompts Access": "Доступ до підказок", - "Prompts Public Sharing": "", + "Prompts Public Sharing": "Публічний обмін промтами", "Public": "Публічний", "Pull \"{{searchValue}}\" from Ollama.com": "Завантажити \"{{searchValue}}\" з Ollama.com", "Pull a model from Ollama.com": "Завантажити модель з Ollama.com", @@ -975,11 +975,11 @@ "Share": "Поділитися", "Share Chat": "Поділитися чатом", "Share to Open WebUI Community": "Поділитися зі спільнотою OpenWebUI", - "Sharing Permissions": "", + "Sharing Permissions": "Дозволи на обмін", "Show": "Показати", "Show \"What's New\" modal on login": "Показати модальне вікно \"Що нового\" під час входу", "Show Admin Details in Account Pending Overlay": "Відобразити дані адміна у вікні очікування облікового запису", - "Show Model": "", + "Show Model": "Показати модель", "Show shortcuts": "Показати клавіатурні скорочення", "Show your support!": "Підтримайте нас!", "Showcased creativity": "Продемонстрований креатив", @@ -1096,9 +1096,9 @@ "Tools Function Calling Prompt": "Підказка для виклику функцій інструментів", "Tools have a function calling system that allows arbitrary code execution": "Інструменти мають систему виклику функцій, яка дозволяє виконання довільного коду", "Tools have a function calling system that allows arbitrary code execution.": "Інструменти мають систему виклику функцій, яка дозволяє виконання довільного коду.", - "Tools Public Sharing": "", + "Tools Public Sharing": "Публічний обмін інструментами", "Top K": "Top K", - "Top K Reranker": "", + "Top K Reranker": "Top K Реранкер", "Top P": "Top P", 
"Transformers": "Трансформери", "Trouble accessing Ollama?": "Проблеми з доступом до Ollama?", @@ -1143,7 +1143,7 @@ "user": "користувач", "User": "Користувач", "User location successfully retrieved.": "Місцезнаходження користувача успішно знайдено.", - "User Webhooks": "", + "User Webhooks": "Вебхуки користувача", "Username": "Ім'я користувача", "Users": "Користувачі", "Using the default arena model with all models. Click the plus button to add custom models.": "Використовуючи модель арени за замовчуванням з усіма моделями. Натисніть кнопку плюс, щоб додати користувацькі моделі.", @@ -1158,7 +1158,7 @@ "Version": "Версія", "Version {{selectedVersion}} of {{totalVersions}}": "Версія {{selectedVersion}} з {{totalVersions}}", "View Replies": "Переглянути відповіді", - "View Result from `{{NAME}}`": "", + "View Result from `{{NAME}}`": "Переглянути результат з `{{NAME}}`", "Visibility": "Видимість", "Voice": "Голос", "Voice Input": "Голосове введення", @@ -1178,7 +1178,7 @@ "WebUI URL": "WebUI URL", "WebUI will make requests to \"{{url}}/api/chat\"": "WebUI надсилатиме запити до \"{{url}}/api/chat\"", "WebUI will make requests to \"{{url}}/chat/completions\"": "WebUI надсилатиме запити до \"{{url}}/chat/completions\"", - "WebUI will make requests to \"{{url}}/openapi.json\"": "", + "WebUI will make requests to \"{{url}}/openapi.json\"": "WebUI буде здійснювати запити до \"{{url}}/openapi.json\"", "What are you trying to achieve?": "Чого ви прагнете досягти?", "What are you working on?": "Над чим ти працюєш?", "What’s New in": "Що нового в", From 93d7702e8c889361e4198a12038fdcccd5f83505 Mon Sep 17 00:00:00 2001 From: Patrick Wachter Date: Tue, 1 Apr 2025 16:26:32 +0200 Subject: [PATCH 516/623] refactor: move MistralLoader to a separate module and just use the requests package instead of mistralai --- backend/open_webui/retrieval/loaders/main.py | 49 +--- .../open_webui/retrieval/loaders/mistral.py | 226 ++++++++++++++++++ backend/requirements.txt | 1 - 3 files changed, 
227 insertions(+), 49 deletions(-) create mode 100644 backend/open_webui/retrieval/loaders/mistral.py diff --git a/backend/open_webui/retrieval/loaders/main.py b/backend/open_webui/retrieval/loaders/main.py index e75c69682d2..e1b485b84ba 100644 --- a/backend/open_webui/retrieval/loaders/main.py +++ b/backend/open_webui/retrieval/loaders/main.py @@ -21,7 +21,7 @@ ) from langchain_core.documents import Document -from mistralai import Mistral +from .mistral import MistralLoader from open_webui.env import SRC_LOG_LEVELS, GLOBAL_LOG_LEVEL @@ -166,53 +166,6 @@ def load(self) -> list[Document]: raise Exception(f"Error calling Docling: {error_msg}") -class MistralLoader: - def __init__(self, api_key: str, file_path: str): - self.api_key = api_key - self.file_path = file_path - self.client = Mistral(api_key=api_key) - - def load(self) -> list[Document]: - log.info("Uploading file to Mistral OCR") - uploaded_pdf = self.client.files.upload( - file={ - "file_name": self.file_path.split("/")[-1], - "content": open(self.file_path, "rb"), - }, - purpose="ocr", - ) - log.info("File uploaded to Mistral OCR, getting signed URL") - signed_url = self.client.files.get_signed_url(file_id=uploaded_pdf.id) - log.info("Signed URL received, processing OCR") - ocr_response = self.client.ocr.process( - model="mistral-ocr-latest", - document={ - "type": "document_url", - "document_url": signed_url.url, - }, - ) - log.info("OCR processing done, deleting uploaded file") - deleted_pdf = self.client.files.delete(file_id=uploaded_pdf.id) - log.info("Uploaded file deleted") - log.debug("OCR response: %s", ocr_response) - if not hasattr(ocr_response, "pages") or not ocr_response.pages: - log.error("No pages found in OCR response") - return [Document(page_content="No text content found", metadata={})] - - return [ - Document( - page_content=page.markdown, - metadata={ - "page": page.index, - "page_label": page.index + 1, - "total_pages": len(ocr_response.pages), - }, - ) - for page in 
ocr_response.pages - if hasattr(page, "markdown") and hasattr(page, "index") - ] - - class Loader: def __init__(self, engine: str = "", **kwargs): self.engine = engine diff --git a/backend/open_webui/retrieval/loaders/mistral.py b/backend/open_webui/retrieval/loaders/mistral.py new file mode 100644 index 00000000000..2203a5b6d4a --- /dev/null +++ b/backend/open_webui/retrieval/loaders/mistral.py @@ -0,0 +1,226 @@ +import requests +import logging +import os +import sys +from typing import List, Dict, Any + +from langchain_core.documents import Document +from open_webui.env import SRC_LOG_LEVELS, GLOBAL_LOG_LEVEL + +logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL) +log = logging.getLogger(__name__) +log.setLevel(SRC_LOG_LEVELS["RAG"]) + + +class MistralLoader: + """ + Loads documents by processing them through the Mistral OCR API using requests. + """ + + BASE_API_URL = "https://api.mistral.ai/v1" + + def __init__(self, api_key: str, file_path: str): + """ + Initializes the loader. + + Args: + api_key: Your Mistral API key. + file_path: The local path to the PDF file to process. 
+ """ + if not api_key: + raise ValueError("API key cannot be empty.") + if not os.path.exists(file_path): + raise FileNotFoundError(f"File not found at {file_path}") + + self.api_key = api_key + self.file_path = file_path + self.headers = {"Authorization": f"Bearer {self.api_key}"} + + def _handle_response(self, response: requests.Response) -> Dict[str, Any]: + """Checks response status and returns JSON content.""" + try: + response.raise_for_status() # Raises HTTPError for bad responses (4xx or 5xx) + # Handle potential empty responses for certain successful requests (e.g., DELETE) + if response.status_code == 204 or not response.content: + return {} # Return empty dict if no content + return response.json() + except requests.exceptions.HTTPError as http_err: + log.error(f"HTTP error occurred: {http_err} - Response: {response.text}") + raise + except requests.exceptions.RequestException as req_err: + log.error(f"Request exception occurred: {req_err}") + raise + except ValueError as json_err: # Includes JSONDecodeError + log.error(f"JSON decode error: {json_err} - Response: {response.text}") + raise # Re-raise after logging + + def _upload_file(self) -> str: + """Uploads the file to Mistral for OCR processing.""" + log.info("Uploading file to Mistral API") + url = f"{self.BASE_API_URL}/files" + file_name = os.path.basename(self.file_path) + + try: + with open(self.file_path, "rb") as f: + files = {"file": (file_name, f, "application/pdf")} + data = {"purpose": "ocr"} + # No explicit Content-Type header needed here, requests handles it for multipart/form-data + upload_headers = self.headers.copy() # Avoid modifying self.headers + + response = requests.post( + url, headers=upload_headers, files=files, data=data + ) + + response_data = self._handle_response(response) + file_id = response_data.get("id") + if not file_id: + raise ValueError("File ID not found in upload response.") + log.info(f"File uploaded successfully. 
File ID: {file_id}") + return file_id + except Exception as e: + log.error(f"Failed to upload file: {e}") + raise + + def _get_signed_url(self, file_id: str) -> str: + """Retrieves a temporary signed URL for the uploaded file.""" + log.info(f"Getting signed URL for file ID: {file_id}") + url = f"{self.BASE_API_URL}/files/{file_id}/url" + # Using expiry=24 as per the curl example; adjust if needed. + params = {"expiry": 24} + signed_url_headers = {**self.headers, "Accept": "application/json"} + + try: + response = requests.get(url, headers=signed_url_headers, params=params) + response_data = self._handle_response(response) + signed_url = response_data.get("url") + if not signed_url: + raise ValueError("Signed URL not found in response.") + log.info("Signed URL received.") + return signed_url + except Exception as e: + log.error(f"Failed to get signed URL: {e}") + raise + + def _process_ocr(self, signed_url: str) -> Dict[str, Any]: + """Sends the signed URL to the OCR endpoint for processing.""" + log.info("Processing OCR via Mistral API") + url = f"{self.BASE_API_URL}/ocr" + ocr_headers = { + **self.headers, + "Content-Type": "application/json", + "Accept": "application/json", + } + payload = { + "model": "mistral-ocr-latest", + "document": { + "type": "document_url", + "document_url": signed_url, + }, + # "include_image_base64": False # Explicitly set if needed, default seems false + } + + try: + response = requests.post(url, headers=ocr_headers, json=payload) + ocr_response = self._handle_response(response) + log.info("OCR processing done.") + log.debug("OCR response: %s", ocr_response) + return ocr_response + except Exception as e: + log.error(f"Failed during OCR processing: {e}") + raise + + def _delete_file(self, file_id: str) -> None: + """Deletes the file from Mistral storage.""" + log.info(f"Deleting uploaded file ID: {file_id}") + url = f"{self.BASE_API_URL}/files/{file_id}" + # No specific Accept header needed, default or Authorization is usually 
sufficient + + try: + response = requests.delete(url, headers=self.headers) + delete_response = self._handle_response( + response + ) # Check status, ignore response body unless needed + log.info( + f"File deleted successfully: {delete_response}" + ) # Log the response if available + except Exception as e: + # Log error but don't necessarily halt execution if deletion fails + log.error(f"Failed to delete file ID {file_id}: {e}") + # Depending on requirements, you might choose to raise the error here + + def load(self) -> List[Document]: + """ + Executes the full OCR workflow: upload, get URL, process OCR, delete file. + + Returns: + A list of Document objects, one for each page processed. + """ + file_id = None + try: + # 1. Upload file + file_id = self._upload_file() + + # 2. Get Signed URL + signed_url = self._get_signed_url(file_id) + + # 3. Process OCR + ocr_response = self._process_ocr(signed_url) + + # 4. Process results + pages_data = ocr_response.get("pages") + if not pages_data: + log.warning("No pages found in OCR response.") + return [Document(page_content="No text content found", metadata={})] + + documents = [] + total_pages = len(pages_data) + for page_data in pages_data: + page_content = page_data.get("markdown") + page_index = page_data.get("index") # API uses 0-based index + + if page_content is not None and page_index is not None: + documents.append( + Document( + page_content=page_content, + metadata={ + "page": page_index, # 0-based index from API + "page_label": page_index + + 1, # 1-based label for convenience + "total_pages": total_pages, + # Add other relevant metadata from page_data if available/needed + # e.g., page_data.get('width'), page_data.get('height') + }, + ) + ) + else: + log.warning( + f"Skipping page due to missing 'markdown' or 'index'. Data: {page_data}" + ) + + if not documents: + # Case where pages existed but none had valid markdown/index + log.warning( + "OCR response contained pages, but none had valid content/index." 
+ ) + return [ + Document( + page_content="No text content found in valid pages", metadata={} + ) + ] + + return documents + + except Exception as e: + log.error(f"An error occurred during the loading process: {e}") + # Return an empty list or a specific error document on failure + return [Document(page_content=f"Error during processing: {e}", metadata={})] + finally: + # 5. Delete file (attempt even if prior steps failed after upload) + if file_id: + try: + self._delete_file(file_id) + except Exception as del_e: + # Log deletion error, but don't overwrite original error if one occurred + log.error( + f"Cleanup error: Could not delete file ID {file_id}. Reason: {del_e}" + ) diff --git a/backend/requirements.txt b/backend/requirements.txt index 078d8d2d34e..ca2ea50609d 100644 --- a/backend/requirements.txt +++ b/backend/requirements.txt @@ -77,7 +77,6 @@ psutil sentencepiece soundfile==0.13.1 azure-ai-documentintelligence==1.0.0 -mistralai==1.6.0 pillow==11.1.0 opencv-python-headless==4.11.0.86 From c5a8d2f8571a801dffd4795eec2eb616cf9260d3 Mon Sep 17 00:00:00 2001 From: Patrick Wachter Date: Tue, 1 Apr 2025 19:14:26 +0200 Subject: [PATCH 517/623] refactor: update MistralLoader documentation and adjust parameters for signed URL retrieval --- backend/open_webui/retrieval/loaders/mistral.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/backend/open_webui/retrieval/loaders/mistral.py b/backend/open_webui/retrieval/loaders/mistral.py index 2203a5b6d4a..8f3a960a283 100644 --- a/backend/open_webui/retrieval/loaders/mistral.py +++ b/backend/open_webui/retrieval/loaders/mistral.py @@ -14,7 +14,7 @@ class MistralLoader: """ - Loads documents by processing them through the Mistral OCR API using requests. + Loads documents by processing them through the Mistral OCR API. 
""" BASE_API_URL = "https://api.mistral.ai/v1" @@ -64,7 +64,7 @@ def _upload_file(self) -> str: with open(self.file_path, "rb") as f: files = {"file": (file_name, f, "application/pdf")} data = {"purpose": "ocr"} - # No explicit Content-Type header needed here, requests handles it for multipart/form-data + upload_headers = self.headers.copy() # Avoid modifying self.headers response = requests.post( @@ -85,8 +85,7 @@ def _get_signed_url(self, file_id: str) -> str: """Retrieves a temporary signed URL for the uploaded file.""" log.info(f"Getting signed URL for file ID: {file_id}") url = f"{self.BASE_API_URL}/files/{file_id}/url" - # Using expiry=24 as per the curl example; adjust if needed. - params = {"expiry": 24} + params = {"expiry": 1} signed_url_headers = {**self.headers, "Accept": "application/json"} try: @@ -116,7 +115,7 @@ def _process_ocr(self, signed_url: str) -> Dict[str, Any]: "type": "document_url", "document_url": signed_url, }, - # "include_image_base64": False # Explicitly set if needed, default seems false + "include_image_base64": False, } try: From 2b7dd6e39999a5b2aaa45d095638a1231edf796d Mon Sep 17 00:00:00 2001 From: landerrosette <57791410+landerrosette@users.noreply.github.com> Date: Wed, 2 Apr 2025 02:58:02 +0800 Subject: [PATCH 518/623] refactor: standardize filter valve retrieval logic --- backend/open_webui/utils/filter.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/backend/open_webui/utils/filter.py b/backend/open_webui/utils/filter.py index a11aeb092c8..76c9db9eb1a 100644 --- a/backend/open_webui/utils/filter.py +++ b/backend/open_webui/utils/filter.py @@ -12,9 +12,9 @@ def get_sorted_filter_ids(model: dict): def get_priority(function_id): function = Functions.get_function_by_id(function_id) - if function is not None and hasattr(function, "valves"): - # TODO: Fix FunctionModel to include vavles - return (function.valves if function.valves else {}).get("priority", 0) + if function is not None: + valves = 
Functions.get_function_valves_by_id(function_id) + return valves.get("priority", 0) if valves else 0 return 0 filter_ids = [function.id for function in Functions.get_global_filter_functions()] From adaa61498610b70502008659b22a865a583e5836 Mon Sep 17 00:00:00 2001 From: Panda Date: Wed, 2 Apr 2025 10:35:05 +0200 Subject: [PATCH 519/623] fix --- src/lib/i18n/locales/zh-CN/translation.json | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/lib/i18n/locales/zh-CN/translation.json b/src/lib/i18n/locales/zh-CN/translation.json index cafcd152ab6..35ffc2e0e16 100644 --- a/src/lib/i18n/locales/zh-CN/translation.json +++ b/src/lib/i18n/locales/zh-CN/translation.json @@ -344,7 +344,7 @@ "Draw": "平局", "Drop any files here to add to the conversation": "拖动文件到此处以添加到对话中", "e.g. '30s','10m'. Valid time units are 's', 'm', 'h'.": "例如 '30s','10m'。有效的时间单位是秒:'s',分:'m',时:'h'。", - "e.g. \"json\" or a JSON schema": "例如 \"json\" 或 JSON 结构", + "e.g. \"json\" or a JSON schema": "例如 \"json\" 或一个 JSON schema", "e.g. 60": "例如 '60'", "e.g. A filter to remove profanity from text": "例如:一个用于过滤文本中不当内容的过滤器", "e.g. My Filter": "例如:我的过滤器", @@ -608,7 +608,7 @@ "Integration": "集成", "Interface": "界面", "Invalid file format.": "无效文件格式。", - "Invalid JSON schema": "无效的 JSON", + "Invalid JSON schema": "无效的 JSON schema", "Invalid Tag": "无效标签", "is typing...": "输入中...", "January": "一月", @@ -835,7 +835,7 @@ "Profile Image": "用户头像", "Prompt": "提示词 (Prompt)", "Prompt (e.g. Tell me a fun fact about the Roman Empire)": "提示词(例如:给我讲一个关于罗马帝国的趣事。)", - "Prompt Autocompletion": "提示词自动完成", + "Prompt Autocompletion": "提示词自动补全", "Prompt Content": "提示词内容", "Prompt created successfully": "提示词创建成功", "Prompt suggestions": "提示词建议", @@ -1061,7 +1061,7 @@ "Tiktoken": "Tiktoken", "Tip: Update multiple variable slots consecutively by pressing the tab key in the chat input after each replacement.": "提示:在每次替换后,在对话输入中按 Tab 键可以连续更新多个变量。", "Title": "标题", - "Title (e.g. 
Tell me a fun fact)": "标题(例如 给我讲一个有趣的事实)", + "Title (e.g. Tell me a fun fact)": "标题(例如:给我讲一个有趣的事实)", "Title Auto-Generation": "自动生成标题", "Title cannot be an empty string.": "标题不能为空。", "Title Generation": "标题生成", @@ -1134,7 +1134,7 @@ "Upload Progress": "上传进度", "URL": "URL", "URL Mode": "URL 模式", - "Use '#' in the prompt input to load and include your knowledge.": "在输入框中输入'#'号来加载你需要的知识库内容。", + "Use '#' in the prompt input to load and include your knowledge.": "在输入框中输入 '#' 号来加载你需要的知识库内容。", "Use Gravatar": "使用来自 Gravatar 的头像", "Use groups to group your users and assign permissions.": "使用权限组来组织用户并分配权限。", "Use Initials": "使用首个字符作为头像", From 0ac00b92569bd023de08c73663e6fe2314564e11 Mon Sep 17 00:00:00 2001 From: Patrick Wachter Date: Wed, 2 Apr 2025 13:56:10 +0200 Subject: [PATCH 520/623] refactor: update import path for MistralLoader --- backend/open_webui/retrieval/loaders/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/open_webui/retrieval/loaders/main.py b/backend/open_webui/retrieval/loaders/main.py index e1b485b84ba..7098822b421 100644 --- a/backend/open_webui/retrieval/loaders/main.py +++ b/backend/open_webui/retrieval/loaders/main.py @@ -21,7 +21,7 @@ ) from langchain_core.documents import Document -from .mistral import MistralLoader +from open_webui.retrieval.loaders.mistral import MistralLoader from open_webui.env import SRC_LOG_LEVELS, GLOBAL_LOG_LEVEL From de8f94b00896220f7d554945edce814de080dc1f Mon Sep 17 00:00:00 2001 From: SadmL Date: Wed, 2 Apr 2025 18:13:52 +0300 Subject: [PATCH 521/623] [i18n] Russian localization update Fixed typos and translated some lines --- src/lib/i18n/locales/ru-RU/translation.json | 50 ++++++++++----------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/src/lib/i18n/locales/ru-RU/translation.json b/src/lib/i18n/locales/ru-RU/translation.json index d68173e8979..50c47034436 100644 --- a/src/lib/i18n/locales/ru-RU/translation.json +++ b/src/lib/i18n/locales/ru-RU/translation.json @@ 
-70,8 +70,8 @@ "Already have an account?": "У вас уже есть учетная запись?", "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "Альтернатива top_p и направлена на обеспечение баланса качества и разнообразия. Параметр p представляет минимальную вероятность того, что токен будет рассмотрен, по сравнению с вероятностью наиболее вероятного токена. Например, при p=0,05 и наиболее вероятном значении токена, имеющем вероятность 0,9, логиты со значением менее 0,045 отфильтровываются.", "Always": "Всегда", - "Always Collapse Code Blocks": "", - "Always Expand Details": "", + "Always Collapse Code Blocks": "Всегда сворачивать блоки кода", + "Always Expand Details": "Всегда разворачивать детали", "Amazing": "Удивительный", "an assistant": "ассистент", "Analyzed": "Проанализировано", @@ -97,7 +97,7 @@ "Are you sure you want to delete this message?": "Вы уверены, что хотите удалить это сообщение?", "Are you sure you want to unarchive all archived chats?": "Вы уверены, что хотите разархивировать все заархивированные чаты?", "Are you sure?": "Вы уверены?", - "Arena Models": "Модели арены", + "Arena Models": "Арена моделей", "Artifacts": "Артефакты", "Ask": "Спросить", "Ask a question": "Задать вопрос", @@ -191,10 +191,10 @@ "Clone Chat": "Клонировать чат", "Clone of {{TITLE}}": "Клон {{TITLE}}", "Close": "Закрыть", - "Code execution": "Выполнение кода", - "Code Execution": "Выполнение кода", - "Code Execution Engine": "Механизм выполнения кода", - "Code Execution Timeout": "Время ожидания выполнения кода", + "Code execution": "Исполнение кода", + "Code Execution": "Исполнение кода", + "Code Execution Engine": "Механизм исполнения кода", + "Code Execution Timeout": "Время ожидания 
исполнения кода", "Code formatted successfully": "Код успешно отформатирован", "Code Interpreter": "Интерпретатор кода", "Code Interpreter Engine": "Механизм интерпретатора кода", @@ -206,8 +206,8 @@ "ComfyUI API Key": "ComfyUI ключ API", "ComfyUI Base URL": "Базовый адрес URL ComfyUI", "ComfyUI Base URL is required.": "Необходим базовый адрес URL ComfyUI.", - "ComfyUI Workflow": "ComfyUI Workflow", - "ComfyUI Workflow Nodes": "Узлы ComfyUI Workflow", + "ComfyUI Workflow": "Рабочий процесс ComfyUI", + "ComfyUI Workflow Nodes": "Узлы рабочего процесса ComfyUI", "Command": "Команда", "Completions": "Завершения", "Concurrent Requests": "Одновременные запросы", @@ -217,8 +217,8 @@ "Confirm your action": "Подтвердите свое действие", "Confirm your new password": "Подтвердите свой новый пароль", "Connect to your own OpenAI compatible API endpoints.": "Подключитесь к своим собственным энд-поинтам API, совместимым с OpenAI.", - "Connect to your own OpenAPI compatible external tool servers.": "", - "Connections": "Соединение", + "Connect to your own OpenAPI compatible external tool servers.": "Подключитесь к вашим собственным внешним инструментальным серверам, совместимым с OpenAPI.", + "Connections": "Подключения", "Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "Ограничивает усилия по обоснованию для моделей обоснования. 
Применимо только к моделям обоснования от конкретных поставщиков, которые поддерживают усилия по обоснованию.", "Contact Admin for WebUI Access": "Обратитесь к администратору для получения доступа к WebUI", "Content": "Содержание", @@ -370,7 +370,7 @@ "Embedding model set to \"{{embedding_model}}\"": "Модель встраивания установлена в \"{{embedding_model}}\"", "Enable API Key": "Включить ключ API", "Enable autocomplete generation for chat messages": "Включить генерацию автозаполнения для сообщений чата", - "Enable Code Execution": "Включить выполнение кода", + "Enable Code Execution": "Включить исполнение кода", "Enable Code Interpreter": "Включить интерпретатор кода", "Enable Community Sharing": "Включить совместное использование", "Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Включите блокировку памяти (mlock), чтобы предотвратить выгрузку данных модели из ОЗУ. Эта опция блокирует рабочий набор страниц модели в оперативной памяти, гарантируя, что они не будут выгружены на диск. 
Это может помочь поддерживать производительность, избегая ошибок страниц и обеспечивая быстрый доступ к данным.", @@ -589,7 +589,7 @@ "Image Prompt Generation Prompt": "Промпт для создание промпта изображения", "Image Settings": "Настройки изображения", "Images": "Изображения", - "Import Chats": "Импортировать Чаты", + "Import Chats": "Импортировать чаты", "Import Config from JSON File": "Импорт конфигурации из JSON-файла", "Import Functions": "Импортировать Функции", "Import Models": "Импортировать Модели", @@ -636,11 +636,11 @@ "Kokoro.js (Browser)": "Kokoro.js (Браузер)", "Kokoro.js Dtype": "", "Label": "Пометка", - "Landing Page Mode": "Режим Целевой Страницы", + "Landing Page Mode": "Режим целевой страницы", "Language": "Язык", - "Last Active": "Последний Активный", - "Last Modified": "Последнее Изменение", - "Last reply": "Последний Ответ", + "Last Active": "Последний активный", + "Last Modified": "Последнее изменение", + "Last reply": "Последний ответ", "LDAP": "", "LDAP server updated": "LDAP сервер обновлен", "Leaderboard": "Таблица Лидеров", @@ -655,7 +655,7 @@ "Listening...": "Слушаю...", "Llama.cpp": "", "LLMs can make mistakes. Verify important information.": "LLMs могут допускать ошибки. Проверяйте важную информацию.", - "Loader": "", + "Loader": "Загрузчик", "Loading Kokoro.js...": "", "Local": "", "Local Models": "Локальные модели", @@ -673,7 +673,7 @@ "Manage Ollama API Connections": "Управление соединениями API Ollama", "Manage OpenAI API Connections": "Управление соединениями API OpenAI", "Manage Pipelines": "Управление конвейерами", - "Manage Tool Servers": "", + "Manage Tool Servers": "Управление серверами инструментов", "March": "Март", "Max Tokens (num_predict)": "Максимальное количество токенов (num_predict)", "Max Upload Count": "Максимальное количество загрузок", @@ -835,7 +835,7 @@ "Profile Image": "Изображение профиля", "Prompt": "Промпт", "Prompt (e.g. 
Tell me a fun fact about the Roman Empire)": "Промпт (например, Расскажи мне интересный факт о Римской империи)", - "Prompt Autocompletion": "", + "Prompt Autocompletion": "Автодополнение промпта", "Prompt Content": "Содержание промпта", "Prompt created successfully": "Промпт успешно создан", "Prompt suggestions": "Предложения промптов", @@ -900,7 +900,7 @@ "Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "Прямое сохранение журналов чата в хранилище вашего браузера больше не поддерживается. Пожалуйста, потратьте минуту, чтобы скачать и удалить ваши журналы чата, нажав на кнопку ниже. Не волнуйтесь, вы легко сможете повторно импортировать свои журналы чата в бэкенд через", "Scroll to bottom when switching between branches": "Прокручивать вниз при переключении веток", "Search": "Поиск", - "Search a model": "Поиск модели", + "Search a model": "Поиск по моделям", "Search Base": "Поиск в базе", "Search Chats": "Поиск в чатах", "Search Collection": "Поиск коллекции", @@ -916,7 +916,7 @@ "Search Tools": "Поиск инструментов", "SearchApi API Key": "Ключ SearchApi API", "SearchApi Engine": "Движок SearchApi", - "Searched {{count}} sites": "Поиск по {{count}} сайтам]", + "Searched {{count}} sites": "Поиск по {{count}} сайтам", "Searching \"{{searchQuery}}\"": "Поиск по запросу \"{{searchQuery}}\"", "Searching Knowledge for \"{{searchQuery}}\"": "Поиск знания для \"{{searchQuery}}\"", "Searxng Query URL": "URL-адрес запроса Searxng", @@ -962,7 +962,7 @@ "Set Task Model": "Установить модель задачи", "Set the number of layers, which will be off-loaded to GPU. 
Increasing this value can significantly improve performance for models that are optimized for GPU acceleration but may also consume more power and GPU resources.": "Задайте количество слоев, которые будут загружаться в графический процессор. Увеличение этого значения может значительно повысить производительность моделей, оптимизированных для ускорения работы графического процессора, но также может потреблять больше энергии и ресурсов графического процессора.", "Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Задайте количество рабочих потоков, используемых для вычислений. Этот параметр определяет, сколько потоков используется для одновременной обработки входящих запросов. Увеличение этого значения может повысить производительность при высоких рабочих нагрузках с параллелизмом, но также может потреблять больше ресурсов процессора.", - "Set Voice": "Установить голос", + "Set Voice": "Задать голос", "Set whisper model": "Выбрать модель whiser", "Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "Устанавливает нулевое значение для символов, которые появились хотя бы один раз. Более высокое значение (например, 1,5) будет более строгим наказанием за повторения, в то время как более низкое значение (например, 0,9) будет более мягким. При значении 0 он отключается.", "Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. 
At 0, it is disabled.": "Устанавливает смещение масштабирования для токенов, чтобы наказывать за повторения, в зависимости от того, сколько раз они появлялись. Более высокое значение (например, 1,5) будет наказывать за повторения более строго, в то время как более низкое значение (например, 0,9) будет более мягким. При значении 0 оно отключается.", @@ -1111,7 +1111,7 @@ "Uh-oh! There was an issue with the response.": "Печаль-беда! Возникла проблема с ответом.", "UI": "Пользовательский интерфейс", "Unarchive All": "Разархивировать ВСЁ", - "Unarchive All Archived Chats": "Разархивировать ВСЕ Заархивированные Чаты", + "Unarchive All Archived Chats": "Разархивировать ВСЕ Заархивированные чаты", "Unarchive Chat": "Разархивировать чат", "Unlock mysteries": "Разблокируйте тайны", "Unpin": "Открепить", @@ -1143,7 +1143,7 @@ "user": "пользователь", "User": "Пользователь", "User location successfully retrieved.": "Местоположение пользователя успешно получено.", - "User Webhooks": "", + "User Webhooks": "Пользовательские веб-хуки", "Username": "Имя пользователя", "Users": "Пользователи", "Using the default arena model with all models. Click the plus button to add custom models.": "Использование модели арены по умолчанию со всеми моделями. Нажмите кнопку «плюс», чтобы добавить пользовательские модели.", From d65471c4201381d0d4fc8b99d595c8dce8871235 Mon Sep 17 00:00:00 2001 From: Silentoplayz <50341825+Silentoplayz@users.noreply.github.com> Date: Wed, 2 Apr 2025 11:28:45 -0400 Subject: [PATCH 522/623] fix my dev environment works again! 
--- backend/open_webui/env.py | 4 ++-- backend/open_webui/utils/auth.py | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/backend/open_webui/env.py b/backend/open_webui/env.py index e3819fdc5ed..cd2b063ac38 100644 --- a/backend/open_webui/env.py +++ b/backend/open_webui/env.py @@ -66,7 +66,7 @@ #################################### GLOBAL_LOG_LEVEL = os.environ.get("GLOBAL_LOG_LEVEL", "").upper() -if GLOBAL_LOG_LEVEL in logging.getLevelNamesMapping(): +if GLOBAL_LOG_LEVEL in logging._levelToName: logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL, force=True) else: GLOBAL_LOG_LEVEL = "INFO" @@ -99,7 +99,7 @@ for source in log_sources: log_env_var = source + "_LOG_LEVEL" SRC_LOG_LEVELS[source] = os.environ.get(log_env_var, "").upper() - if SRC_LOG_LEVELS[source] not in logging.getLevelNamesMapping(): + if SRC_LOG_LEVELS[source] not in logging._levelToName: SRC_LOG_LEVELS[source] = GLOBAL_LOG_LEVEL log.info(f"{log_env_var}: {SRC_LOG_LEVELS[source]}") diff --git a/backend/open_webui/utils/auth.py b/backend/open_webui/utils/auth.py index 6dd3234b061..54ad6a0bf22 100644 --- a/backend/open_webui/utils/auth.py +++ b/backend/open_webui/utils/auth.py @@ -8,7 +8,9 @@ import os -from datetime import UTC, datetime, timedelta +from datetime import datetime, timedelta +import pytz +from pytz import UTC from typing import Optional, Union, List, Dict from open_webui.models.users import Users From ee68c9ea5556c23f76d62b50eeab28908446dc5d Mon Sep 17 00:00:00 2001 From: Silentoplayz <50341825+Silentoplayz@users.noreply.github.com> Date: Wed, 2 Apr 2025 14:25:03 -0400 Subject: [PATCH 523/623] Update Chats.svelte Added Archived Chats option in Chats settings section --- src/lib/components/chat/Settings/Chats.svelte | 36 +++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/src/lib/components/chat/Settings/Chats.svelte b/src/lib/components/chat/Settings/Chats.svelte index 2eaea240ffc..7a252458f3a 100644 --- 
a/src/lib/components/chat/Settings/Chats.svelte +++ b/src/lib/components/chat/Settings/Chats.svelte @@ -16,6 +16,7 @@ import { onMount, getContext } from 'svelte'; import { goto } from '$app/navigation'; import { toast } from 'svelte-sonner'; + import ArchivedChatsModal from '$lib/components/layout/Sidebar/ArchivedChatsModal.svelte'; const i18n = getContext('i18n'); @@ -26,6 +27,7 @@ let showArchiveConfirm = false; let showDeleteConfirm = false; + let showArchivedChatsModal = false; let chatImportInputElement: HTMLInputElement; @@ -95,8 +97,16 @@ await chats.set(await getChatList(localStorage.token, $currentChatPage)); scrollPaginationEnabled.set(true); }; + + const handleArchivedChatsChange = async () => { + currentChatPage.set(1); + await chats.set(await getChatList(localStorage.token, $currentChatPage)); + scrollPaginationEnabled.set(true); + }; + +
@@ -157,6 +167,32 @@
+ + {#if showArchiveConfirm}
From 548c7f17d7ec26144f22402f50348b4eb07b4bad Mon Sep 17 00:00:00 2001 From: CityOfBunbury <165870542+CityOfBunbury@users.noreply.github.com> Date: Thu, 3 Apr 2025 08:24:14 +0800 Subject: [PATCH 524/623] Added OAUTH_USE_PICTURE_CLAIM env var Added OAUTH_USE_PICTURE_CLAIM to config.py Added check to oauth.py on OAUTH_USE_PICTURE_CLAIM, to decide whether to use the profile picture in the claim or the default user.png --- backend/open_webui/config.py | 6 +++ backend/open_webui/utils/oauth.py | 75 +++++++++++++++++-------------- 2 files changed, 47 insertions(+), 34 deletions(-) diff --git a/backend/open_webui/config.py b/backend/open_webui/config.py index 0ac92bd23bd..6dad20f61dd 100644 --- a/backend/open_webui/config.py +++ b/backend/open_webui/config.py @@ -331,6 +331,12 @@ def __getattr__(self, key): # OAuth config #################################### +OAUTH_USE_PICTURE_CLAIM = PersistentConfig( + "OAUTH_USE_PICTURE_CLAIM", + "oauth.oidc.use_picture_claim", + os.environ.get("OAUTH_USE_PICTURE_CLAIM", "True").lower() == "true", +) + ENABLE_OAUTH_SIGNUP = PersistentConfig( "ENABLE_OAUTH_SIGNUP", "oauth.enable_signup", diff --git a/backend/open_webui/utils/oauth.py b/backend/open_webui/utils/oauth.py index ab50247d8bd..75c3842a64d 100644 --- a/backend/open_webui/utils/oauth.py +++ b/backend/open_webui/utils/oauth.py @@ -23,6 +23,7 @@ OAUTH_PROVIDERS, ENABLE_OAUTH_ROLE_MANAGEMENT, ENABLE_OAUTH_GROUP_MANAGEMENT, + OAUTH_USE_PICTURE_CLAIM, OAUTH_ROLES_CLAIM, OAUTH_GROUPS_CLAIM, OAUTH_EMAIL_CLAIM, @@ -57,6 +58,7 @@ auth_manager_config.OAUTH_MERGE_ACCOUNTS_BY_EMAIL = OAUTH_MERGE_ACCOUNTS_BY_EMAIL auth_manager_config.ENABLE_OAUTH_ROLE_MANAGEMENT = ENABLE_OAUTH_ROLE_MANAGEMENT auth_manager_config.ENABLE_OAUTH_GROUP_MANAGEMENT = ENABLE_OAUTH_GROUP_MANAGEMENT +auth_manager_config.OAUTH_USE_PICTURE_CLAIM = OAUTH_USE_PICTURE_CLAIM auth_manager_config.OAUTH_ROLES_CLAIM = OAUTH_ROLES_CLAIM auth_manager_config.OAUTH_GROUPS_CLAIM = OAUTH_GROUPS_CLAIM 
auth_manager_config.OAUTH_EMAIL_CLAIM = OAUTH_EMAIL_CLAIM @@ -325,41 +327,46 @@ async def handle_callback(self, request, provider, response): if existing_user: raise HTTPException(400, detail=ERROR_MESSAGES.EMAIL_TAKEN) - picture_claim = auth_manager_config.OAUTH_PICTURE_CLAIM - picture_url = user_data.get( - picture_claim, OAUTH_PROVIDERS[provider].get("picture_url", "") - ) - if picture_url: - # Download the profile image into a base64 string - try: - access_token = token.get("access_token") - get_kwargs = {} - if access_token: - get_kwargs["headers"] = { - "Authorization": f"Bearer {access_token}", - } - async with aiohttp.ClientSession() as session: - async with session.get(picture_url, **get_kwargs) as resp: - if resp.ok: - picture = await resp.read() - base64_encoded_picture = base64.b64encode( - picture - ).decode("utf-8") - guessed_mime_type = mimetypes.guess_type( - picture_url - )[0] - if guessed_mime_type is None: - # assume JPG, browsers are tolerant enough of image formats - guessed_mime_type = "image/jpeg" - picture_url = f"data:{guessed_mime_type};base64,{base64_encoded_picture}" - else: - picture_url = "/user.png" - except Exception as e: - log.error( - f"Error downloading profile image '{picture_url}': {e}" - ) + # Check if we should use the picture claim based on configuration + if auth_manager_config.OAUTH_USE_PICTURE_CLAIM: + picture_claim = auth_manager_config.OAUTH_PICTURE_CLAIM + picture_url = user_data.get( + picture_claim, OAUTH_PROVIDERS[provider].get("picture_url", "") + ) + if picture_url: + # Download the profile image into a base64 string + try: + access_token = token.get("access_token") + get_kwargs = {} + if access_token: + get_kwargs["headers"] = { + "Authorization": f"Bearer {access_token}", + } + async with aiohttp.ClientSession() as session: + async with session.get(picture_url, **get_kwargs) as resp: + if resp.ok: + picture = await resp.read() + base64_encoded_picture = base64.b64encode( + picture + ).decode("utf-8") + 
guessed_mime_type = mimetypes.guess_type( + picture_url + )[0] + if guessed_mime_type is None: + # assume JPG, browsers are tolerant enough of image formats + guessed_mime_type = "image/jpeg" + picture_url = f"data:{guessed_mime_type};base64,{base64_encoded_picture}" + else: + picture_url = "/user.png" + except Exception as e: + log.error( + f"Error downloading profile image '{picture_url}': {e}" + ) + picture_url = "/user.png" + if not picture_url: picture_url = "/user.png" - if not picture_url: + else: + # If OAUTH_USE_PICTURE_CLAIM is False, just use the default image picture_url = "/user.png" username_claim = auth_manager_config.OAUTH_USERNAME_CLAIM From dd5bafe8afcc22c53da5c38989a5b19bf11cc198 Mon Sep 17 00:00:00 2001 From: silentoplayz <50341825+silentoplayz@users.noreply.github.com> Date: Thu, 3 Apr 2025 00:36:16 +0000 Subject: [PATCH 525/623] Update env.py Revert --- backend/open_webui/env.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/backend/open_webui/env.py b/backend/open_webui/env.py index cd2b063ac38..e3819fdc5ed 100644 --- a/backend/open_webui/env.py +++ b/backend/open_webui/env.py @@ -66,7 +66,7 @@ #################################### GLOBAL_LOG_LEVEL = os.environ.get("GLOBAL_LOG_LEVEL", "").upper() -if GLOBAL_LOG_LEVEL in logging._levelToName: +if GLOBAL_LOG_LEVEL in logging.getLevelNamesMapping(): logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL, force=True) else: GLOBAL_LOG_LEVEL = "INFO" @@ -99,7 +99,7 @@ for source in log_sources: log_env_var = source + "_LOG_LEVEL" SRC_LOG_LEVELS[source] = os.environ.get(log_env_var, "").upper() - if SRC_LOG_LEVELS[source] not in logging._levelToName: + if SRC_LOG_LEVELS[source] not in logging.getLevelNamesMapping(): SRC_LOG_LEVELS[source] = GLOBAL_LOG_LEVEL log.info(f"{log_env_var}: {SRC_LOG_LEVELS[source]}") From 0644abe402bff77876829ae3ee0dca1f203dba6b Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Wed, 2 Apr 2025 17:55:24 -0700 Subject: [PATCH 526/623] 
fix: admin folder deletion issue --- backend/open_webui/routers/folders.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/backend/open_webui/routers/folders.py b/backend/open_webui/routers/folders.py index cf37f9329da..2c41c92854b 100644 --- a/backend/open_webui/routers/folders.py +++ b/backend/open_webui/routers/folders.py @@ -236,7 +236,8 @@ async def delete_folder_by_id( chat_delete_permission = has_permission( user.id, "chat.delete", request.app.state.config.USER_PERMISSIONS ) - if not chat_delete_permission: + + if user.role != "admin" and not chat_delete_permission: raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, detail=ERROR_MESSAGES.ACCESS_PROHIBITED, From 94bf49440d981eb42b6a181403248ca7fadb3df7 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Wed, 2 Apr 2025 18:15:14 -0700 Subject: [PATCH 527/623] enh: unload hybrid model if set to False --- backend/open_webui/routers/retrieval.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/backend/open_webui/routers/retrieval.py b/backend/open_webui/routers/retrieval.py index 979cd2c70c2..250d27eb394 100644 --- a/backend/open_webui/routers/retrieval.py +++ b/backend/open_webui/routers/retrieval.py @@ -124,7 +124,7 @@ def get_ef( def get_rf( - reranking_model: str, + reranking_model: Optional[str] = None, auto_update: bool = False, ): rf = None @@ -762,6 +762,9 @@ async def update_query_settings( form_data.hybrid if form_data.hybrid else False ) + if not request.app.state.config.ENABLE_RAG_HYBRID_SEARCH: + request.app.state.rf = None + return { "status": True, "template": request.app.state.config.RAG_TEMPLATE, From 7eea95a48c60e7d3833481bb0bf7369f7a77590d Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Wed, 2 Apr 2025 18:36:03 -0700 Subject: [PATCH 528/623] feat: direct tools user permissions --- backend/open_webui/config.py | 6 ++++++ backend/open_webui/routers/users.py | 1 + src/lib/components/admin/Users/Groups.svelte | 1 + 
.../components/admin/Users/Groups/EditGroupModal.svelte | 7 +++++++ src/lib/components/admin/Users/Groups/Permissions.svelte | 9 +++++++++ src/lib/components/chat/SettingsModal.svelte | 2 +- 6 files changed, 25 insertions(+), 1 deletion(-) diff --git a/backend/open_webui/config.py b/backend/open_webui/config.py index 02f61696ecc..0bafc98ae78 100644 --- a/backend/open_webui/config.py +++ b/backend/open_webui/config.py @@ -1034,6 +1034,11 @@ def oidc_oauth_register(client): == "true" ) +USER_PERMISSIONS_FEATURES_DIRECT_TOOL_SERVERS = ( + os.environ.get("USER_PERMISSIONS_FEATURES_DIRECT_TOOL_SERVERS", "False").lower() + == "true" +) + USER_PERMISSIONS_FEATURES_WEB_SEARCH = ( os.environ.get("USER_PERMISSIONS_FEATURES_WEB_SEARCH", "True").lower() == "true" ) @@ -1071,6 +1076,7 @@ def oidc_oauth_register(client): "temporary_enforced": USER_PERMISSIONS_CHAT_TEMPORARY_ENFORCED, }, "features": { + "direct_tool_servers": USER_PERMISSIONS_FEATURES_DIRECT_TOOL_SERVERS, "web_search": USER_PERMISSIONS_FEATURES_WEB_SEARCH, "image_generation": USER_PERMISSIONS_FEATURES_IMAGE_GENERATION, "code_interpreter": USER_PERMISSIONS_FEATURES_CODE_INTERPRETER, diff --git a/backend/open_webui/routers/users.py b/backend/open_webui/routers/users.py index 4cf9102e144..d1046bcedb8 100644 --- a/backend/open_webui/routers/users.py +++ b/backend/open_webui/routers/users.py @@ -93,6 +93,7 @@ class ChatPermissions(BaseModel): class FeaturesPermissions(BaseModel): + direct_tool_servers: bool = False web_search: bool = True image_generation: bool = True code_interpreter: bool = True diff --git a/src/lib/components/admin/Users/Groups.svelte b/src/lib/components/admin/Users/Groups.svelte index e2375a624e9..e287feb1d53 100644 --- a/src/lib/components/admin/Users/Groups.svelte +++ b/src/lib/components/admin/Users/Groups.svelte @@ -67,6 +67,7 @@ temporary_enforced: false }, features: { + direct_tool_servers: false, web_search: true, image_generation: true, code_interpreter: true diff --git 
a/src/lib/components/admin/Users/Groups/EditGroupModal.svelte b/src/lib/components/admin/Users/Groups/EditGroupModal.svelte index e492cc9b6d2..5b6bf6aabc7 100644 --- a/src/lib/components/admin/Users/Groups/EditGroupModal.svelte +++ b/src/lib/components/admin/Users/Groups/EditGroupModal.svelte @@ -38,6 +38,12 @@ prompts: false, tools: false }, + sharing: { + public_models: false, + public_knowledge: false, + public_prompts: false, + public_tools: false + }, chat: { controls: true, file_upload: true, @@ -46,6 +52,7 @@ temporary: true }, features: { + direct_tool_servers: false, web_search: true, image_generation: true, code_interpreter: true diff --git a/src/lib/components/admin/Users/Groups/Permissions.svelte b/src/lib/components/admin/Users/Groups/Permissions.svelte index 389477166f0..5dac0de94c6 100644 --- a/src/lib/components/admin/Users/Groups/Permissions.svelte +++ b/src/lib/components/admin/Users/Groups/Permissions.svelte @@ -28,6 +28,7 @@ temporary_enforced: false }, features: { + direct_tool_servers: false, web_search: true, image_generation: true, code_interpreter: true @@ -295,6 +296,14 @@
{$i18n.t('Features Permissions')}
+
+
+ {$i18n.t('Direct Tool Servers')} +
+ + +
+
{$i18n.t('Web Search')} diff --git a/src/lib/components/chat/SettingsModal.svelte b/src/lib/components/chat/SettingsModal.svelte index 15bf9c0baeb..ed2e9eb7a6d 100644 --- a/src/lib/components/chat/SettingsModal.svelte +++ b/src/lib/components/chat/SettingsModal.svelte @@ -488,7 +488,7 @@ {/if} {:else if tabId === 'tools'} - {#if $user?.role === 'admin' || ($user?.role === 'user' && $config?.features?.enable_direct_tools)} + {#if $user?.role === 'admin' || ($user?.role === 'user' && $user?.permissions?.features?.direct_tool_servers)}
@@ -268,7 +269,7 @@
-
+
{$i18n.t('API keys')}
diff --git a/src/lib/components/chat/Settings/Audio.svelte b/src/lib/components/chat/Settings/Audio.svelte index 0131aaae0dd..9b896628d4b 100644 --- a/src/lib/components/chat/Settings/Audio.svelte +++ b/src/lib/components/chat/Settings/Audio.svelte @@ -293,7 +293,7 @@
@@ -330,7 +330,7 @@
diff --git a/src/lib/components/chat/Settings/General.svelte b/src/lib/components/chat/Settings/General.svelte index d0c812ad4c4..b3e6cec8a4b 100644 --- a/src/lib/components/chat/Settings/General.svelte +++ b/src/lib/components/chat/Settings/General.svelte @@ -309,14 +309,15 @@
{#if $user?.role === 'admin' || $user?.permissions.chat?.controls} -
+
{$i18n.t('System Prompt')}
-