From 5ec6b4aace1d6bdc2e3843ff614f72f076744593 Mon Sep 17 00:00:00 2001 From: mudler <2420543+mudler@users.noreply.github.com> Date: Mon, 8 Dec 2025 10:16:49 +0000 Subject: [PATCH] chore(model gallery): :robot: add new models via gallery agent Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- gallery/index.yaml | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index faab804c3a76..a253693c6b40 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -23139,3 +23139,30 @@ - filename: Maenad-70B.i1-Q4_K_M.gguf sha256: dd5615ba1ab4ce2a3614afd547e0457fc14c7182de0d2e5f80d84323ee53ec1f uri: huggingface://mradermacher/Maenad-70B-i1-GGUF/Maenad-70B.i1-Q4_K_M.gguf +- !!merge <<: *gptoss + name: "kamil-gptoss20b-i1" + urls: + - https://huggingface.co/mradermacher/kamil-gptoss20b-i1-GGUF + description: | + The model described here is a **quantized version** of the original **KamilArif/kamil-gptoss20b** base model, formatted as a **GGUF** file. It is a large language model optimized for text generation tasks, based on the **transformers** library. The "i1-GGUF" variant provides a balance of speed and quality, with quantization parameters tailored for efficient inference. While the base model is designed for high-quality text generation, this quantized version is ideal for applications requiring reduced memory footprint without significant loss in performance. 
+  overrides:
+    parameters:
+      model: kamil-gptoss20b.i1-Q4_K_M.gguf
+    name: kamil-gptoss20b-i1-GGUF
+    backend: llama-cpp
+    template:
+      use_tokenizer_template: true
+    known_usecases:
+      - chat
+    function:
+      grammar:
+        disable: true
+    description: Imported from https://huggingface.co/mradermacher/kamil-gptoss20b-i1-GGUF
+    options:
+      - use_jinja:true
+
+  files:
+    - filename: kamil-gptoss20b.i1-Q4_K_M.gguf
+      sha256: e35af06bd59c329c0ac7f7f5a07ce8c5cf3415d81ae2bb3ec1a96355346be53b
+      uri: huggingface://mradermacher/kamil-gptoss20b-i1-GGUF/kamil-gptoss20b.i1-Q4_K_M.gguf